filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---
the-stack_0_663 | import cv2
import numpy as np
from matplotlib import pyplot as plt
from .log import logger
MATCHER_DEBUG = False
FLANN_INDEX_KDTREE = 0
GOOD_DISTANCE_LIMIT = 0.7
SIFT = cv2.SIFT_create()
def is_in_poly(p, poly):
"""
:param p: [x, y]
    :param poly: [[x0, y0], [x1, y1], ...] polygon vertices
    :return: True if p lies inside the polygon or on its boundary, else False
"""
px, py = p
is_in = False
for i, corner in enumerate(poly):
next_i = i + 1 if i + 1 < len(poly) else 0
x1, y1 = corner
x2, y2 = poly[next_i]
if (x1 == px and y1 == py) or (x2 == px and y2 == py): # if point is on vertex
is_in = True
break
        if min(y1, y2) < py <= max(y1, y2):  # the horizontal ray through p crosses this edge's y-range
x = x1 + (py - y1) * (x2 - x1) / (y2 - y1)
if x == px: # if point is on edge
is_in = True
break
elif x > px: # if point is on left-side of line
is_in = not is_in
return is_in
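# Illustrative note (added for clarity, not part of the original module): is_in_poly
# implements a standard ray-casting point-in-polygon test. For the unit square
# [[0, 0], [1, 0], [1, 1], [0, 1]]:
#   is_in_poly([0.5, 0.5], square) -> True
#   is_in_poly([2.0, 0.5], square) -> False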
class FlannBasedMatcher():
def __init__(self, origin):
self.origin = origin
self.kp, self.des = SIFT.detectAndCompute(origin, None)
logger.debug(f'FlannBasedMatcher init: shape ({origin.shape})')
def match(self, query, ret_square=True, draw=False, scope=None):
if self.des is None:
            logger.debug('feature descriptors are None')
if ret_square:
return None
return False
if scope is not None:
logger.debug(f'before: {len(self.kp)}')
logger.debug(f'scope: {scope}')
kp0, des0 = [], []
for kp, des in zip(self.kp, self.des):
if scope[0][0] <= kp.pt[0] and scope[0][1] <= kp.pt[1] and kp.pt[0] <= scope[1][0] and kp.pt[1] <= scope[1][1]:
kp0.append(kp)
des0.append(des)
logger.debug(f'after: {len(kp0)}')
kp0, des0 = np.array(kp0), np.array(des0)
else:
kp0, des0 = self.kp, self.des
h, w = query.shape
kp, des = SIFT.detectAndCompute(query, None)
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des, des0, k=2)
"""store all the good matches as per Lowe's ratio test."""
good = []
for x, y in matches:
if x.distance < GOOD_DISTANCE_LIMIT * y.distance:
good.append(x)
"""draw the result"""
if draw:
result = cv2.drawMatches(
query, kp, self.origin, kp0, good, None)
plt.imshow(result, 'gray')
plt.show()
if len(good) <= 4 or len(good) / len(des) < 0.2:
logger.debug(
f'not enough good matches are found: {len(good)} / {len(matches)} / {len(des)} / {len(good) / len(des)}')
if ret_square:
return None
return False
"""get the coordinates of good matches"""
src_pts = np.float32(
[kp[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32(
[kp0[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
"""calculated transformation matrix and the mask"""
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if M is None:
            logger.debug('failed to calculate the transformation matrix')
            if ret_square:
                return None
            return False
        matchesMask = mask.ravel().tolist()
pts = np.float32([[0, 0], [0, h-1], [w-1, h-1],
[w-1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, M)
dst_list = np.int32(dst).reshape(4, 2).tolist()
better = filter(lambda m: is_in_poly(
kp0[m.trainIdx].pt, dst_list), good)
better_kp_x = [kp[m.queryIdx].pt[0] for m in better]
if len(better_kp_x):
good_area_rate = np.ptp(better_kp_x) / w
else:
good_area_rate = 0
"""draw the result"""
if draw or MATCHER_DEBUG:
origin = np.array(self.origin)
cv2.polylines(origin, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
draw_params = dict(matchColor=(
0, 255, 0), singlePointColor=None, matchesMask=matchesMask, flags=2)
result = cv2.drawMatches(
query, kp, origin, kp0, good, None, **draw_params)
plt.imshow(result, 'gray')
plt.show()
if abs(dst[0][0][0] - dst[1][0][0]) > 30 or abs(dst[2][0][0] - dst[3][0][0]) > 30 or abs(dst[0][0][1] - dst[3][0][1]) > 30 or abs(dst[1][0][1] - dst[2][0][1]) > 30:
logger.debug(f'square is not rectangle: {dst_list}')
if ret_square:
return None
return False
if good_area_rate < 0.5:
logger.debug(f'good_area_rate is not enough: {good_area_rate}')
if ret_square:
return None
return False
logger.info(
f'matches: {len(good)} / {len(matches)} / {len(des)} / {len(good) / len(des)} / {good_area_rate}')
logger.debug(f'find in {dst_list}')
if ret_square:
return dst_list
return True
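# Usage sketch added for illustration; it is not part of the original file. Because
# this module imports its logger with a package-relative import, the helper below is
# written as something a sibling module in the same package could call. The image
# paths are placeholders.
def _demo_match(origin_path='screen.png', template_path='button.png'):
    origin_img = cv2.imread(origin_path, cv2.IMREAD_GRAYSCALE)
    template_img = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)
    matcher = FlannBasedMatcher(origin_img)
    # four corner points of the matched region in the origin image, or None
    square = matcher.match(template_img)
    # boolean form of the same query
    found = matcher.match(template_img, ret_square=False)
    return square, found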
|
the-stack_0_664 | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Guo H, Tang R, Ye Y, et al. Deepfm: a factorization-machine based neural network for ctr prediction[J]. arXiv preprint arXiv:1703.04247, 2017.(https://arxiv.org/abs/1703.04247)
"""
import tensorflow as tf
from ..input_embedding import preprocess_input_embedding, get_linear_logit
from ..layers.core import PredictionLayer, DNN
from ..layers.interaction import FM
from ..layers.utils import concat_fun
from ..utils import check_feature_config_dict
def DeepFM(feature_dim_dict, embedding_size=8,
use_fm=True, dnn_hidden_units=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_dnn=0,
init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary'):
"""Instantiates the DeepFM Network architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param use_fm: bool,use FM part or not
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_emb_list, dense_input_dict, inputs_list = preprocess_input_embedding(feature_dim_dict,
embedding_size,
l2_reg_embedding,
l2_reg_linear, init_std,
seed,
create_linear_weight=True)
linear_logit = get_linear_logit(linear_emb_list, dense_input_dict, l2_reg_linear)
fm_input = concat_fun(deep_emb_list, axis=1)
deep_input = tf.keras.layers.Flatten()(fm_input)
fm_out = FM()(fm_input)
deep_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
dnn_use_bn, seed)(deep_input)
deep_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
if len(dnn_hidden_units) == 0 and use_fm == False: # only linear
final_logit = linear_logit
elif len(dnn_hidden_units) == 0 and use_fm == True: # linear + FM
final_logit = tf.keras.layers.add([linear_logit, fm_out])
elif len(dnn_hidden_units) > 0 and use_fm == False: # linear + Deep
final_logit = tf.keras.layers.add([linear_logit, deep_logit])
elif len(dnn_hidden_units) > 0 and use_fm == True: # linear + FM + Deep
final_logit = tf.keras.layers.add([linear_logit, fm_out, deep_logit])
else:
raise NotImplementedError
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
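# Usage sketch added for illustration; it is not part of the original file. The
# feature_dim_dict follows the format described in the docstring above; since this
# module uses package-relative imports, the helper is meant for a caller inside the
# same package, and the compile settings are just one reasonable choice.
def _build_example_model():
    feature_dim_dict = {'sparse': {'field_1': 4, 'field_2': 3, 'field_3': 2},
                        'dense': ['field_4', 'field_5']}
    model = DeepFM(feature_dim_dict, embedding_size=8, dnn_hidden_units=(128, 128))
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    return model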
|
the-stack_0_665 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyWidgetsnbextension(PythonPackage):
"""IPython HTML widgets for Jupyter"""
homepage = "https://pypi.python.org/pypi/widgetsnbextension"
url = "https://pypi.io/packages/source/w/widgetsnbextension/widgetsnbextension-1.2.6.tar.gz"
version('1.2.6', '0aa4e152c9ba2d704389dc2453f448c7')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:2.8,3.3:')
depends_on('[email protected]:', type=('build', 'run'))
|
the-stack_0_667 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# Copyright: (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ini_file
short_description: Tweak settings in INI files
extends_documentation_fragment: files
description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
- Adds missing sections if they don't exist.
- Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
- Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
no other modifications need to be applied.
options:
path:
description:
- Path to the INI-style file; this file is created if required.
- Before Ansible 2.3 this option was only usable as I(dest).
type: path
required: true
aliases: [ dest ]
section:
description:
- Section name in INI file. This is added if C(state=present) automatically when
a single value is being set.
- If left empty or set to C(null), the I(option) will be placed before the first I(section).
- Using C(null) is also required if the config format does not support sections.
type: str
required: true
option:
description:
- If set (required for changing a I(value)), this is the name of the option.
- May be omitted if adding/removing a whole I(section).
type: str
value:
description:
- The string value to be associated with an I(option).
- May be omitted when removing an I(option).
type: str
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
type: str
choices: [ absent, present ]
default: present
no_extra_spaces:
description:
- Do not insert spaces before and after '=' symbol.
type: bool
default: no
create:
description:
- If set to C(no), the module will fail if the file does not already exist.
- By default it will create the file if it is missing.
type: bool
default: yes
allow_no_value:
description:
- Allow option without value and without '=' symbol.
type: bool
default: no
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes no sense.
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
author:
- Jan-Piet Mens (@jpmens)
- Ales Nosek (@noseka1)
'''
EXAMPLES = r'''
# Before Ansible 2.3, option 'dest' was used instead of 'path'
- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
community.general.ini_file:
path: /etc/conf
section: drinks
option: fav
value: lemonade
mode: '0600'
backup: yes
- name: Ensure "temperature=cold is in section "[drinks]" in specified file
community.general.ini_file:
path: /etc/anotherconf
section: drinks
option: temperature
value: cold
backup: yes
'''
import os
import re
import tempfile
import traceback
from ansible.module_utils.basic import AnsibleModule
def match_opt(option, line):
option = re.escape(option)
return re.match('( |\t)*%s( |\t)*(=|$)' % option, line) \
or re.match('#( |\t)*%s( |\t)*(=|$)' % option, line) \
or re.match(';( |\t)*%s( |\t)*(=|$)' % option, line)
def match_active_opt(option, line):
option = re.escape(option)
return re.match('( |\t)*%s( |\t)*(=|$)' % option, line)
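# Illustrative note (added for clarity, not part of the original module): match_opt
# also accepts options that are commented out with '#' or ';', while
# match_active_opt only accepts active assignments, e.g.
#   match_opt('port', '# port = 8080\n')        -> truthy
#   match_active_opt('port', '# port = 8080\n') -> None
#   match_active_opt('port', 'port = 8080\n')   -> truthy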
def do_ini(module, filename, section=None, option=None, value=None,
state='present', backup=False, no_extra_spaces=False, create=True,
allow_no_value=False):
diff = dict(
before='',
after='',
before_header='%s (content)' % filename,
after_header='%s (content)' % filename,
)
if not os.path.exists(filename):
if not create:
module.fail_json(rc=257, msg='Destination %s does not exist !' % filename)
destpath = os.path.dirname(filename)
if not os.path.exists(destpath) and not module.check_mode:
os.makedirs(destpath)
ini_lines = []
else:
ini_file = open(filename, 'r')
try:
ini_lines = ini_file.readlines()
finally:
ini_file.close()
if module._diff:
diff['before'] = ''.join(ini_lines)
changed = False
# ini file could be empty
if not ini_lines:
ini_lines.append('\n')
# last line of file may not contain a trailing newline
if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n':
ini_lines[-1] += '\n'
changed = True
# append fake section lines to simplify the logic
# At top:
    # Fake random section that will not match any other section in the file
# Using commit hash as fake section name
fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5"
# Insert it at the beginning
ini_lines.insert(0, '[%s]' % fake_section_name)
    # At bottom:
ini_lines.append('[')
# If no section is defined, fake section is used
if not section:
section = fake_section_name
within_section = not section
section_start = 0
msg = 'OK'
if no_extra_spaces:
assignment_format = '%s=%s\n'
else:
assignment_format = '%s = %s\n'
for index, line in enumerate(ini_lines):
if line.startswith('[%s]' % section):
within_section = True
section_start = index
elif line.startswith('['):
if within_section:
if state == 'present':
# insert missing option line at the end of the section
for i in range(index, 0, -1):
# search backwards for previous non-blank or non-comment line
if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]):
if not value and allow_no_value:
ini_lines.insert(i, '%s\n' % option)
else:
ini_lines.insert(i, assignment_format % (option, value))
msg = 'option added'
changed = True
break
elif state == 'absent' and not option:
# remove the entire section
del ini_lines[section_start:index]
msg = 'section removed'
changed = True
break
else:
if within_section and option:
if state == 'present':
# change the existing option line
if match_opt(option, line):
if not value and allow_no_value:
newline = '%s\n' % option
else:
newline = assignment_format % (option, value)
option_changed = ini_lines[index] != newline
changed = changed or option_changed
if option_changed:
msg = 'option changed'
ini_lines[index] = newline
if option_changed:
# remove all possible option occurrences from the rest of the section
index = index + 1
while index < len(ini_lines):
line = ini_lines[index]
if line.startswith('['):
break
if match_active_opt(option, line):
del ini_lines[index]
else:
index = index + 1
break
elif state == 'absent':
# delete the existing line
if match_active_opt(option, line):
del ini_lines[index]
changed = True
msg = 'option changed'
break
# remove the fake section line
del ini_lines[0]
del ini_lines[-1:]
if not within_section and option and state == 'present':
ini_lines.append('[%s]\n' % section)
if not value and allow_no_value:
ini_lines.append('%s\n' % option)
else:
ini_lines.append(assignment_format % (option, value))
changed = True
msg = 'section and option added'
if module._diff:
diff['after'] = ''.join(ini_lines)
backup_file = None
if changed and not module.check_mode:
if backup:
backup_file = module.backup_local(filename)
try:
tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
f = os.fdopen(tmpfd, 'w')
f.writelines(ini_lines)
f.close()
except IOError:
module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
try:
module.atomic_move(tmpfile, filename)
except IOError:
            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
                             traceback=traceback.format_exc())
return (changed, backup_file, diff, msg)
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(type='path', required=True, aliases=['dest']),
section=dict(type='str', required=True),
option=dict(type='str'),
value=dict(type='str'),
backup=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
no_extra_spaces=dict(type='bool', default=False),
allow_no_value=dict(type='bool', default=False),
create=dict(type='bool', default=True)
),
add_file_common_args=True,
supports_check_mode=True,
)
path = module.params['path']
section = module.params['section']
option = module.params['option']
value = module.params['value']
state = module.params['state']
backup = module.params['backup']
no_extra_spaces = module.params['no_extra_spaces']
allow_no_value = module.params['allow_no_value']
create = module.params['create']
(changed, backup_file, diff, msg) = do_ini(module, path, section, option, value, state, backup, no_extra_spaces, create, allow_no_value)
if not module.check_mode and os.path.exists(path):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
results = dict(
changed=changed,
diff=diff,
msg=msg,
path=path,
)
if backup_file is not None:
results['backup_file'] = backup_file
# Mission complete
module.exit_json(**results)
if __name__ == '__main__':
main()
|
the-stack_0_670 | """The Spark SQL dialect for ANSI Compliant Spark3.
Inherits from ANSI.
Spark SQL ANSI Mode is more restrictive regarding
keywords than the Default Mode, and still shares
some syntax with hive.
Based on:
- https://spark.apache.org/docs/latest/sql-ref.html
- https://spark.apache.org/docs/latest/sql-ref-ansi-compliance.html
- https://github.com/apache/spark/blob/master/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
"""
from sqlfluff.core.parser import (
AnyNumberOf,
BaseSegment,
Bracketed,
CommentSegment,
Conditional,
Dedent,
Delimited,
Indent,
NamedParser,
OneOf,
OptionallyBracketed,
Ref,
RegexLexer,
Sequence,
StringParser,
SymbolSegment,
Anything,
)
from sqlfluff.core.dialects import load_raw_dialect
from sqlfluff.core.parser.segments.raw import CodeSegment, KeywordSegment
from sqlfluff.dialects.dialect_spark3_keywords import (
RESERVED_KEYWORDS,
UNRESERVED_KEYWORDS,
)
ansi_dialect = load_raw_dialect("ansi")
hive_dialect = load_raw_dialect("hive")
spark3_dialect = ansi_dialect.copy_as("spark3")
spark3_dialect.patch_lexer_matchers(
[
# Spark SQL, only -- is used for single-line comment
RegexLexer(
"inline_comment",
r"(--)[^\n]*",
CommentSegment,
segment_kwargs={"trim_start": "--"},
),
# == and <=> are valid equal operations
# <=> is a non-null equals in Spark SQL
# https://spark.apache.org/docs/latest/api/sql/index.html#_10
RegexLexer("equals", r"=|==|<=>", CodeSegment),
# identifiers are delimited with `
# within a delimited identifier, ` is used to escape special characters, including `
# Ex: select `delimited `` with escaped` from `just delimited`
# https://spark.apache.org/docs/latest/sql-ref-identifier.html#delimited-identifier
RegexLexer("back_quote", r"`([^`]|``)*`", CodeSegment),
]
)
# Set the bare functions
spark3_dialect.sets("bare_functions").clear()
spark3_dialect.sets("bare_functions").update(
[
"CURRENT_DATE",
"CURRENT_TIMESTAMP",
"CURRENT_USER",
]
)
# Set the datetime units
spark3_dialect.sets("datetime_units").clear()
spark3_dialect.sets("datetime_units").update(
[
"YEAR",
# Alternate syntax for YEAR
"YYYY",
"YY",
"QUARTER",
"MONTH",
# Alternate syntax for MONTH
"MON",
"MM",
"WEEK",
"DAY",
# Alternate syntax for DAY
"DD",
"HOUR",
"MINUTE",
"SECOND",
]
)
# Set Keywords
spark3_dialect.sets("unreserved_keywords").update(UNRESERVED_KEYWORDS)
spark3_dialect.sets("reserved_keywords").update(RESERVED_KEYWORDS)
# Set Angle Bracket Pairs
spark3_dialect.sets("angle_bracket_pairs").update(
[
("angle", "StartAngleBracketSegment", "EndAngleBracketSegment", False),
]
)
# Real Segments
spark3_dialect.replace(
ComparisonOperatorGrammar=OneOf(
Ref("EqualsSegment"),
Ref("EqualsSegment_a"),
Ref("EqualsSegment_b"),
Ref("GreaterThanSegment"),
Ref("LessThanSegment"),
Ref("GreaterThanOrEqualToSegment"),
Ref("LessThanOrEqualToSegment"),
Ref("NotEqualToSegment"),
Ref("LikeOperatorSegment"),
),
TemporaryGrammar=Sequence(
Sequence("GLOBAL", optional=True),
OneOf("TEMP", "TEMPORARY"),
),
QuotedIdentifierSegment=NamedParser(
"back_quote",
CodeSegment,
name="quoted_identifier",
type="identifier",
trim_chars=("`",),
),
)
spark3_dialect.add(
# Add Hive Segments TODO : Is there a way to retrieve this w/o redefining?
DoubleQuotedLiteralSegment=NamedParser(
"double_quote",
CodeSegment,
name="quoted_literal",
type="literal",
trim_chars=('"',),
),
JsonfileKeywordSegment=StringParser(
"JSONFILE",
KeywordSegment,
name="json_file",
type="file_format",
),
RcfileKeywordSegment=StringParser(
"RCFILE", KeywordSegment, name="rc_file", type="file_format"
),
SequencefileKeywordSegment=StringParser(
"SEQUENCEFILE", KeywordSegment, name="sequence_file", type="file_format"
),
TextfileKeywordSegment=StringParser(
"TEXTFILE", KeywordSegment, name="text_file", type="file_format"
),
StartAngleBracketSegment=StringParser(
"<", SymbolSegment, name="start_angle_bracket", type="start_angle_bracket"
),
EndAngleBracketSegment=StringParser(
">", SymbolSegment, name="end_angle_bracket", type="end_angle_bracket"
),
# Add Spark Segments
EqualsSegment_a=StringParser(
"==", SymbolSegment, name="equals", type="comparison_operator"
),
EqualsSegment_b=StringParser(
"<=>", SymbolSegment, name="equals", type="comparison_operator"
),
FileKeywordSegment=StringParser(
"FILE", KeywordSegment, name="file", type="file_type"
),
JarKeywordSegment=StringParser("JAR", KeywordSegment, name="jar", type="file_type"),
WhlKeywordSegment=StringParser("WHL", KeywordSegment, name="whl", type="file_type"),
# Add relevant Hive Grammar
BracketedPropertyListGrammar=hive_dialect.get_grammar(
"BracketedPropertyListGrammar"
),
CommentGrammar=hive_dialect.get_grammar("CommentGrammar"),
FileFormatGrammar=hive_dialect.get_grammar("FileFormatGrammar"),
LocationGrammar=hive_dialect.get_grammar("LocationGrammar"),
PropertyGrammar=hive_dialect.get_grammar("PropertyGrammar"),
SerdePropertiesGrammar=hive_dialect.get_grammar("SerdePropertiesGrammar"),
StoredAsGrammar=hive_dialect.get_grammar("StoredAsGrammar"),
StoredByGrammar=hive_dialect.get_grammar("StoredByGrammar"),
StorageFormatGrammar=hive_dialect.get_grammar("StorageFormatGrammar"),
SingleOrDoubleQuotedLiteralGrammar=hive_dialect.get_grammar(
"SingleOrDoubleQuotedLiteralGrammar"
),
TerminatedByGrammar=hive_dialect.get_grammar("TerminatedByGrammar"),
# Add Spark Grammar
BucketSpecGrammar=Sequence(
Ref("ClusterSpecGrammar"),
Ref("SortSpecGrammar", optional=True),
"INTO",
Ref("NumericLiteralSegment"),
"BUCKETS",
),
ClusterSpecGrammar=Sequence(
"CLUSTERED",
"BY",
Ref("BracketedColumnReferenceListGrammar"),
),
DatabasePropertiesGrammar=Sequence(
"DBPROPERTIES", Ref("BracketedPropertyListGrammar")
),
DataSourceFormatGrammar=OneOf(
# Spark Core Data Sources
# https://spark.apache.org/docs/latest/sql-data-sources.html
"AVRO",
"CSV",
"JSON",
"PARQUET",
"ORC",
"JDBC",
# Community Contributed Data Sources
"DELTA", # https://github.com/delta-io/delta
"XML", # https://github.com/databricks/spark-xml
),
PartitionSpecGrammar=Sequence(
OneOf("PARTITION", Sequence("PARTITIONED", "BY")),
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("EqualsSegment", optional=True),
Ref("LiteralGrammar", optional=True),
Ref("CommentGrammar", optional=True),
),
),
),
),
ResourceFileGrammar=OneOf(
Ref("JarKeywordSegment"),
Ref("WhlKeywordSegment"),
Ref("FileKeywordSegment"),
),
ResourceLocationGrammar=Sequence(
"USING",
Ref("ResourceFileGrammar"),
Ref("SingleOrDoubleQuotedLiteralGrammar"),
),
SortSpecGrammar=Sequence(
"SORTED",
"BY",
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
OneOf("ASC", "DESC", optional=True),
)
)
),
optional=True,
),
UnsetTablePropertiesGrammar=Sequence(
"UNSET",
"TBLPROPERTIES",
Ref("IfExistsGrammar", optional=True),
Bracketed(Delimited(Ref("SingleOrDoubleQuotedLiteralGrammar"))),
),
TablePropertiesGrammar=Sequence(
"TBLPROPERTIES", Ref("BracketedPropertyListGrammar")
),
)
# Hive Segments
@spark3_dialect.segment()
class RowFormatClauseSegment(hive_dialect.get_segment("RowFormatClauseSegment")): # type: ignore
"""`ROW FORMAT` clause in a CREATE HIVEFORMAT TABLE statement."""
type = "row_format_clause"
@spark3_dialect.segment()
class SkewedByClauseSegment(hive_dialect.get_segment("SkewedByClauseSegment")): # type: ignore
"""`SKEWED BY` clause in a CREATE HIVEFORMAT TABLE statement."""
type = "skewed_by_clause"
# Primitive Data Types
@spark3_dialect.segment()
class PrimitiveTypeSegment(BaseSegment):
"""Spark SQL Primitive data types.
https://spark.apache.org/docs/latest/sql-ref-datatypes.html
"""
type = "primitive_type"
match_grammar = OneOf(
"BOOLEAN",
# TODO : not currently supported; add segment - see NumericLiteralSegment
# "BYTE",
"TINYINT",
# TODO : not currently supported; add segment - see NumericLiteralSegment
# "SHORT",
"SMALLINT",
"INT",
"BIGINT",
"FLOAT",
"REAL",
"DOUBLE",
"DATE",
"TIMESTAMP",
"STRING",
Sequence(
OneOf("CHAR", "CHARACTER", "VARCHAR"),
Bracketed(Ref("NumericLiteralSegment"), optional=True),
),
"BINARY",
Sequence(
OneOf("DECIMAL", "DEC", "NUMERIC"),
Bracketed(
Ref("NumericLiteralSegment"),
Ref("CommaSegment"),
Ref("NumericLiteralSegment"),
optional=True,
),
),
"INTERVAL",
)
@spark3_dialect.segment(replace=True)
class DatatypeSegment(PrimitiveTypeSegment):
"""Spark SQL Data types.
https://spark.apache.org/docs/latest/sql-ref-datatypes.html
"""
type = "data_type"
match_grammar = OneOf(
Ref("PrimitiveTypeSegment"),
Sequence(
"ARRAY",
Bracketed(
Ref("DatatypeSegment"),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"MAP",
Bracketed(
Sequence(
Ref("PrimitiveTypeSegment"),
Ref("CommaSegment"),
Ref("DatatypeSegment"),
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
Sequence(
"STRUCT",
Bracketed(
Delimited(
Sequence(
Ref("NakedIdentifierSegment"),
Ref("ColonSegment"),
Ref("DatatypeSegment"),
Ref("CommentGrammar", optional=True),
),
),
bracket_pairs_set="angle_bracket_pairs",
bracket_type="angle",
),
),
)
# Data Definition Statements
# http://spark.apache.org/docs/latest/sql-ref-syntax-ddl.html
@spark3_dialect.segment()
class AlterDatabaseStatementSegment(BaseSegment):
"""An `ALTER DATABASE/SCHEMA` statement.
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-database.html
"""
type = "alter_database_statement"
match_grammar = Sequence(
"ALTER",
OneOf("DATABASE", "SCHEMA"),
Ref("DatabaseReferenceSegment"),
"SET",
Ref("DatabasePropertiesGrammar"),
)
@spark3_dialect.segment(replace=True)
class AlterTableStatementSegment(BaseSegment):
"""A `ALTER TABLE` statement to change the table schema or properties.
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-table.html
"""
type = "alter_table_statement"
match_grammar = Sequence(
"ALTER",
"TABLE",
Ref("TableReferenceSegment"),
OneOf(
# ALTER TABLE - RENAME TO `table_identifier`
Sequence(
"RENAME",
"TO",
Ref("TableReferenceSegment"),
),
# ALTER TABLE - RENAME `partition_spec`
Sequence(
Ref("PartitionSpecGrammar"),
"RENAME",
"TO",
Ref("PartitionSpecGrammar"),
),
# ALTER TABLE - ADD COLUMNS
Sequence(
"ADD",
"COLUMNS",
Bracketed(
Delimited(
Ref("ColumnDefinitionSegment"),
),
),
),
# ALTER TABLE - ALTER OR CHANGE COLUMN
Sequence(
OneOf("ALTER", "CHANGE"),
"COLUMN",
Ref("ColumnReferenceSegment"),
Sequence("TYPE", Ref("DatatypeSegment"), optional=True),
Ref("CommentGrammar", optional=True),
OneOf(
"FIRST",
Sequence("AFTER", Ref("ColumnReferenceSegment")),
optional=True,
),
Sequence(OneOf("SET", "DROP"), "NOT NULL", optional=True),
),
# ALTER TABLE - ADD PARTITION
Sequence(
"ADD",
Ref("IfNotExistsGrammar", optional=True),
AnyNumberOf(Ref("PartitionSpecGrammar")),
),
# ALTER TABLE - DROP PARTITION
Sequence(
"DROP",
Ref("IfExistsGrammar", optional=True),
Ref("PartitionSpecGrammar"),
Sequence("PURGE", optional=True),
),
# ALTER TABLE - REPAIR PARTITION
Sequence("RECOVER", "PARTITIONS"),
# ALTER TABLE - SET PROPERTIES
Sequence("SET", Ref("TablePropertiesGrammar")),
# ALTER TABLE - UNSET PROPERTIES
Ref("UnsetTablePropertiesGrammar"),
# ALTER TABLE - SET SERDE
Sequence(
Ref("PartitionSpecGrammar", optional=True),
"SET",
OneOf(
Sequence(
"SERDEPROPERTIES",
Ref("BracketedPropertyListGrammar"),
),
Sequence(
"SERDE",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("SerdePropertiesGrammar", optional=True),
),
),
),
# ALTER TABLE - SET FILE FORMAT
Sequence(
Ref("PartitionSpecGrammar", optional=True),
"SET",
"FILEFORMAT",
Ref("DataSourceFormatGrammar"),
),
# ALTER TABLE - CHANGE FILE LOCATION
Sequence(
Ref("PartitionSpecGrammar"),
"SET",
Ref("LocationGrammar"),
),
),
)
@spark3_dialect.segment()
class AlterViewStatementSegment(BaseSegment):
"""A `ALTER VIEW` statement to change the view schema or properties.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-alter-view.html
"""
type = "alter_view_statement"
match_grammar = Sequence(
"ALTER",
"VIEW",
Ref("TableReferenceSegment"),
OneOf(
Sequence(
"RENAME",
"TO",
Ref("TableReferenceSegment"),
),
Sequence("SET", Ref("TablePropertiesGrammar")),
Ref("UnsetTablePropertiesGrammar"),
Sequence(
"AS",
OptionallyBracketed(Ref("SelectStatementSegment")),
),
),
)
@spark3_dialect.segment(replace=True)
class CreateDatabaseStatementSegment(BaseSegment):
"""A `CREATE DATABASE` statement.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-database.html
"""
type = "create_database_statement"
match_grammar = Sequence(
"CREATE",
OneOf("DATABASE", "SCHEMA"),
Ref("IfNotExistsGrammar", optional=True),
Ref("DatabaseReferenceSegment"),
Ref("CommentGrammar", optional=True),
Ref("LocationGrammar", optional=True),
Sequence(
"WITH", "DBPROPERTIES", Ref("BracketedPropertyListGrammar"), optional=True
),
)
@spark3_dialect.segment(replace=True)
class CreateFunctionStatementSegment(BaseSegment):
"""A `CREATE FUNCTION` statement.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-function.html
"""
type = "create_function_statement"
match_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Anything(),
)
parse_grammar = Sequence(
"CREATE",
Sequence("OR", "REPLACE", optional=True),
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Ref("IfNotExistsGrammar", optional=True),
Ref("FunctionNameIdentifierSegment"),
"AS",
Ref("SingleOrDoubleQuotedLiteralGrammar"),
Ref("ResourceLocationGrammar", optional=True),
)
@spark3_dialect.segment(replace=True)
class CreateTableStatementSegment(BaseSegment):
"""A `CREATE TABLE` statement using a Data Source or Like.
http://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-datasource.html
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-like.html
"""
type = "create_table_statement"
match_grammar = Sequence(
"CREATE",
"TABLE",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
OneOf(
# Columns and comment syntax:
Sequence(
Bracketed(
Delimited(
Sequence(
Ref("ColumnDefinitionSegment"),
Ref("CommentGrammar", optional=True),
),
),
),
),
# Like Syntax
Sequence(
"LIKE",
Ref("TableReferenceSegment"),
),
optional=True,
),
Sequence("USING", Ref("DataSourceFormatGrammar"), optional=True),
Ref("RowFormatClauseSegment", optional=True),
Ref("StoredAsGrammar", optional=True),
Sequence("OPTIONS", Ref("BracketedPropertyListGrammar"), optional=True),
Ref("PartitionSpecGrammar", optional=True),
Ref("BucketSpecGrammar", optional=True),
AnyNumberOf(
Ref("LocationGrammar", optional=True),
Ref("CommentGrammar", optional=True),
Ref("TablePropertiesGrammar", optional=True),
),
# Create AS syntax:
Sequence(
"AS",
OptionallyBracketed(Ref("SelectableGrammar")),
optional=True,
),
)
@spark3_dialect.segment()
class CreateHiveFormatTableStatementSegment(hive_dialect.get_segment("CreateTableStatementSegment")): # type: ignore
"""A `CREATE TABLE` statement using Hive format.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-create-table-hiveformat.html
"""
type = "create_table_statement"
@spark3_dialect.segment(replace=True)
class CreateViewStatementSegment(BaseSegment):
"""A `CREATE VIEW` statement.
https://spark.apache.org/docs/3.0.0/sql-ref-syntax-ddl-create-view.html#syntax
"""
type = "create_view_statement"
match_grammar = Sequence(
"CREATE",
Ref("OrReplaceGrammar", optional=True),
Ref("TemporaryGrammar", optional=True),
"VIEW",
Ref("IfNotExistsGrammar", optional=True),
Ref("TableReferenceSegment"),
# Columns and comment syntax:
Sequence(
Bracketed(
Delimited(
Sequence(
Ref("ColumnReferenceSegment"),
Ref("CommentGrammar", optional=True),
),
),
),
optional=True,
),
Ref("CommentGrammar", optional=True),
Ref("TablePropertiesGrammar", optional=True),
"AS",
Ref("SelectableGrammar"),
Ref("WithNoSchemaBindingClauseSegment", optional=True),
)
@spark3_dialect.segment()
class DropFunctionStatementSegment(BaseSegment):
"""A `DROP FUNCTION` STATEMENT.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-drop-function.html
"""
type = "drop_function_statement"
match_grammar = Sequence(
"DROP",
Ref("TemporaryGrammar", optional=True),
"FUNCTION",
Ref("IfExistsGrammar", optional=True),
Ref("FunctionNameSegment"),
)
@spark3_dialect.segment()
class MsckRepairTableStatementSegment(hive_dialect.get_segment("MsckRepairTableStatementSegment")): # type: ignore
"""A `REPAIR TABLE` statement using Hive MSCK (Metastore Check) format.
This class inherits from Hive since Spark leverages Hive format for this command and
is dependent on the Hive metastore.
https://spark.apache.org/docs/latest/sql-ref-syntax-ddl-repair-table.html
"""
type = "msck_repair_table_statement"
# Auxiliary Statements
@spark3_dialect.segment()
class AddExecutablePackage(BaseSegment):
"""A `ADD JAR` statement.
https://spark.apache.org/docs/latest/sql-ref-syntax-aux-resource-mgmt-add-jar.html
"""
type = "add_executable_package"
match_grammar = Sequence(
"ADD",
Ref("ResourceFileGrammar"),
Ref("SingleOrDoubleQuotedLiteralGrammar"),
)
@spark3_dialect.segment(replace=True)
class StatementSegment(BaseSegment):
"""Overriding StatementSegment to allow for additional segment parsing."""
match_grammar = ansi_dialect.get_segment("StatementSegment").match_grammar.copy()
parse_grammar = ansi_dialect.get_segment("StatementSegment").parse_grammar.copy(
# Segments defined in Spark3 dialect
insert=[
# Data Definition Statements
Ref("AlterDatabaseStatementSegment"),
Ref("AlterTableStatementSegment"),
Ref("AlterViewStatementSegment"),
Ref("CreateHiveFormatTableStatementSegment"),
Ref("DropFunctionStatementSegment"),
Ref("MsckRepairTableStatementSegment"),
# Auxiliary Statements
Ref("AddExecutablePackage"),
],
remove=[
Ref("TransactionStatementSegment"),
Ref("CreateSchemaStatementSegment"),
Ref("SetSchemaStatementSegment"),
Ref("CreateExtensionStatementSegment"),
Ref("CreateModelStatementSegment"),
Ref("DropModelStatementSegment"),
],
)
@spark3_dialect.segment(replace=True)
class JoinClauseSegment(BaseSegment):
"""Any number of join clauses, including the `JOIN` keyword.
https://spark.apache.org/docs/3.0.0/sql-ref-syntax-qry-select-join.html
TODO: Add NATURAL JOIN syntax.
"""
type = "join_clause"
match_grammar = Sequence(
# NB These qualifiers are optional
# TODO: Allow nested joins like:
# ....FROM S1.T1 t1 LEFT JOIN ( S2.T2 t2 JOIN S3.T3 t3 ON t2.col1=t3.col1) ON tab1.col1 = tab2.col1
OneOf(
"CROSS",
"INNER",
Sequence(
OneOf(
"FULL",
"LEFT",
"RIGHT",
),
Ref.keyword("OUTER", optional=True),
),
Sequence(
Ref.keyword("LEFT", optional=True),
"SEMI",
),
Sequence(
Ref.keyword("LEFT", optional=True),
"ANTI",
),
optional=True,
),
Ref("JoinKeywords"),
Indent,
Sequence(
Ref("FromExpressionElementSegment"),
Conditional(Dedent, indented_using_on=False),
# NB: this is optional
OneOf(
# ON clause
Ref("JoinOnConditionSegment"),
# USING clause
Sequence(
"USING",
Indent,
Bracketed(
# NB: We don't use BracketedColumnReferenceListGrammar
# here because we're just using SingleIdentifierGrammar,
# rather than ObjectReferenceSegment or ColumnReferenceSegment.
# This is a) so that we don't lint it as a reference and
# b) because the column will probably be returned anyway
# during parsing.
Delimited(
Ref("SingleIdentifierGrammar"),
ephemeral_name="UsingClauseContents",
)
),
Dedent,
),
# Unqualified joins *are* allowed. They just might not
# be a good idea.
optional=True,
),
Conditional(Indent, indented_using_on=False),
),
Dedent,
)
get_eventual_alias = ansi_dialect.get_segment(
"JoinClauseSegment"
).get_eventual_alias
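# Usage sketch added for illustration; it is not part of the original dialect file.
# It assumes sqlfluff's simple API (sqlfluff.parse / sqlfluff.lint) is available in
# the version this dialect targets and that the dialect is registered as "spark3".
def _demo_parse_spark3():
    import sqlfluff
    sql = "ALTER TABLE db.events RENAME TO db.events_v2"
    # parse the statement with the spark3 dialect defined above
    parsed = sqlfluff.parse(sql, dialect="spark3")
    # lint the same statement; returns a list of violation dicts
    violations = sqlfluff.lint(sql, dialect="spark3")
    return parsed, violations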
|
the-stack_0_671 | """A logging handler that emits to a Discord webhook."""
import requests
from logging import Handler
class DiscordHandler(Handler):
"""A logging handler that emits to a Discord webhook."""
def __init__(self, webhook, *args, **kwargs):
"""Initialize the DiscordHandler class."""
super().__init__(*args, **kwargs)
self.webhook = webhook
def emit(self, record):
"""Emit record to the Discord webhook."""
json = {"content": self.format(record)}
try:
requests.post(self.webhook, json=json)
except requests.RequestException:
self.handleError(record)
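# Usage sketch added for illustration; it is not part of the original file. The
# webhook URL below is a placeholder for a real Discord webhook URL.
if __name__ == "__main__":
    import logging
    logger = logging.getLogger("discord_demo")
    logger.setLevel(logging.INFO)
    handler = DiscordHandler("https://discord.com/api/webhooks/<id>/<token>")
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logger.addHandler(handler)
    logger.info("Deployment finished")  # posted to the webhook as message content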
|
the-stack_0_673 | """ Class to initialize common objects. """
import pickle
from pathlib import Path
################################################################
class Init():
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
def __init__(self, workdir, **kwargs):
print('Init class created.')
self.workdir = Path(workdir)
self.cachedir = self.workdir / 'cache'
print('workdir: {}'.format(self.workdir))
print('cachedir: {}'.format(self.cachedir))
#---------------------------------------------------------------
# Initialize settings as class members of obj
#---------------------------------------------------------------
def Initialize(self, obj):
obj.workdir = self.workdir
obj.cachedir = self.cachedir
obj.cachedir.mkdir(parents=True, exist_ok=True)
obj.AllData = pickle.load((obj.workdir / 'default.p').open('rb'))
#: Sets the collision systems for the entire project,
#: where each system is a string of the form
#: ``'<projectile 1><projectile 2><beam energy in GeV>'``,
#: such as ``'PbPb2760'``, ``'AuAu200'``, ``'pPb5020'``.
#: Even if the project uses only a single system,
#: this should still be a list of one system string.
obj.systems = obj.AllData["systems"]
#: Design attribute. This is a list of
#: strings describing the inputs.
#: The default is for the example data.
obj.keys = obj.AllData["keys"]
#: Design attribute. This is a list of input
#: labels in LaTeX for plotting.
#: The default is for the example data.
obj.labels = obj.AllData["labels"]
#: Design attribute. This is list of tuples of
#: (min,max) for each design input.
#: The default is for the example data.
obj.ranges = obj.AllData["ranges"]
#: Design array to use - should be a numpy array.
#: Keep at None generate a Latin Hypercube with above (specified) range.
#: Design array for example is commented under default.
obj.design_array = obj.AllData["design"]
#: Dictionary of the model output.
#: Form MUST be data_list[system][observable][subobservable][{'Y': ,'x': }].
#: 'Y' is an (n x p) numpy array of the output.
#:
#: 'x' is a (1 x p) numpy array of numeric index of columns of Y (if exists). In the example data, x is p_T.
#: This MUST be changed from None - no built-in default exists. Uncomment the line below default for example.
obj.data_list = obj.AllData["model"]
#: Dictionary for the model validation output
#: Must be the same for as the model output dictionary
#data_list_val = pickle.load((cachedir / 'model/validation/data_dict_val.p').open('rb'))
obj.data_list_val = None
#: Dictionary of the experimental data.
#: Form MUST be exp_data_list[system][observable][subobservable][{'y':,'x':,'yerr':{'stat':,'sys'}}].
#: 'y' is a (1 x p) numpy array of experimental data.
#:
#: 'x' is a (1 x p) numpy array of numeric index of columns of Y (if exists). In the example data, x is p_T.
#:
#: 'yerr' is a dictionary with keys 'stat' and 'sys'.
#:
#: 'stat' is a (1 x p) array of statistical errors.
#:
#: 'sys' is a (1 x p) array of systematic errors.
#: This MUST be changed from None - no built-in default exists. Uncomment the line below default for example.
obj.exp_data_list = obj.AllData["data"]
#: Experimental covariance matrix.
#: Set exp_cov = None to have the script estimate the covariance matrix.
#: Example commented below default.
obj.exp_cov = obj.AllData["cov"]
#: Observables to emulate as a list of 2-tuples
#: ``(obs, [list of subobs])``.
obj.observables = obj.AllData["observables"]
#---------------------------------------------------------------
    # Return the list of collision systems from the pickled configuration
#---------------------------------------------------------------
def systems(self):
AllData = pickle.load((self.workdir / 'default.p').open('rb'))
#: Sets the collision systems for the entire project,
#: where each system is a string of the form
#: ``'<projectile 1><projectile 2><beam energy in GeV>'``,
#: such as ``'PbPb2760'``, ``'AuAu200'``, ``'pPb5020'``.
#: Even if the project uses only a single system,
#: this should still be a list of one system string.
return AllData["systems"]
#---------------------------------------------------------------
# Return formatted string of class members
#---------------------------------------------------------------
def __str__(self):
s = []
variables = self.__dict__.keys()
for v in variables:
s.append('{} = {}'.format(v, self.__dict__[v]))
return "[i] {} with \n . {}".format(self.__class__.__name__, '\n . '.join(s))
|
the-stack_0_674 | #===============================================================================
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import sys
import os
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import bench
import numpy as np
from cuml import KMeans
import warnings
from sklearn.metrics.cluster import davies_bouldin_score
warnings.filterwarnings('ignore', category=FutureWarning)
parser = argparse.ArgumentParser(description='cuML K-means benchmark')
parser.add_argument('-i', '--filei', '--fileI', '--init',
type=str, help='Initial clusters')
parser.add_argument('-t', '--tol', type=float, default=0.,
help='Absolute threshold')
parser.add_argument('--maxiter', type=int, default=100,
help='Maximum number of iterations')
parser.add_argument('--samples-per-batch', type=int, default=32768,
                    help='Maximum number of samples per batch')
parser.add_argument('--n-clusters', type=int, help='Number of clusters')
params = bench.parse_args(parser, prefix='cuml', loop_types=('fit', 'predict'))
# Load and convert generated data
X_train, X_test, _, _ = bench.load_data(params)
if params.filei == 'k-means++':
X_init = 'k-means++'
# Load initial centroids from specified path
elif params.filei is not None:
X_init = np.load(params.filei).astype(params.dtype)
params.n_clusters = X_init.shape[0]
# or choose random centroids from training data
else:
np.random.seed(params.seed)
centroids_idx = np.random.randint(0, X_train.shape[0],
size=params.n_clusters)
if hasattr(X_train, "iloc"):
X_init = X_train.iloc[centroids_idx].to_pandas().values
else:
X_init = X_train[centroids_idx]
# Workaround for cuML kmeans fail
# when second call of 'fit' method causes AttributeError
def kmeans_fit(X):
alg = KMeans(n_clusters=params.n_clusters, tol=params.tol,
max_iter=params.maxiter, init=X_init,
max_samples_per_batch=params.samples_per_batch)
alg.fit(X)
return alg
# Time fit
fit_time, kmeans = bench.measure_function_time(kmeans_fit, X_train, params=params)
train_predict = kmeans.predict(X_train)
# Time predict
predict_time, test_predict = bench.measure_function_time(kmeans.predict, X_test,
params=params)
X_train_host = bench.convert_to_numpy(X_train)
train_predict_host = bench.convert_to_numpy(train_predict)
acc_train = davies_bouldin_score(X_train_host, train_predict_host)
X_test_host = bench.convert_to_numpy(X_test)
test_predict_host = bench.convert_to_numpy(test_predict)
acc_test = davies_bouldin_score(X_test_host, test_predict_host)
bench.print_output(library='cuml', algorithm='kmeans',
stages=['training', 'prediction'], params=params,
functions=['KMeans.fit', 'KMeans.predict'],
times=[fit_time, predict_time], accuracy_type='davies_bouldin_score',
accuracies=[acc_train, acc_test], data=[X_train, X_test],
alg_instance=kmeans)
|
the-stack_0_678 | #! /usr/bin/env python
# Copyright (c) 2014, Dawn Robotics Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Dawn Robotics Ltd nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import math
import time
import Queue
import mini_driver
import threading
#---------------------------------------------------------------------------------------------------
class RobotController:
MIN_ANGLE = 0.0
MAX_ANGLE = 180.0
CENTRE_ANGLE = (MIN_ANGLE + MAX_ANGLE)/2.0
MAX_UPDATE_TIME_DIFF = 0.25
TIME_BETWEEN_SERVO_SETTING_UPDATES = 1.0
TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES = 0.5
JOYSTICK_DEAD_ZONE = 0.1
MAX_ABS_NECK_SPEED = 30.0 # Degrees per second
MOTION_COMMAND_TIMEOUT = 2.0 # If no commands for the motors are recieved in this time then
# the motors (drive and servo) are set to zero speed
#-----------------------------------------------------------------------------------------------
def __init__( self, robotConfig ):
self.miniDriver = mini_driver.MiniDriver()
connected = self.miniDriver.connect()
if not connected:
raise Exception( "Unable to connect to the mini driver" )
self.robotConfig = robotConfig
self.leftMotorSpeed = 0
self.rightMotorSpeed = 0
self.panAngle = self.CENTRE_ANGLE
self.tiltAngle = self.CENTRE_ANGLE
self.panSpeed = 0.0
self.tiltSpeed = 0.0
self.lastServoSettingsSendTime = 0.0
self.lastSensorConfigurationSendTime = 0.0
self.lastUpdateTime = 0.0
self.lastMotionCommandTime = time.time()
self.piSensorModuleName = ""
self.piSensorModule = None
self.piSensorReader = None
self.piSensorDict = {}
#-----------------------------------------------------------------------------------------------
def __del__( self ):
self.disconnect()
#-----------------------------------------------------------------------------------------------
def disconnect( self ):
self.miniDriver.disconnect()
#-----------------------------------------------------------------------------------------------
def getStatusDict( self ):
presetMaxAbsMotorSpeed, presetMaxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
statusDict = {
"batteryVoltage" : self.miniDriver.getBatteryVoltageReading().data,
"presetMaxAbsMotorSpeed" : presetMaxAbsMotorSpeed,
"presetMaxAbsTurnSpeed" : presetMaxAbsTurnSpeed,
"sensors" : self.getSensorDict()
}
return statusDict
#-----------------------------------------------------------------------------------------------
def getSensorDict( self ):
sensorDict = {
"batteryVoltage" : self.miniDriver.getBatteryVoltageReading(),
"digital" : self.miniDriver.getDigitalReadings(),
"analog" : self.miniDriver.getAnalogReadings(),
"ultrasonic" : self.miniDriver.getUltrasonicReading(),
"encoders" : self.miniDriver.getEncodersReading(),
}
sensorDict.update( self.piSensorDict )
return sensorDict
#-----------------------------------------------------------------------------------------------
def normaliseJoystickData( self, joystickX, joystickY ):
stickVectorLength = math.sqrt( joystickX**2 + joystickY**2 )
if stickVectorLength > 1.0:
joystickX /= stickVectorLength
joystickY /= stickVectorLength
if stickVectorLength < self.JOYSTICK_DEAD_ZONE:
joystickX = 0.0
joystickY = 0.0
return ( joystickX, joystickY )
#-----------------------------------------------------------------------------------------------
def centreNeck( self ):
self.panAngle = self.CENTRE_ANGLE
self.tiltAngle = self.CENTRE_ANGLE
self.panSpeed = 0.0
self.tiltSpeed = 0.0
#-----------------------------------------------------------------------------------------------
def setMotorJoystickPos( self, joystickX, joystickY ):
joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )
if self.robotConfig.usePresetMotorSpeeds:
maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
else:
maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed
maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed
# Set forward speed from joystickY
leftMotorSpeed = maxAbsMotorSpeed*joystickY
rightMotorSpeed = maxAbsMotorSpeed*joystickY
# Set turn speed from joystickX
leftMotorSpeed += maxAbsTurnSpeed*joystickX
rightMotorSpeed -= maxAbsTurnSpeed*joystickX
leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )
rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )
self.leftMotorSpeed = leftMotorSpeed*self.robotConfig.leftMotorScale
self.rightMotorSpeed = rightMotorSpeed
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def setMotorSpeeds( self, leftMotorSpeed, rightMotorSpeed ):
if self.robotConfig.usePresetMotorSpeeds:
maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
else:
maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed
maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed
self.leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )
self.rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def setNeckJoystickPos( self, joystickX, joystickY ):
joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )
# Set pan and tilt angle speeds
self.panSpeed = -self.MAX_ABS_NECK_SPEED*joystickX
self.tiltSpeed = -self.MAX_ABS_NECK_SPEED*joystickY
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def setNeckAngles( self, panAngle, tiltAngle ):
self.panAngle = max( self.MIN_ANGLE, min( panAngle, self.MAX_ANGLE ) )
self.tiltAngle = max( self.MIN_ANGLE, min( tiltAngle, self.MAX_ANGLE ) )
self.panSpeed = 0.0
self.tiltSpeed = 0.0
self.lastMotionCommandTime = time.time()
#-----------------------------------------------------------------------------------------------
def _loadPiSensorModule( self ):
if self.robotConfig.piSensorModuleName != "":
# Try to import the new sensor module
newSensorModule = None
try:
newSensorModule = __import__( self.robotConfig.piSensorModuleName, fromlist=[''] )
except Exception as e:
logging.error( "Caught exception when trying to import Pi sensor module" )
logging.error( str( e ) )
if newSensorModule != None:
# We have a new sensor module. Shutdown any existing sensor reader
if self.piSensorReader != None:
self.piSensorReader.shutdown()
self.piSensorReader = None
# Remove reference to existing sensor module
self.piSensorModule = None
self.piSensorModuleName = ""
# Try to create the new Pi sensor reader
newSensorReader = None
try:
newSensorReader = newSensorModule.PiSensorReader()
except Exception as e:
logging.error( "Caught exception when trying to create Pi sensor reader" )
logging.error( str( e ) )
if newSensorReader != None:
self.piSensorModule = newSensorModule
self.piSensorModuleName = self.robotConfig.piSensorModuleName
self.piSensorReader = newSensorReader
#-----------------------------------------------------------------------------------------------
def update( self ):
if not self.miniDriver.isConnected():
return
curTime = time.time()
timeDiff = min( curTime - self.lastUpdateTime, self.MAX_UPDATE_TIME_DIFF )
# Turn off the motors if we haven't received a motion command for a while
if curTime - self.lastMotionCommandTime > self.MOTION_COMMAND_TIMEOUT:
self.leftMotorSpeed = 0.0
self.rightMotorSpeed = 0.0
self.panSpeed = 0.0
self.tiltSpeed = 0.0
# Update the pan and tilt angles
self.panAngle += self.panSpeed*timeDiff
self.tiltAngle += self.tiltSpeed*timeDiff
self.panAngle = max( self.MIN_ANGLE, min( self.panAngle, self.MAX_ANGLE ) )
self.tiltAngle = max( self.MIN_ANGLE, min( self.tiltAngle, self.MAX_ANGLE ) )
# Update the mini driver
self.miniDriver.setOutputs(
self.leftMotorSpeed, self.rightMotorSpeed, self.panAngle, self.tiltAngle )
self.miniDriver.update()
# Send servo settings if needed
if curTime - self.lastServoSettingsSendTime >= self.TIME_BETWEEN_SERVO_SETTING_UPDATES:
self.miniDriver.setPanServoLimits(
self.robotConfig.panPulseWidthMin,
self.robotConfig.panPulseWidthMax )
self.miniDriver.setTiltServoLimits(
self.robotConfig.tiltPulseWidthMin,
self.robotConfig.tiltPulseWidthMax )
self.lastServoSettingsSendTime = curTime
# Send sensor configuration if needed
if curTime - self.lastSensorConfigurationSendTime >= self.TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES:
self.miniDriver.setSensorConfiguration( self.robotConfig.miniDriverSensorConfiguration )
self.lastSensorConfigurationSendTime = curTime
# Change the Pi sensor module if needed
if self.robotConfig.piSensorModuleName != self.piSensorModuleName:
self._loadPiSensorModule()
# Read from any sensors attached to the Pi
if self.piSensorReader != None:
self.piSensorDict = {}
try:
self.piSensorDict = self.piSensorReader.readSensors()
except Exception as e:
logging.error( "Caught exception when trying to read from Pi sensor reader" )
logging.error( str( e ) )
        self.lastUpdateTime = curTime
|
the-stack_0_680 | """
``street.py``
=============
Module for the weight of a street segment
"""
from __future__ import annotations
from protocols import Weightable
from random import choice
from functools import total_ordering
from typing import Optional, List, Any, Dict
#: Incluir velocidade máxima entre as possibilidades
#: de velocidade assumida naquele trecho de rua
INCLUDE_MAX_SPEED = False
@total_ordering
class Street(Weightable):
"""
    Weight class (:class:`Weightable`) for a street segment
    As soon as the :attr:`~street.Street.speed` property is
    read for the first time, it assumes a value that is kept
    for the lifetime of the object.
    However, when this instance is copied with :func:`copy.deepcopy`,
    this property is reset and it may assume a new
    value.
    :param distance: distance of the segment
    :param max_speed: maximum speed of the segment
"""
def __init__(self, distance: float, max_speed: float):
self._distance = distance
self._max_speed = max_speed
self._latest_speeds: List[float] = []
self._speed: Optional[float] = None
def register_speeds(self, *speeds: float) -> None:
"""Registra as velocidades atuais no trecho"""
self._latest_speeds += list(speeds)
@property
def speed(self) -> float:
"""Velocidade assumida no trecho"""
if self._speed is None:
if INCLUDE_MAX_SPEED:
self._speed = choice(self._latest_speeds + [self._max_speed])
elif self._latest_speeds:
self._speed = choice(self._latest_speeds)
else:
self._speed = self._max_speed
return self._speed
@property
def distance(self) -> float:
"""distância do trecho"""
return self._distance
@property
def time(self) -> float:
"""tempo no trecho, com a velocidade assumida
Usado para a comparação entre trechos
"""
if self.speed:
return self.distance / self.speed
else:
return float('inf')
def is_inf(self) -> bool:
"""Se a velocidade assumida representa um tempo infinito"""
return not self.speed
def __eq__(self, other: Any) -> bool:
return isinstance(other, Street) and self.time == other.time
def __lt__(self, other: Any) -> bool:
return isinstance(other, Street) and self.time < other.time
def __add__(self, other: Street) -> Street:
"""A soma dos trechos equivale a soma dos tempos"""
d1, d2 = self.distance, other.distance
s1, s2 = self.speed, other.speed
distance = d1 + d2
if not s1 or not s2:
speed = 0.0
else:
speed = (distance * s1 * s2) / (d1 * s2 + d2 * s1)
return Street(distance, speed)
def __repr__(self) -> str:
return repr(self.time)
def __deepcopy__(self, memo: Dict[int, Any]) -> Street:
"""Cópia especial que não mantém a velocidade assumida"""
new = Street(self.distance, self._max_speed)
new.register_speeds(*self._latest_speeds)
memo[id(self)] = new
return new
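# A minimal usage sketch for Street (illustrative only -- the distances and
# speeds below are hypothetical, not taken from any real data):
#
#     a = Street(distance=100.0, max_speed=60.0)
#     b = Street(distance=50.0, max_speed=40.0)
#     a.register_speeds(30.0, 45.0)   # observed speeds on segment `a`
#     combined = a + b                # combined.time == a.time + b.time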
|
the-stack_0_683 | import pyglet
from pyglet.window import key
from pyglet.window.key import MOD_SHIFT
from CGP import Individual, create_pop, evolve
from load import *
game_window = pyglet.window.Window(1600, 1000)
pyglet.resource.path = ['../assets']
pyglet.resource.reindex()
main_batch = pyglet.graphics.Batch()
pillar_batch = pyglet.graphics.Batch()
ai_batch = pyglet.graphics.Batch()
label_score = labels(batch=main_batch)
label_alive = labels(y=520, batch=main_batch)
label_best = labels(y=540, batch=main_batch)
label_generation = labels(y=560, batch=main_batch)
pillars = new_pillar(pillar_batch)
completion = False
score = 0
best_score = 0 # FIXME
time_count = 0
flag = 0
alive = 0
generation = 1
ai_num = ""
pop = None
birds_obj = []
ai_birds_obj = []
def create_ai_bird(pops):
global alive, ai_num
for ind in pops:
ai_birds_obj.append(new_ai_birds(individual=ind, batch=ai_batch))
alive += 1
ai_num = str(alive)
def clear_game():
global pillars, generation, score, time_count
for obj in pillars:
obj.delete()
pillars.remove(obj)
for obj in birds_obj:
obj.delete()
birds_obj.remove(obj)
generation += 1
score = 0
time_count = 0
pillars = new_pillar(pillar_batch)
def init():
global birds_obj, score
score = 0
label_score.text = "Score: " + str(score)
birds_obj.append(new_birds(main_batch))
def init_pop():
global ai_birds_obj, alive, ai_num, pop
pop = create_pop(10)
create_ai_bird(pop)
label_alive.text = "Alive: " + str(alive) + "/" + ai_num
label_generation.text = "Generation: " + str(generation)
label_best.text = "Best score: " + str(best_score)
@game_window.event
def on_draw():
global completion
game_window.clear()
main_batch.draw()
pillar_batch.draw()
ai_batch.draw()
for b in birds_obj:
game_window.push_handlers(b.get_key_handler())
@game_window.event
def on_key_press(symbol, modifiers):
# add a new player bird
if modifiers & MOD_SHIFT:
if symbol == key.N:
birds_obj.extend([new_birds(main_batch)])
# make it faster
if modifiers & MOD_SHIFT:
if symbol == key.EQUAL:
print("speed up")
pyglet.clock.schedule_interval(update, 1 / 120.0)
# make it stop
if modifiers & MOD_SHIFT:
if symbol == key.BACKSPACE:
print("stop")
pyglet.clock.unschedule(update)
def update(dt):
global completion, score, time_count, flag, alive, pop, best_score
time_count += 1
# update
for b in birds_obj:
b.update(dt)
# check collide
if b.collide_down(pillars[0]) or b.collide_up(pillars[1]):
b.dead = True
for p in pillars:
p.update(dt)
for b in ai_birds_obj:
if b.collide_down(pillars[0]) or b.collide_up(pillars[1]):
b.dead = True
b.update(dt)
# flap or not
b.check_flap(pillars[0].x, pillars[0].y)
# check pillars out of bounds
if pillars[0].check_bounds():
pillars[0].dead = True
pillars[1].dead = True
# remove dead objects
for to_remove in [obj for obj in pillars if obj.dead]:
to_remove.delete()
pillars.remove(to_remove)
for to_remove in [obj for obj in birds_obj if obj.dead]:
to_remove.delete()
birds_obj.remove(to_remove)
for to_remove in [obj for obj in ai_birds_obj if obj.dead]:
alive -= 1
to_remove.delete()
ai_birds_obj.remove(to_remove)
# add new pillars and reset flag for score
if time_count % 240 == 0:
time_count = 0
flag = 0
add_pillars = new_pillar(pillar_batch)
pillars.extend(add_pillars)
# label
# score
if flag == 0 and (len(birds_obj) > 0 or len(ai_birds_obj) > 0) and pillars[0].check_score():
# print(time_count)
flag += 1
score += 1
label_score.text = "Score: " + str(int(score))
# check alive AI
label_alive.text = "Alive: " + str(alive) + "/" + ai_num
# check best score
if score > best_score:
best_score = score
label_best.text = "Best score: " + str(best_score)
# check generation
label_generation.text = "Generation: " + str(generation)
# evolve AI
if alive == 0:
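        # All AI birds are dead: evolve a new population, reset the game and
        # respawn the evolved birds. (The numeric arguments to `evolve` are
        # presumably CGP hyper-parameters such as a 0.03 mutation rate -- an
        # assumption, since the CGP module itself is not shown here.)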
pop = evolve(pop, 0.03, 4, 6)
clear_game()
create_ai_bird(pop)
if __name__ == '__main__':
init()
init_pop()
# init_ai()
pyglet.clock.schedule_interval(update, 1 / 120.0)
pyglet.app.run()
|
the-stack_0_688 | # Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlit.
How to use Streamlit in 3 seconds:
1. Write an app
>>> import streamlit as st
>>> st.write(anything_you_want)
2. Run your app
$ streamlit run my_script.py
3. Use your app
A new tab will open on your browser. That's your Streamlit app!
4. Modify your code, save it, and watch changes live on your browser.
Take a look at the other commands in this module to find out what else
Streamlit can do:
>>> dir(streamlit)
Or try running our "Hello World":
$ streamlit hello
For more detailed info, see https://docs.streamlit.io.
"""
# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.
# NOTE: You'll see lots of "noqa: F821" in this file. That's because we
# manually mess with the local namespace so the linter can't know that some
# identifiers actually exist in the namespace.
# Must be at the top, to avoid circular dependency.
from streamlit import logger as _logger
from streamlit import config as _config
_LOGGER = _logger.get_logger("root")
# Give the package a version.
import pkg_resources as _pkg_resources
import uuid as _uuid
import subprocess
import platform
import os
from typing import Any, List, Tuple, Type
# This used to be pkg_resources.require('streamlit') but it would cause
# pex files to fail. See #394 for more details.
__version__ = _pkg_resources.get_distribution("streamlit").version
# Deterministic Unique Streamlit User ID
if (
    platform.system() == "Linux"
    and not os.path.isfile("/etc/machine-id")
    and not os.path.isfile("/var/lib/dbus/machine-id")
):
print("Generate machine-id")
subprocess.run(["sudo", "dbus-uuidgen", "--ensure"])
machine_id = str(_uuid.getnode())
if os.path.isfile("/etc/machine-id"):
with open("/etc/machine-id", "r") as f:
machine_id = f.read()
elif os.path.isfile("/var/lib/dbus/machine-id"):
with open("/var/lib/dbus/machine-id", "r") as f:
machine_id = f.read()
__installation_id__ = str(_uuid.uuid5(_uuid.NAMESPACE_DNS, machine_id))
import contextlib as _contextlib
import re as _re
import sys as _sys
import textwrap as _textwrap
import threading as _threading
import traceback as _traceback
import types as _types
import json as _json
import numpy as _np
from streamlit import code_util as _code_util
from streamlit import env_util as _env_util
from streamlit import source_util as _source_util
from streamlit import string_util as _string_util
from streamlit import type_util as _type_util
from streamlit.DeltaGenerator import DeltaGenerator as _DeltaGenerator
from streamlit.ReportThread import add_report_ctx as _add_report_ctx
from streamlit.ReportThread import get_report_ctx as _get_report_ctx
from streamlit.errors import StreamlitAPIException
from streamlit.proto import BlockPath_pb2 as _BlockPath_pb2
from streamlit.util import functools_wraps as _functools_wraps
# Modules that the user should have access to. These are imported with "as"
# syntax pass mypy checking with implicit_reexport disabled.
from streamlit.caching import cache as cache # noqa: F401
# This is set to True inside cli._main_run(), and is False otherwise.
# If False, we should assume that DeltaGenerator functions are effectively
# no-ops, and adapt gracefully.
_is_running_with_streamlit = False
def _set_log_level():
_logger.set_log_level(_config.get_option("global.logLevel").upper())
_logger.init_tornado_logs()
# Make this file only depend on config option in an asynchronous manner. This
# avoids a race condition when another file (such as a test file) tries to pass
# in an alternative config.
_config.on_config_parsed(_set_log_level, True)
_main = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.MAIN)
sidebar = _DeltaGenerator(container=_BlockPath_pb2.BlockPath.SIDEBAR)
# DeltaGenerator methods:
altair_chart = _main.altair_chart # noqa: E221
area_chart = _main.area_chart # noqa: E221
audio = _main.audio # noqa: E221
balloons = _main.balloons # noqa: E221
bar_chart = _main.bar_chart # noqa: E221
bokeh_chart = _main.bokeh_chart # noqa: E221
button = _main.button # noqa: E221
checkbox = _main.checkbox # noqa: E221
code = _main.code # noqa: E221
dataframe = _main.dataframe # noqa: E221
date_input = _main.date_input # noqa: E221
deck_gl_chart = _main.deck_gl_chart # noqa: E221
pydeck_chart = _main.pydeck_chart # noqa: E221
empty = _main.empty # noqa: E221
error = _main.error # noqa: E221
exception = _main.exception # noqa: E221
beta_set_favicon = _main.favicon # noqa: E221
file_uploader = _main.file_uploader # noqa: E221
graphviz_chart = _main.graphviz_chart # noqa: E221
header = _main.header # noqa: E221
help = _main.help # noqa: E221
image = _main.image # noqa: E221
info = _main.info # noqa: E221
json = _main.json # noqa: E221
latex = _main.latex # noqa: E221
line_chart = _main.line_chart # noqa: E221
map = _main.map # noqa: E221
markdown = _main.markdown # noqa: E221
multiselect = _main.multiselect # noqa: E221
number_input = _main.number_input # noqa: E221
plotly_chart = _main.plotly_chart # noqa: E221
progress = _main.progress # noqa: E221
pyplot = _main.pyplot # noqa: E221
radio = _main.radio # noqa: E221
selectbox = _main.selectbox # noqa: E221
slider = _main.slider # noqa: E221
subheader = _main.subheader # noqa: E221
success = _main.success # noqa: E221
table = _main.table # noqa: E221
text = _main.text # noqa: E221
text_area = _main.text_area # noqa: E221
text_input = _main.text_input # noqa: E221
time_input = _main.time_input # noqa: E221
title = _main.title # noqa: E221
vega_lite_chart = _main.vega_lite_chart # noqa: E221
video = _main.video # noqa: E221
warning = _main.warning # noqa: E221
beta_color_picker = _main.beta_color_picker # noqa: E221
# Config
get_option = _config.get_option
def set_option(key, value):
"""Set config option.
Currently, only two config options can be set within the script itself:
* client.caching
* client.displayEnabled
Calling with any other options will raise StreamlitAPIException.
Run `streamlit config show` in the terminal to see all available options.
Parameters
----------
key : str
The config option key of the form "section.optionName". To see all
available options, run `streamlit config show` on a terminal.
value
The new value to assign to this config option.
"""
opt = _config._config_options[key]
if opt.scriptable:
_config.set_option(key, value)
return
raise StreamlitAPIException(
"{key} cannot be set on the fly. Set as command line option, e.g. streamlit run script.py --{key}, or in config.toml instead.".format(
key=key
)
)
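# A small usage sketch (hypothetical values; per the docstring above, only
# scriptable options such as "client.caching" may be set at runtime):
#
#     import streamlit as st
#     st.set_option("client.caching", False)   # allowed: option is scriptable
#     st.set_option("server.port", 8502)       # raises StreamlitAPIException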
# Special methods:
_HELP_TYPES = (
_types.BuiltinFunctionType,
_types.BuiltinMethodType,
_types.FunctionType,
_types.MethodType,
_types.ModuleType,
) # type: Tuple[Type[Any], ...]
def write(*args, **kwargs):
"""Write arguments to the app.
This is the Swiss Army knife of Streamlit commands: it does different
things depending on what you throw at it. Unlike other Streamlit commands,
write() has some unique properties:
1. You can pass in multiple arguments, all of which will be written.
2. Its behavior depends on the input types as follows.
    3. It returns None, so its "slot" in the App cannot be reused.
Parameters
----------
*args : any
One or many objects to print to the App.
Arguments are handled as follows:
- write(string) : Prints the formatted Markdown string, with
support for LaTeX expression and emoji shortcodes.
See docs for st.markdown for more.
- write(data_frame) : Displays the DataFrame as a table.
- write(error) : Prints an exception specially.
- write(func) : Displays information about a function.
- write(module) : Displays information about the module.
- write(dict) : Displays dict in an interactive widget.
- write(obj) : The default is to print str(obj).
- write(mpl_fig) : Displays a Matplotlib figure.
- write(altair) : Displays an Altair chart.
- write(keras) : Displays a Keras model.
- write(graphviz) : Displays a Graphviz graph.
- write(plotly_fig) : Displays a Plotly figure.
- write(bokeh_fig) : Displays a Bokeh figure.
- write(sympy_expr) : Prints SymPy expression using LaTeX.
unsafe_allow_html : bool
This is a keyword-only argument that defaults to False.
By default, any HTML tags found in strings will be escaped and
therefore treated as pure text. This behavior may be turned off by
setting this argument to True.
        That said, we *strongly advise against it*. It is hard to write secure
HTML, so by using this argument you may be compromising your users'
security. For more information, see:
https://github.com/streamlit/streamlit/issues/152
**Also note that `unsafe_allow_html` is a temporary measure and may be
removed from Streamlit at any time.**
If you decide to turn on HTML anyway, we ask you to please tell us your
exact use case here:
https://discuss.streamlit.io/t/96 .
This will help us come up with safe APIs that allow you to do what you
want.
Example
-------
Its simplest use case is to draw Markdown-formatted text, whenever the
input is a string:
>>> write('Hello, *World!* :sunglasses:')
.. output::
https://share.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE
height: 50px
As mentioned earlier, `st.write()` also accepts other data formats, such as
numbers, data frames, styled data frames, and assorted objects:
>>> st.write(1234)
>>> st.write(pd.DataFrame({
... 'first column': [1, 2, 3, 4],
... 'second column': [10, 20, 30, 40],
... }))
.. output::
https://share.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD
height: 250px
Finally, you can pass in multiple arguments to do things like:
>>> st.write('1 + 1 = ', 2)
>>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')
.. output::
https://share.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1
height: 300px
Oh, one more thing: `st.write` accepts chart objects too! For example:
>>> import pandas as pd
>>> import numpy as np
>>> import altair as alt
>>>
>>> df = pd.DataFrame(
... np.random.randn(200, 3),
... columns=['a', 'b', 'c'])
...
>>> c = alt.Chart(df).mark_circle().encode(
... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
>>>
>>> st.write(c)
.. output::
https://share.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
height: 200px
"""
try:
string_buffer = [] # type: List[str]
unsafe_allow_html = kwargs.get("unsafe_allow_html", False)
def flush_buffer():
if string_buffer:
markdown(
" ".join(string_buffer), unsafe_allow_html=unsafe_allow_html,
) # noqa: F821
string_buffer[:] = []
for arg in args:
# Order matters!
if isinstance(arg, str):
string_buffer.append(arg)
elif _type_util.is_dataframe_like(arg):
flush_buffer()
if len(_np.shape(arg)) > 2:
text(arg)
else:
dataframe(arg) # noqa: F821
elif isinstance(arg, Exception):
flush_buffer()
exception(arg) # noqa: F821
elif isinstance(arg, _HELP_TYPES):
flush_buffer()
help(arg)
elif _type_util.is_altair_chart(arg):
flush_buffer()
altair_chart(arg)
elif _type_util.is_type(arg, "matplotlib.figure.Figure"):
flush_buffer()
pyplot(arg)
elif _type_util.is_plotly_chart(arg):
flush_buffer()
plotly_chart(arg)
elif _type_util.is_type(arg, "bokeh.plotting.figure.Figure"):
flush_buffer()
bokeh_chart(arg)
elif _type_util.is_graphviz_chart(arg):
flush_buffer()
graphviz_chart(arg)
elif _type_util.is_sympy_expession(arg):
flush_buffer()
latex(arg)
elif _type_util.is_keras_model(arg):
from tensorflow.python.keras.utils import vis_utils
flush_buffer()
dot = vis_utils.model_to_dot(arg)
graphviz_chart(dot.to_string())
elif isinstance(arg, (dict, list)):
flush_buffer()
json(arg)
elif _type_util.is_namedtuple(arg):
flush_buffer()
json(_json.dumps(arg._asdict()))
elif _type_util.is_pydeck(arg):
flush_buffer()
pydeck_chart(arg)
else:
string_buffer.append("`%s`" % str(arg).replace("`", "\\`"))
flush_buffer()
except Exception:
_, exc, exc_tb = _sys.exc_info()
exception(exc, exc_tb) # noqa: F821
def experimental_show(*args):
"""Write arguments and *argument names* to your app for debugging purposes.
Show() has similar properties to write():
1. You can pass in multiple arguments, all of which will be debugged.
    2. It returns None, so its "slot" in the app cannot be reused.
Note: This is an experimental feature. See
https://docs.streamlit.io/en/latest/pre_release_features.html for more information.
Parameters
----------
*args : any
One or many objects to debug in the App.
Example
-------
>>> dataframe = pd.DataFrame({
... 'first column': [1, 2, 3, 4],
... 'second column': [10, 20, 30, 40],
... }))
>>> st.experimental_show(dataframe)
Notes
-----
This is an experimental feature with usage limitations:
- The method must be called with the name `show`.
- Must be called in one line of code, and only once per line.
- When passing multiple arguments the inclusion of `,` or `)` in a string
argument may cause an error.
"""
if not args:
return
try:
import inspect
# Get the calling line of code
current_frame = inspect.currentframe()
if current_frame is None:
warning("`show` not enabled in the shell")
return
lines = inspect.getframeinfo(current_frame.f_back)[3]
if not lines:
warning("`show` not enabled in the shell")
return
# Parse arguments from the line
line = lines[0].split("show", 1)[1]
inputs = _code_util.get_method_args_from_code(args, line)
# Escape markdown and add deltas
for idx, input in enumerate(inputs):
escaped = _string_util.escape_markdown(input)
markdown("**%s**" % escaped)
write(args[idx])
except Exception:
_, exc, exc_tb = _sys.exc_info()
exception(exc, exc_tb) # noqa: F821
@_contextlib.contextmanager
def spinner(text="In progress..."):
"""Temporarily displays a message while executing a block of code.
Parameters
----------
text : str
A message to display while executing that block
Example
-------
>>> with st.spinner('Wait for it...'):
>>> time.sleep(5)
>>> st.success('Done!')
"""
import streamlit.caching as caching
# @st.cache optionally uses spinner for long-running computations.
# Normally, streamlit warns the user when they call st functions
# from within an @st.cache'd function. But we do *not* want to show
# these warnings for spinner's message, so we create and mutate this
# message delta within the "suppress_cached_st_function_warning"
# context.
with caching.suppress_cached_st_function_warning():
message = empty()
try:
# Set the message 0.1 seconds in the future to avoid annoying
# flickering if this spinner runs too quickly.
DELAY_SECS = 0.1
display_message = True
display_message_lock = _threading.Lock()
def set_message():
with display_message_lock:
if display_message:
with caching.suppress_cached_st_function_warning():
message.warning(str(text))
_add_report_ctx(_threading.Timer(DELAY_SECS, set_message)).start()
# Yield control back to the context.
yield
finally:
if display_message_lock:
with display_message_lock:
display_message = False
with caching.suppress_cached_st_function_warning():
message.empty()
_SPACES_RE = _re.compile("\\s*")
@_contextlib.contextmanager
def echo(code_location="above"):
"""Use in a `with` block to draw some code on the app, then execute it.
Parameters
----------
code_location : "above" or "below"
Whether to show the echoed code before or after the results of the
executed code block.
Example
-------
>>> with st.echo():
>>> st.write('This code will be printed')
"""
if code_location == "below":
show_code = code
show_warning = warning
else:
placeholder = empty() # noqa: F821
show_code = placeholder.code
show_warning = placeholder.warning
try:
frame = _traceback.extract_stack()[-3]
filename, start_line = frame.filename, frame.lineno
yield
frame = _traceback.extract_stack()[-3]
end_line = frame.lineno
lines_to_display = [] # type: List[str]
with _source_util.open_python_file(filename) as source_file:
source_lines = source_file.readlines()
lines_to_display.extend(source_lines[start_line:end_line])
match = _SPACES_RE.match(lines_to_display[0])
initial_spaces = match.end() if match else 0
for line in source_lines[end_line:]:
match = _SPACES_RE.match(line)
indentation = match.end() if match else 0
# The != 1 is because we want to allow '\n' between sections.
if indentation != 1 and indentation < initial_spaces:
break
lines_to_display.append(line)
line_to_display = _textwrap.dedent("".join(lines_to_display))
show_code(line_to_display, "python")
except FileNotFoundError as err:
show_warning("Unable to display code. %s" % err)
def _transparent_write(*args):
"""This is just st.write, but returns the arguments you passed to it."""
write(*args)
if len(args) == 1:
return args[0]
return args
# We want to show a warning when the user runs a Streamlit script without
# 'streamlit run', but we need to make sure the warning appears only once no
# matter how many times __init__ gets loaded.
_repl_warning_has_been_displayed = False
def _maybe_print_repl_warning():
global _repl_warning_has_been_displayed
if not _repl_warning_has_been_displayed:
_repl_warning_has_been_displayed = True
if _env_util.is_repl():
_LOGGER.warning(
_textwrap.dedent(
"""
Will not generate Streamlit app
To generate an app, use Streamlit in a file and run it with:
$ streamlit run [FILE_NAME] [ARGUMENTS]
"""
)
)
elif _config.get_option("global.showWarningOnDirectExecution"):
script_name = _sys.argv[0]
_LOGGER.warning(
_textwrap.dedent(
"""
Will not generate Streamlit App
To generate an App, run this file with:
$ streamlit run %s [ARGUMENTS]
"""
),
script_name,
)
|
the-stack_0_690 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.callbacks import events
from neutron_lib import context as n_context
from neutron_lib.db import model_base
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import common_types
from oslo_versionedobjects import fields as obj_fields
import sqlalchemy as sa
from neutron.db import rbac_db_models
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base
from neutron.objects.db import api as obj_db_api
from neutron.objects import rbac_db
from neutron.tests.unit.objects import test_rbac
from neutron.tests.unit import testlib_api
class FakeDbModel(dict):
pass
class FakeRbacModel(rbac_db_models.RBACColumns, model_base.BASEV2):
object_id = sa.Column(sa.String(36), nullable=False)
object_type = 'fake_rbac_object'
def get_valid_actions(self):
return (rbac_db_models.ACCESS_SHARED,)
@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronRbacObject(base.NeutronDbObject):
VERSION = '1.0'
db_model = FakeRbacModel
fields = {
'object_id': obj_fields.StringField(),
'target_tenant': obj_fields.StringField(),
'action': obj_fields.StringField(),
}
@base.NeutronObjectRegistry.register_if(False)
class FakeNeutronDbObject(rbac_db.NeutronRbacObject):
# Version 1.0: Initial version
VERSION = '1.0'
rbac_db_cls = FakeNeutronRbacObject
db_model = FakeDbModel
fields = {
'id': common_types.UUIDField(),
'field1': obj_fields.StringField(),
'field2': obj_fields.StringField(),
'shared': obj_fields.BooleanField(default=False),
}
fields_no_update = ['id']
synthetic_fields = ['field2']
    @classmethod
    def get_bound_project_ids(cls, context, policy_id):
pass
class RbacNeutronDbObjectTestCase(test_rbac.RBACBaseObjectIfaceTestCase,
testlib_api.SqlTestCase):
_test_class = FakeNeutronDbObject
def setUp(self):
super(RbacNeutronDbObjectTestCase, self).setUp()
FakeNeutronDbObject.update_post = mock.Mock()
@mock.patch.object(_test_class.rbac_db_cls, 'db_model')
def test_get_projects_with_shared_access_to_db_obj_return_project_ids(
self, *mocks):
ctx = mock.Mock()
fake_ids = {'project_id_' + str(i) for i in range(10)}
ctx.session.query.return_value.filter.return_value = [
(fake_id,) for fake_id in fake_ids]
ret_ids = self._test_class._get_projects_with_shared_access_to_db_obj(
ctx, 'fake_db_obj_id')
self.assertEqual(fake_ids, ret_ids)
def test_is_accessible_for_admin(self):
ctx = mock.Mock(is_admin=True, project_id='we_dont_care')
self.assertTrue(self._test_class.is_accessible(ctx, None))
def test_is_accessible_for_db_object_owner(self):
ctx = mock.Mock(is_admin=False, project_id='db_object_owner')
db_obj = mock.Mock(project_id=ctx.project_id)
self.assertTrue(self._test_class.is_accessible(ctx, db_obj))
@mock.patch.object(_test_class, 'is_shared_with_project',
return_value=True)
def test_is_accessible_if_shared_with_project(self, mock_is_shared):
ctx = mock.Mock(is_admin=False, project_id='db_object_shareholder')
db_obj = mock.Mock(project_id='db_object_owner')
self.assertTrue(self._test_class.is_accessible(ctx, db_obj))
mock_is_shared.assert_called_once_with(
mock.ANY, db_obj.id, ctx.project_id)
@mock.patch.object(_test_class, 'is_shared_with_project',
return_value=False)
def test_is_accessible_fails_for_unauthorized_project(self,
mock_is_shared):
ctx = mock.Mock(is_admin=False, project_id='Billy_the_kid')
db_obj = mock.Mock(project_id='db_object_owner')
self.assertFalse(self._test_class.is_accessible(ctx, db_obj))
mock_is_shared.assert_called_once_with(
mock.ANY, db_obj.id, ctx.project_id)
def _rbac_policy_generate_change_events(self, resource, trigger,
context, object_type, policy,
event_list):
for event in event_list:
payload = events.DBEventPayload(
context, states=(policy,),
metadata={'object_type': object_type})
if event == events.BEFORE_CREATE:
payload.states = []
payload.request_body = policy
self._test_class.validate_rbac_policy_change(
resource, event, trigger, payload=payload)
@mock.patch.object(_test_class, 'validate_rbac_policy_update')
def test_validate_rbac_policy_change_handles_only_object_type(
self, mock_validate_rbac_update):
self._rbac_policy_generate_change_events(
resource=None, trigger='dummy_trigger', context=None,
object_type='dummy_object_type', policy=None,
event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE,
events.BEFORE_DELETE))
mock_validate_rbac_update.assert_not_called()
@mock.patch.object(_test_class, 'validate_rbac_policy_update')
@mock.patch.object(obj_db_api, 'get_object',
return_value={'project_id': 'tyrion_lannister'})
def test_validate_rbac_policy_change_allowed_for_admin_or_owner(
self, mock_get_object, mock_validate_update):
context = mock.Mock(is_admin=True, project_id='db_obj_owner_id')
self._rbac_policy_generate_change_events(
resource=None, trigger='dummy_trigger', context=context,
object_type=self._test_class.rbac_db_cls.db_model.object_type,
policy={'object_id': 'fake_object_id'},
event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE))
self.assertTrue(self._test_class.validate_rbac_policy_update.called)
@mock.patch.object(_test_class, 'validate_rbac_policy_update')
@mock.patch.object(obj_db_api, 'get_object',
return_value={'project_id': 'king_beyond_the_wall'})
def test_validate_rbac_policy_change_forbidden_for_outsiders(
self, mock_get_object, mock_validate_update):
context = mock.Mock(is_admin=False, project_id='db_obj_owner_id')
self.assertRaises(
n_exc.InvalidInput,
self._rbac_policy_generate_change_events,
resource=mock.Mock(), trigger='dummy_trigger', context=context,
object_type=self._test_class.rbac_db_cls.db_model.object_type,
policy={'object_id': 'fake_object_id'},
event_list=(events.BEFORE_CREATE, events.BEFORE_UPDATE))
self.assertFalse(mock_validate_update.called)
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def _test_validate_rbac_policy_delete_handles_policy(
self, policy, mock_validate_delete):
payload = events.DBEventPayload(
n_context.get_admin_context(),
states=(policy,),
metadata={
'object_type':
self._test_class.rbac_db_cls.db_model.object_type})
self._test_class.validate_rbac_policy_delete(
resource=mock.Mock(), event=events.BEFORE_DELETE,
trigger='dummy_trigger', payload=payload)
mock_validate_delete.assert_not_called()
def test_validate_rbac_policy_delete_handles_shared_action(self):
self._test_validate_rbac_policy_delete_handles_policy(
{'action': 'unknown_action'})
@mock.patch.object(obj_db_api, 'get_object')
def test_validate_rbac_policy_delete_skips_db_object_owner(self,
mock_get_object):
policy = {'action': rbac_db_models.ACCESS_SHARED,
'target_tenant': 'fake_project_id',
'object_id': 'fake_obj_id',
'project_id': 'fake_project_id'}
mock_get_object.return_value.project_id = policy['target_tenant']
self._test_validate_rbac_policy_delete_handles_policy(policy)
@mock.patch.object(obj_db_api, 'get_object')
@mock.patch.object(_test_class, 'get_bound_project_ids',
return_value='project_id_shared_with')
def test_validate_rbac_policy_delete_fails_single_project_and_in_use(
self, get_bound_project_ids_mock, mock_get_object):
policy = {'action': rbac_db_models.ACCESS_SHARED,
'target_tenant': 'project_id_shared_with',
'project_id': 'object_owner_project_id',
'object_id': 'fake_obj_id'}
context = mock.Mock()
with mock.patch.object(
self._test_class,
'_get_db_obj_rbac_entries') as target_tenants_mock:
filter_mock = target_tenants_mock.return_value.filter
filter_mock.return_value.count.return_value = 0
payload = events.DBEventPayload(
context,
states=(policy,),
metadata={
'object_type':
self._test_class.rbac_db_cls.db_model.object_type})
self.assertRaises(
ext_rbac.RbacPolicyInUse,
self._test_class.validate_rbac_policy_delete,
resource=None,
event=events.BEFORE_DELETE,
trigger='dummy_trigger',
payload=payload)
def test_validate_rbac_policy_delete_not_bound_project_success(self):
context = mock.Mock()
with mock.patch.object(
self._test_class, 'get_bound_project_ids',
return_value={'fake_tid2', 'fake_tid3'}), \
mock.patch.object(self._test_class,
'_get_db_obj_rbac_entries') as get_rbac_entries_mock, \
mock.patch.object(
self._test_class,
'_get_projects_with_shared_access_to_db_obj') as sh_tids:
get_rbac_entries_mock.filter.return_value.count.return_value = 0
self._test_class._validate_rbac_policy_delete(
context=context,
obj_id='fake_obj_id',
target_tenant='fake_tid1')
sh_tids.assert_not_called()
@mock.patch.object(_test_class, '_get_db_obj_rbac_entries')
@mock.patch.object(_test_class,
'_get_projects_with_shared_access_to_db_obj',
return_value=['some_other_project'])
@mock.patch.object(_test_class, 'get_bound_project_ids',
return_value={'fake_id1'})
def test_validate_rbac_policy_delete_fails_single_used_wildcarded(
self, get_bound_project_ids_mock, mock_projects_with_shared_access,
_get_db_obj_rbac_entries_mock):
policy = {'action': rbac_db_models.ACCESS_SHARED,
'target_tenant': '*',
'project_id': 'object_owner_project_id',
'object_id': 'fake_obj_id'}
context = mock.Mock()
payload = events.DBEventPayload(
context,
states=(policy,),
metadata={
'object_type':
self._test_class.rbac_db_cls.db_model.object_type})
with mock.patch.object(obj_db_api, 'get_object'):
self.assertRaises(
ext_rbac.RbacPolicyInUse,
self._test_class.validate_rbac_policy_delete,
resource=mock.Mock(),
event=events.BEFORE_DELETE,
trigger='dummy_trigger',
payload=payload)
@mock.patch.object(_test_class, 'attach_rbac')
@mock.patch.object(obj_db_api, 'get_object',
return_value=['fake_rbac_policy'])
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def test_update_shared_avoid_duplicate_update(
self, mock_validate_delete, get_object_mock, attach_rbac_mock):
obj_id = 'fake_obj_id'
obj = self._test_class(mock.Mock())
obj.update_shared(is_shared_new=True, obj_id=obj_id)
get_object_mock.assert_called_with(
obj.rbac_db_cls, mock.ANY, object_id=obj_id,
target_tenant='*', action=rbac_db_models.ACCESS_SHARED)
self.assertFalse(mock_validate_delete.called)
self.assertFalse(attach_rbac_mock.called)
@mock.patch.object(_test_class, 'attach_rbac')
@mock.patch.object(obj_db_api, 'get_object', return_value=[])
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def test_update_shared_wildcard(
self, mock_validate_delete, get_object_mock, attach_rbac_mock):
obj_id = 'fake_obj_id'
test_neutron_obj = self._test_class(mock.Mock())
test_neutron_obj.update_shared(is_shared_new=True, obj_id=obj_id)
get_object_mock.assert_called_with(
test_neutron_obj.rbac_db_cls, mock.ANY, object_id=obj_id,
target_tenant='*', action=rbac_db_models.ACCESS_SHARED)
attach_rbac_mock.assert_called_with(
obj_id, test_neutron_obj.obj_context.project_id)
def test_shared_field_false_without_context(self):
test_neutron_obj = self._test_class()
self.assertFalse(test_neutron_obj.to_dict()['shared'])
@mock.patch.object(_test_class, 'attach_rbac')
@mock.patch.object(obj_db_api, 'get_object',
return_value=['fake_rbac_policy'])
@mock.patch.object(_test_class, '_validate_rbac_policy_delete')
def test_update_shared_remove_wildcard_sharing(
self, mock_validate_delete, get_object_mock, attach_rbac_mock):
obj_id = 'fake_obj_id'
obj = self._test_class(mock.Mock())
obj.update_shared(is_shared_new=False, obj_id=obj_id)
get_object_mock.assert_called_with(
obj.rbac_db_cls, mock.ANY, object_id=obj_id,
target_tenant='*', action=rbac_db_models.ACCESS_SHARED)
self.assertFalse(attach_rbac_mock.attach_rbac.called)
mock_validate_delete.assert_called_with(mock.ANY, obj_id, '*')
@mock.patch.object(_test_class, 'create_rbac_policy')
def test_attach_rbac_returns_type(self, create_rbac_mock):
obj_id = 'fake_obj_id'
project_id = 'fake_project_id'
target_tenant = 'fake_target_project'
self._test_class(mock.Mock()).attach_rbac(obj_id, project_id,
target_tenant)
rbac_pol = create_rbac_mock.call_args_list[0][0][1]['rbac_policy']
self.assertEqual(rbac_pol['object_id'], obj_id)
self.assertEqual(rbac_pol['target_tenant'], target_tenant)
self.assertEqual(rbac_pol['action'], rbac_db_models.ACCESS_SHARED)
self.assertEqual(rbac_pol['object_type'],
self._test_class.rbac_db_cls.db_model.object_type)
|
the-stack_0_691 | """Various constants and distributions that describe our dataset. Intended use
is normalization of the fields before sending them to a neural net.
See notebook distributions-of-parameters.ipynb"""
import logging
import numpy as np
import torch
import random
import xarray as xr
from .util import add_biweekly_dim, obs_to_biweekly, std_estimator, fix_s2s_dataset_dims
_logger = logging.getLogger(__name__)
FIELD_MEAN = {
"gh10": 30583.0,
"gh100": 16070.0,
"gh1000": 76.19,
"gh200": 11765.0,
"gh500": 5524.374,
"gh850": 1403.0,
"lsm": 0.0,
"msl": 100969.28,
"orog": 387.1,
"siconc": 0.17,
"sst": 286.96,
"st100": 268.75,
"st20": 268.69,
"sm20": 250.68,
"t2m": 278.2237,
"tp": 34.1,
"u1000": -0.17,
"u850": 1.26,
"u500": 6.43,
"u200": 14.43,
"u100": 5.30,
"v1000": 0.18,
"v850": 0.11,
"v500": -0.03,
"v200": -0.01,
"v100": 0.10,
}
FIELD_STD = {
"gh10": 993.0,
"gh100": 577.0,
"gh1000": 110.14,
"gh200": 605.0,
"gh500": 341.80862,
"gh850": 149.6,
"lsm": 1.0,
"msl": 1343.6,
"orog": 856.0,
"siconc": 0.35,
"sst": 11.73,
"st100": 26.74,
"st20": 26.91,
"sm20": 125.99,
"tp": 43.7,
"t2m": 21.2692,
"u1000": 6.09,
"u850": 8.07,
"u500": 11.73,
"u200": 17.76,
"u100": 12.02,
"v1000": 5.22,
"v850": 6.144,
"v500": 9.03,
"v200": 12.18,
"v100": 6.57,
}
def normalize_dataset(dataset):
for v in dataset.data_vars:
dataset[v] = (dataset[v] - FIELD_MEAN[v]) / FIELD_STD[v]
return dataset
def denormalize_dataset(dataset):
for v in dataset.data_vars:
dataset[v] = (dataset[v] * FIELD_STD[v]) + FIELD_MEAN[v]
return dataset
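# A minimal round-trip sketch (illustrative only; the toy Dataset below is an
# assumption, not one of the real S2S fields):
#
#     import numpy as np
#     import xarray as xr
#     ds = xr.Dataset({"t2m": ("x", np.array([270.0, 280.0, 290.0]))})
#     normed = normalize_dataset(ds)           # (t2m - FIELD_MEAN) / FIELD_STD
#     restored = denormalize_dataset(normed)   # recovers the original values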
def apply_to_all(transform, example):
"""Utility function to apply a transform on all the kews of an example."""
new_example = {}
for k in example:
new_example[k] = transform(example[k])
return new_example
class AddBiweeklyDimTransform:
"""Transform that takes a training example and adds the biweekly dimension to it."""
def __init__(self, weeks_12=False, features=False):
self.weeks_12 = weeks_12
self.features = features
def __call__(self, example):
to_transform = ["model", "obs"]
if self.features:
to_transform.append("features")
new_example = {}
for k in example:
if k in to_transform:
new_example[k] = add_biweekly_dim(example[k], weeks_12=self.weeks_12)
else:
new_example[k] = example[k]
return new_example
class AddMetadata:
"""Add various metadata to the example dict."""
def __call__(self, example):
model = example["terciles"]
year = int(model.forecast_time.dt.year)
month = int(model.forecast_time.dt.month)
day = int(model.forecast_time.dt.day)
example["monthday"] = f"{month:02}{day:02}"
example["month"] = f"{month:02}"
example["year"] = f"{year:04}"
example["latitude"] = model.latitude
example["longitude"] = model.longitude
return example
class AddDryMask:
def __init__(self, threshold=0.01):
self.threshold = threshold
def __call__(self, example):
edges = example["edges"]
wet_mask = (edges.isel(category_edge=0) > self.threshold).drop("t2m")
example["dry_mask"] = ~wet_mask
return example
class ExampleToPytorch:
def __call__(self, example):
pytorch_example = {}
for dataset_name in [
"obs",
"model",
"features",
"terciles",
"edges",
"model_parameters",
"dry_mask",
"eccc_parameters",
"ncep_parameters",
]:
if dataset_name in example:
dataset = example[dataset_name]
for variable in dataset.data_vars:
new_key = f"{dataset_name}_{variable}"
pytorch_example[new_key] = torch.from_numpy(dataset[variable].data)
for k in ["year", "monthday", "month", "eccc_available", "ncep_available"]:
pytorch_example[k] = example[k]
for k in ["latitude", "longitude"]:
pytorch_example[k] = torch.from_numpy(example[k].data)
return pytorch_example
class CompositeTransform:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, example):
transformed_example = example
for t in self.transforms:
transformed_example = t(transformed_example)
return transformed_example
def __repr__(self):
inner_str = ", ".join([repr(t) for t in self.transforms])
return f"CompositeTransform([{inner_str}])"
def t2m_to_normal(model):
model_t2m_mean = model.t2m.mean(dim=["lead_time", "realization"]).rename("t2m_mu")
model_t2m_std = std_estimator(model.t2m, dim=["lead_time", "realization"]).rename(
"t2m_sigma"
)
return xr.merge([model_t2m_mean, model_t2m_std]).rename(
biweekly_forecast="lead_time"
)
def tp_to_normal(model):
model_tp_mean = model.tp.isel(lead_time=-1).mean(dim="realization").rename("tp_mu")
model_tp_std = std_estimator(model.tp.isel(lead_time=-1), dim="realization").rename(
"tp_sigma"
)
return (
xr.merge([model_tp_mean, model_tp_std])
.drop("lead_time")
.rename(biweekly_forecast="lead_time")
)
def model_to_distribution(model):
model_t2m = t2m_to_normal(model)
model_tp = tp_to_normal(model)
return xr.merge([model_t2m, model_tp])
class LinearModelAdapter:
def __init__(self, make_distributions=True):
self.make_distributions = make_distributions
def __call__(self, example):
if self.make_distributions:
example["model"] = model_to_distribution(example["model"])
example["obs"] = obs_to_biweekly(example["obs"])
return example
class CubeRootTP:
"""Apply a cubic root on precipitation data."""
def __init__(self):
pass
def __call__(self, example):
for k in ["obs_tp", "edges_tp"]:
if k in example:
example[k] = example[k] ** (1.0 / 3.0)
return example
class AddLatLonFeature:
def __init__(self):
pass
def __call__(self, example):
obs = example["terciles"]
lat_array = obs["latitude"].assign_coords(variable="lat")
lat_array = (lat_array / lat_array.max()).astype("float32")
lon_array = obs["longitude"].assign_coords(variable="lon")
lon_array = np.sin(np.deg2rad(lon_array)).astype("float32")
features_array = example["features"].features
catted_features = xr.concat(
[features_array, lat_array, lon_array], dim="variable"
)
example["features"] = catted_features.to_dataset()
return example
class AddGeographyFeatures:
def __init__(self, geography_file):
geo_dataset = fix_s2s_dataset_dims(xr.open_dataset(geography_file))
subset = geo_dataset[["orog"]]
geo = normalize_dataset(subset)
self.geo_features = geo.to_array().to_dataset(name="features")
def __call__(self, batch):
features = batch["features"]
geo_at_lead = self.geo_features.sel(lead_time=features.lead_time)
new_features_dataset = xr.concat([features, geo_at_lead], dim="variable")
batch["features"] = new_features_dataset
return batch
class RandomNoise:
def __init__(self, keys=["features_features"], sigma=0.01):
self.keys = keys
self.sigma = sigma
def __call__(self, example):
for k in self.keys:
x = example[k]
example[k] += self.sigma * torch.randn_like(x)
return example
class LongitudeRoll:
def __init__(self):
pass
def __call__(self, example):
obs = example["terciles"]
longitude_length = obs.sizes["longitude"]
roll = random.randint(0, longitude_length)
rolled_example = example
for k in example:
if k not in ["eccc_available", "ncep_available"]:
rolled_dataset = (
example[k].roll(longitude=roll, roll_coords=True).drop("longitude")
)
rolled_example[k] = rolled_dataset
return rolled_example
class MembersSubsetTransform:
def __init__(self, subset_size=1):
self.subset_size = subset_size
def __call__(self, example):
features = example["features"]
n_members = features.sizes["realization"]
members = sorted(random.choices(range(n_members), k=self.subset_size))
features = features.isel(realization=members)
example["features"] = features
return example
class AddDateFeatureTransform:
def __call__(self, example):
features = example["features"]
date_features = np.sin(
features.valid_time.assign_coords(variable="date").dt.dayofyear / 366
)
new_features = xr.concat(
[features.features, date_features], dim="variable"
).astype("float32")
example["features"] = new_features.to_dataset()
return example
class VariableFilterTransform:
def __init__(self, to_filter=None):
self.to_filter = to_filter
if to_filter is not None:
_logger.info("Will filter vars: %s", to_filter)
def __call__(self, batch):
if self.to_filter is not None:
batch["features"] = batch["features"].sel(variable=self.to_filter)
return batch
def full_transform(
geography_file,
weeks_12=False,
make_distributions=False,
random_noise_sigma=0.0,
roll=False,
n_members=1,
filter_vars=None,
biweekly_features=False,
add_date=False,
):
xarray_transforms = [
MembersSubsetTransform(n_members),
AddLatLonFeature(),
AddGeographyFeatures(geography_file),
VariableFilterTransform(filter_vars),
AddBiweeklyDimTransform(weeks_12, features=biweekly_features),
]
if add_date:
xarray_transforms.insert(2, AddDateFeatureTransform())
if roll:
xarray_transforms.append(LongitudeRoll())
transforms = [
*xarray_transforms,
# LinearModelAdapter(make_distributions=make_distributions),
AddMetadata(),
ExampleToPytorch(),
CubeRootTP(),
RandomNoise(sigma=random_noise_sigma),
]
return CompositeTransform(transforms)
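# A usage sketch (the geography path below is a placeholder, not a real file):
#
#     transform = full_transform(
#         "path/to/geography.nc", random_noise_sigma=0.01, roll=True, n_members=2,
#     )
#     pytorch_ready = transform(raw_example)   # raw_example: dict of xarray Datasets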
|
the-stack_0_694 | #! /usr/bin/env python3
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Heart Evaluation GUI Client for submitting data to a worker.
"""
import os
import sys
import random
import json
import argparse
import logging
import secrets
# Tkinter imports
import tkinter as tk
import tkinter.messagebox as messagebox
import tkinter.font as font
from PIL import ImageTk, Image
# Avalon imports
import crypto_utils.crypto_utility as utility
import avalon_sdk.worker.worker_details as worker
from avalon_sdk.worker.worker_details import WorkerType
from avalon_sdk.work_order.work_order_params import WorkOrderParams
from avalon_sdk.direct.avalon_direct_client import AvalonDirectClient
import config.config as pconfig
import utility.logger as plogger
import crypto_utils.crypto.crypto as crypto
from error_code.error_status import WorkOrderStatus, ReceiptCreateStatus
import crypto_utils.signature as signature
from error_code.error_status import SignatureStatus
from avalon_sdk.work_order_receipt.work_order_receipt \
import WorkOrderReceiptRequest
# Remove duplicate loggers
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logger = logging.getLogger(__name__)
# Default TCFHOME assumes PWD is examples/apps/heart_disease_eval/client :
TCFHOME = os.environ.get("TCF_HOME", "../../../../")
# GUI color scheme
BACKGROUND = "light sky blue"
ENTRY_COLOR = "light grey"
BUTTON_COLOR = "deep sky blue"
RESULT_BACKGROUND = "pale goldenrod"
# -----------------------------------------------------------------
def _generate_random_or_normal_number(normal, percent_normal, low, high):
"""Generate number "normal" for "percent_normal" % of the time.
Otherwise, generate a random number in the interval ["low", "high"].
"""
if percent_normal >= random.randint(0, 100):
return normal
return random.randint(low, high)
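# For example (hypothetical call): _generate_random_or_normal_number(35, 67, 18, 100)
# returns 35 roughly 67% of the time, otherwise a random integer in [18, 100].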
def _generate_random_data():
"""Generate a random data string for input as evaluation data.
For example: "35 0 1 67 102 125 1 95 0 10 1 1 3 1"
"""
age = _generate_random_or_normal_number(35, 67, 18, 100)
sex = _generate_random_or_normal_number(0, 50, 1, 1)
cp = _generate_random_or_normal_number(4, 67, 1, 3)
trestbps = _generate_random_or_normal_number(67, 67, 108, 218)
chol = _generate_random_or_normal_number(102, 67, 126, 309)
fbs = _generate_random_or_normal_number(125, 67, 98, 248)
restecg = _generate_random_or_normal_number(0, 67, 1, 2)
thalach = _generate_random_or_normal_number(95, 67, 61, 198)
exang = _generate_random_or_normal_number(0, 67, 1, 1)
oldpeak = _generate_random_or_normal_number(10, 67, 0, 100)
slop = _generate_random_or_normal_number(0, 67, 1, 2)
ca = _generate_random_or_normal_number(0, 67, 1, 3)
thaldur = _generate_random_or_normal_number(3, 67, 6, 7)
num = _generate_random_or_normal_number(0, 67, 1, 1)
return "{} {} {} {} {} {} {} {} {} {} {} {} {} {}".format(
age, sex, cp, trestbps, chol, fbs, restecg, thalach,
exang, oldpeak, slop, ca, thaldur, num)
def _int_validate(text):
"""Validates that input is a non-negative integer."""
if str.isdigit(text) or text == "":
return True
else:
return False
def _float_validate(text):
"""Validates that input is a non-negative, non-special float."""
if text == "":
return True
try:
float(text)
if float(text) < 0.0 or float(text) == float("NaN") \
or float(text) == float("INF") \
or float(text) == float("-INF"):
return False
return True
except ValueError:
return False
class intEntry:
"""User entry for non-negative integer."""
def __init__(self, master, name):
global cur_row
label = tk.Label(master, text=name, background=BACKGROUND)
label.grid(row=cur_row, column=0, sticky="e", pady=(5, 0))
validate = (master.register(_int_validate))
self.entry = tk.Entry(
master, validate="all",
validatecommand=(validate, "%P"), width=5,
background=ENTRY_COLOR)
self.entry.grid(
row=cur_row, column=1, padx=(10, 0), pady=(5, 0), sticky="w")
cur_row += 1
def get(self):
# Fails if empty field
try:
return int(self.entry.get())
except ValueError:
return None
def enable(self):
self.entry.config(state=tk.NORMAL)
def disable(self):
self.entry.config(state=tk.DISABLED)
class floatEntry:
"""User entry for non-negative, non-special floating point number."""
def __init__(self, master, name):
global cur_row
label = tk.Label(master, text=name, background=BACKGROUND)
label.grid(row=cur_row, column=0, sticky="e", pady=(5,))
validate = (master.register(_float_validate))
self.entry = tk.Entry(
master, validate="all",
validatecommand=(validate, "%P"), width=10,
background=ENTRY_COLOR)
self.entry.grid(row=cur_row, column=1, padx=(10, 0), pady=(5,),
sticky="w")
cur_row += 1
def get(self):
try:
return float(self.entry.get())
except ValueError:
return None
def enable(self):
self.entry.config(state=tk.NORMAL)
def disable(self):
self.entry.config(state=tk.DISABLED)
class radio:
"""User entry for a radio button."""
# Options is a list of text-value pairs
def __init__(self, master, name, options):
global cur_row
if not all(len(tup) == 2 for tup in options):
print("ERROR: Mismatched text-value pairs")
exit(1)
self.var = tk.IntVar()
self.var.set(None)
label = tk.Label(master, text=name, background=BACKGROUND)
label.grid(row=cur_row, column=0, pady=(5, 0), sticky="e")
self.button_list = []
for i in range(len(options)):
button = tk.Radiobutton(
master, text=options[i][0],
variable=self.var, value=options[i][1],
background=BACKGROUND)
self.button_list.append(button)
if i == 0:
button.grid(row=cur_row, column=1, pady=(5, 0),
sticky="w")
else:
button.grid(row=cur_row, column=1, sticky="w")
cur_row += 1
def get(self):
try:
return self.var.get()
except tk.TclError:
return None
def enable(self):
for button in self.button_list:
button.config(state=tk.NORMAL)
def disable(self):
for button in self.button_list:
button.config(state=tk.DISABLED)
class resultWindow(tk.Toplevel):
"""Create result window that appears after clicking "Evaluate"."""
def __init__(self, parent, message):
tk.Toplevel.__init__(self, parent)
self.config(background=RESULT_BACKGROUND)
self.parent = parent
# Lock main window
self.transient(parent)
self.grab_set()
self.initial_focus = self
self.initial_focus.focus_set()
self.title("Evaluation Result")
self.protocol("WM_DELETE_WINDOW", self.close)
# Main content
self.main_frame = tk.Frame(self, background=RESULT_BACKGROUND)
self.main_frame.pack()
self.frame1 = tk.Frame(self.main_frame)
self.frame1.pack(side=tk.LEFT)
self.result_text = tk.StringVar()
self.label = tk.Label(
self.frame1, textvariable=self.result_text, width=45,
background=RESULT_BACKGROUND)
default_font = font.Font(font="TkDefaultFont")
new_font = default_font
new_font.config(weight=font.BOLD)
self.label.config(font=new_font)
self.label.pack()
# JSON window display sidebar buttons
self.frame2 = tk.Frame(self.main_frame, background=RESULT_BACKGROUND)
self.frame2.pack(side=tk.LEFT)
self.frame2 = tk.Frame(
self.frame2, background=RESULT_BACKGROUND)
self.frame2.pack(side=tk.LEFT)
self.request_button = tk.Button(
self.frame2, text="View Request", command=self.request,
background=BUTTON_COLOR)
self.request_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))
self.result_button = tk.Button(
self.frame2, text="View Result", command=self.result,
background=BUTTON_COLOR)
self.result_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))
self.receipt_button = tk.Button(
self.frame2, text="View Receipt",
command=self.receipt, background=BUTTON_COLOR)
self.receipt_button.pack(fill=tk.X, padx=(0, 10), pady=(10, 0))
# Close button
self.close_button = tk.Button(
self, text="Close",
command=self.close, background=BUTTON_COLOR)
self.close_button.pack(pady=(0, 5))
self.evaluate(message)
def evaluate(self, message):
"""Create and submit workorder and wait for result."""
self.result_text.set("Waiting for evaluation result...")
self.update()
# Create, sign, and submit workorder.
# Convert workloadId to hex.
workload_id = "heart-disease-eval"
workload_id = workload_id.encode("UTF-8").hex()
session_iv = utility.generate_iv()
session_key = utility.generate_key()
requester_nonce = secrets.token_hex(16)
work_order_id = secrets.token_hex(32)
requester_id = secrets.token_hex(32)
wo_params = WorkOrderParams(
work_order_id, worker_id, workload_id, requester_id,
session_key, session_iv, requester_nonce,
result_uri=" ", notify_uri=" ",
worker_encryption_key=worker_obj.encryption_key,
data_encryption_algorithm="AES-GCM-256"
)
wo_params.add_in_data(message)
wo_params.add_encrypted_request_hash()
private_key = utility.generate_signing_keys()
if requester_signature:
# Add requester signature and requester verifying_key
if wo_params.add_requester_signature(private_key) is False:
logger.info("Work order request signing failed")
exit(1)
# Set text for JSON sidebar
req_id = 51
self.request_json = wo_params.to_jrpc_string(req_id)
work_order_instance = direct_jrpc.get_work_order_instance()
response = work_order_instance.work_order_submit(
wo_params.get_work_order_id(),
wo_params.get_worker_id(),
wo_params.get_requester_id(),
wo_params.to_string(),
id=req_id
)
logger.info("Work order submit response : {}\n ".format(
json.dumps(response, indent=4)
))
if "error" in response and response["error"]["code"] != \
WorkOrderStatus.PENDING:
sys.exit(1)
# Create work order receipt
req_id += 1
wo_receipt_instance = direct_jrpc.get_work_order_receipt_instance()
wo_request = json.loads(self.request_json)
wo_receipt_obj = WorkOrderReceiptRequest()
wo_create_receipt = wo_receipt_obj.create_receipt(
wo_request,
ReceiptCreateStatus.PENDING.value,
private_key
)
logger.info("Work order create receipt request : {} \n \n ".format(
json.dumps(wo_create_receipt, indent=4)
))
# Submit work order create receipt jrpc request
wo_receipt_resp = wo_receipt_instance.work_order_receipt_create(
wo_create_receipt["workOrderId"],
wo_create_receipt["workerServiceId"],
wo_create_receipt["workerId"],
wo_create_receipt["requesterId"],
wo_create_receipt["receiptCreateStatus"],
wo_create_receipt["workOrderRequestHash"],
wo_create_receipt["requesterGeneratedNonce"],
wo_create_receipt["requesterSignature"],
wo_create_receipt["signatureRules"],
wo_create_receipt["receiptVerificationKey"],
req_id
)
logger.info("Work order create receipt response : {} \n \n ".format(
wo_receipt_resp
))
# Retrieve result and set GUI result text
res = work_order_instance.work_order_get_result(
work_order_id,
req_id
)
self.result_json = json.dumps(res, indent=4)
if "result" in res:
sig_obj = signature.ClientSignature()
status = sig_obj.verify_signature(
res, worker_obj.verification_key)
try:
if status == SignatureStatus.PASSED:
logger.info("Signature verification Successful")
decrypted_res = utility. \
decrypted_response(res, session_key, session_iv)
logger.info("\n" +
"Decrypted response:\n {}".
format(decrypted_res))
else:
logger.info("Signature verification Failed")
sys.exit(1)
except Exception as err:
logger.info("ERROR: Failed to decrypt response: %s", str(err))
sys.exit(1)
else:
logger.info("\n Work order get result failed {}\n".format(res))
sys.exit(1)
# Set text for JSON sidebar
self.result_text.set(
decrypted_res[0]["data"])
# Retrieve receipt
# Set text for JSON sidebar
req_id += 1
self.receipt_json = json.dumps(
wo_receipt_instance.work_order_receipt_retrieve(
work_order_id,
req_id
),
indent=4
)
def request(self):
jsonWindow(self, self.request_json, "Request JSON")
def result(self):
jsonWindow(self, self.result_json, "Result JSON")
def receipt(self):
jsonWindow(self, self.receipt_json, "Receipt JSON")
def close(self):
self.parent.focus_set()
self.destroy()
class jsonWindow(tk.Toplevel):
"""Template for JSON display
(from clicking View Request/Result/Receipt buttons).
"""
def __init__(self, parent, json, title):
tk.Toplevel.__init__(self, parent)
self.title(title)
self.scrollbar = tk.Scrollbar(self)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.text = tk.Text(self, yscrollcommand=self.scrollbar.set)
self.text.insert(tk.END, json)
self.text.config(state=tk.DISABLED)
self.text.pack(expand=True, fill="both")
self.scrollbar.config(command=self.text.yview)
def gui_main():
"""Create main Tkinter window and "Evaluate" event handler."""
root = tk.Tk()
root.title("Heart Disease Evaluation")
root.config(background=BACKGROUND)
# Display image
imageFile = TCFHOME + \
"/examples/apps/heart_disease_eval/images/ecg.jpg"
img = ImageTk.PhotoImage(Image.open(imageFile))
canvas = tk.Canvas(root, width=290, height=220, background=BACKGROUND)
canvas.pack()
canvas.create_image(20, 20, anchor=tk.NW, image=img)
# Setup left and right frames for data entry
var_root = tk.Frame(root, background=BACKGROUND)
var_root.pack(pady=(10, 0))
v_frame1 = tk.Frame(var_root, background=BACKGROUND)
v_frame1.pack(fill=tk.Y, side=tk.LEFT, padx=(10, 0))
v_frame2 = tk.Frame(var_root, background=BACKGROUND)
v_frame2.pack(fill=tk.Y, side=tk.LEFT, padx=(0, 10))
# Organizes parameter grid
global cur_row
cur_row = 0
# Parameter grid
age = intEntry(v_frame1, "Age")
sex = radio(v_frame1, "Sex", [("Male", 1), ("Female", 0)])
cp = radio(v_frame1, "Chest pain type", [("Typical angina", 1),
("Atypical angina", 2), ("Non-anginal pain", 3),
("Asymptomatic", 4)])
trestbps = intEntry(v_frame1, "Resting blood pressure\n (mm Hg)")
chol = intEntry(v_frame1, "Serum cholesterol (mg/dl)")
fbs = intEntry(v_frame1, "Fasting blood sugar (mg/dl)")
restecg = radio(v_frame1, "Electrocardiographic\n resting results",
[("Normal", 0), ("ST-T wave abnormality", 1),
("Showing hypertrophy", 2)])
thalach = intEntry(v_frame1, "Maximum heart rate")
exang = radio(v_frame2, "Exercise induced angina",
[("Yes", 1), ("No", 0)])
oldpeak = floatEntry(
v_frame2, "ST depression induced by\n exercise relative to rest")
slope = radio(v_frame2, "Slope of the peak\n exercise ST segment",
[("Upsloping", 0), ("Flat", 1), ("Downsloping", 2)])
    ca = radio(v_frame2, "Major vessels colored\n by fluoroscopy",
[("0", 0), ("1", 1), ("2", 2), ("3", 3)])
thal = radio(
v_frame2,
"Thallium stress test",
[("Normal", 3), ("Fixed defect", 6), ("Reversible defect", 7)])
num = radio(v_frame2, "Heart disease diagnosis",
[("<50% diameter narrowing", 0),
(">50% diameter narrowing", 1)])
var_list = [age, sex, cp, trestbps, chol, fbs, restecg, thalach,
exang, oldpeak, slope, ca, thal, num]
def string_toggle():
"""Disable/enable other variable entries/buttons based on
whether string input option is selected.
"""
if string_use.get() == 1 or random_use.get() == 1:
for var in var_list:
var.disable()
string_entry.config(state=tk.NORMAL)
else:
for var in var_list:
var.enable()
string_entry.config(state=tk.DISABLED)
# Input vars as string option with a check button to enable
random_frame = tk.Frame(root, background=ENTRY_COLOR)
random_frame.pack()
# Option to generate random data entry
random_use = tk.IntVar()
random_check = tk.Checkbutton(
random_frame, command=string_toggle, variable=random_use,
background=BACKGROUND)
random_check.pack(side=tk.LEFT)
random_label = tk.Label(
random_frame,
text="Generate random data ",
background=BACKGROUND)
random_label.pack(side=tk.LEFT)
# Option to enter data as space-separated string entries
string_frame = tk.Frame(root, background=ENTRY_COLOR)
string_frame.pack()
string_use = tk.IntVar()
string_check = tk.Checkbutton(
string_frame, command=string_toggle, variable=string_use,
background=BACKGROUND)
string_check.pack(side=tk.LEFT)
string_label = tk.Label(
string_frame,
text="Input variables as a string",
background=BACKGROUND)
string_label.pack(side=tk.LEFT)
string_entry = tk.Entry(
string_frame, state=tk.DISABLED, width=50,
background=ENTRY_COLOR)
string_entry.pack(side=tk.LEFT)
def evaluate():
"""Open window that will submit work order and retrieve
an evaluation result.
"""
message = "Heart disease evaluation data: "
if string_use.get() == 1: # input is space-separated numbers
input_data = string_entry.get()
if input_data is None or len(input_data) == 0:
messagebox.showwarning(
"Error", "Must input space-separated variables")
return
message = message + input_data
elif random_use.get() == 1:
input_data = _generate_random_data()
if input_data is None or len(input_data) == 0:
messagebox.showwarning(
"Error", "Random variable generation error")
return
message = message + input_data
else:
for var in var_list:
if var.get() is None:
messagebox.showwarning("Error", "Must input all variables")
return
message = message + str(var.get()) + " "
root.wait_window(resultWindow(root, message))
def aggregate():
"""Open window that will submit work order to retrieve
an aggregate result.
"""
message = "Heart disease aggregate data: "
root.wait_window(resultWindow(root, message))
# "Evaluate" button
eval_text = tk.StringVar()
eval_label = tk.Label(root, textvariable=eval_text,
background=BACKGROUND)
eval_label.pack()
eval_button = tk.Button(root, text="Evaluate", command=evaluate,
background=BUTTON_COLOR)
eval_button.pack()
# "Aggregate" button
aggr_text = tk.StringVar()
aggr_label = tk.Label(root, textvariable=aggr_text, background=BACKGROUND)
aggr_label.pack()
aggr_button = tk.Button(root, text="Aggregate all data",
command=aggregate, background=BUTTON_COLOR)
aggr_button.pack(pady=(0, 10))
root.mainloop()
def parse_command_line(args):
"""Setup and parse command line arguments and help information."""
global worker_obj
global worker_id
global verbose
global config
global off_chain
global requester_signature
parser = argparse.ArgumentParser()
use_service = parser.add_mutually_exclusive_group()
parser.add_argument(
"-c", "--config",
help="the config file containing the" +
" Ethereum contract information", type=str)
use_service.add_argument(
"-r", "--registry-list",
help="the Ethereum address of the registry list",
type=str)
use_service.add_argument(
"-s", "--service-uri",
help="skip URI lookup and send to specified URI",
type=str)
use_service.add_argument(
"-o", "--off-chain",
help="skip URI lookup and use the registry in the config file",
action="store_true")
parser.add_argument(
"-w", "--worker-id",
help="skip worker lookup and retrieve specified worker",
type=str)
parser.add_argument(
"-v", "--verbose",
help="increase output verbosity",
action="store_true")
parser.add_argument(
"-rs", "--requester_signature",
help="Enable requester signature for work order requests",
action="store_true")
options = parser.parse_args(args)
if options.config:
conf_files = [options.config]
else:
conf_files = [TCFHOME +
"/sdk/avalon_sdk/tcf_connector.toml"]
conf_paths = ["."]
try:
config = pconfig.parse_configuration_files(conf_files, conf_paths)
json.dumps(config, indent=4)
except pconfig.ConfigurationException as e:
logger.error(str(e))
sys.exit(-1)
global direct_jrpc
direct_jrpc = AvalonDirectClient(conf_files[0])
# Whether or not to connect to the registry list on the blockchain
off_chain = False
if options.registry_list:
config["ethereum"]["direct_registry_contract_address"] = \
options.registry_list
if options.service_uri:
service_uri = options.service_uri
off_chain = True
if options.off_chain:
service_uri = config["tcf"].get("json_rpc_uri")
off_chain = True
requester_signature = options.requester_signature
verbose = options.verbose
worker_id = options.worker_id
# Initializing Worker Object
worker_obj = worker.SGXWorkerDetails()
def initialize_logging(config):
"""Initialize logging."""
if verbose:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "INFO"
}
else:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "WARN"
}
plogger.setup_loggers(config.get("Logging", {}))
sys.stdout = plogger.stream_to_logger(
logging.getLogger("STDOUT"), logging.DEBUG)
sys.stderr = plogger.stream_to_logger(
logging.getLogger("STDERR"), logging.WARN)
def initialize_tcf(config):
"""Initialize Avalon: get Avalon worker instance."""
logger.info("***************** Avalon *****************")
# Retrieve Worker Registry
if not off_chain:
registry_list_instance = direct_jrpc. \
get_worker_registry_list_instance()
registry_count, lookup_tag, registry_list = \
registry_list_instance.registry_lookup()
logger.info("\n Registry lookup response : registry count {}\
lookup tag {} registry list {}\n".format(
registry_count, lookup_tag, registry_list
))
if (registry_count == 0):
logger.warn("No registries found")
sys.exit(1)
registry_retrieve_result = \
registry_list_instance.registry_retrieve(
registry_list[0])
logger.info("\n Registry retrieve response : {}\n".format(
registry_retrieve_result
))
config["tcf"]["json_rpc_uri"] = registry_retrieve_result[0]
# Prepare worker
global worker_id
if not worker_id:
worker_registry_instance = direct_jrpc.get_worker_registry_instance()
req_id = 31
worker_lookup_result = worker_registry_instance.worker_lookup(
worker_type=WorkerType.TEE_SGX,
id=req_id
)
logger.info("\n Worker lookup response : {} \n".format(
json.dumps(worker_lookup_result, indent=4)
))
if "result" in worker_lookup_result and \
"ids" in worker_lookup_result["result"].keys():
if worker_lookup_result["result"]["totalCount"] != 0:
worker_id = \
worker_lookup_result["result"]["ids"][0]
else:
logger.error("ERROR: No workers found")
sys.exit(1)
else:
logger.error("ERROR: Failed to lookup worker")
sys.exit(1)
req_id += 1
worker = worker_registry_instance.worker_retrieve(
worker_id,
req_id
)
logger.info("\n Worker retrieve response : {}\n".format(
json.dumps(worker, indent=4)
))
worker_obj.load_worker(
worker
)
logger.info("**********Worker details Updated with Worker ID" +
"*********\n%s\n", worker_id)
def main(args=None):
"""Entry point function."""
parse_command_line(args)
initialize_logging(config)
initialize_tcf(config)
# Open GUI
gui_main()
# -----------------------------------------------------------------------------
main()
|
the-stack_0_696 | import numpy as np
import collections
import numbers
import torch
import os
from . import joint_network
from .summary import LamanClassificationSummary
from .. import corruption_dataset, model as mo
from .representation import graph_to_rep, combine_graph_reps, encode_action, LamanRep, get_action_offsets
from ..molecule_models import _train_utils, _train_harness
from ._utils import cast_numpy_rec
def _transform(graph, act):
graph_rep = graph_to_rep(graph)
act_encoded = encode_action(act, graph)
act_coarse = act.action_type
offset = torch.from_numpy(get_action_offsets(graph)).int()
return {
'graph': graph_rep,
'label': act_encoded,
'label_coarse': act_coarse,
'label_offset': offset
}
def _collate(batch):
graph = combine_graph_reps([b['graph'] for b in batch])
graph = cast_numpy_rec(graph)
label_fine = torch.LongTensor([b['label'] for b in batch])
label_coarse = torch.LongTensor([b['label_coarse'] for b in batch])
offsets = torch.stack([b['label_offset'] for b in batch])
return {'graph': graph, 'label': label_fine, 'label_coarse': label_coarse, 'label_offset': offsets}
def make_dataloader(dataset, batch_size=128, num_workers=2):
return torch.utils.data.DataLoader(
dataset, batch_size, shuffle=True, collate_fn=_collate,
pin_memory=True, num_workers=num_workers)
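# Minimal sketch of wiring these pieces together (the dataset path and batch size are
# illustrative assumptions, not fixed values):
#   dataset = corruption_dataset.LamanCorruptionDataset('path/to/data.pkl', transform=_transform)
#   loader = make_dataloader(dataset, batch_size=128, num_workers=2)
#   batch = next(iter(loader))  # dict with 'graph', 'label', 'label_coarse', 'label_offset'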
class LamanJointHarness(_train_harness.TrainingHarness):
_keys = ['label', 'label_offset']
def __init__(self, model, optimizer, summary, task='train', profile=False):
super(LamanJointHarness, self).__init__(model, optimizer, summary, task=task, profile=profile)
def get_model_input(self, batch):
graph = LamanRep.from_sequence(batch['graph'])
return graph,
def get_loss(self, model_output, batch):
loss, summary_info = mo.classification.multi_classification_coarse_to_fine_loss(
model_output, batch['label_coarse'], batch['label'], summary_info=True)
self.summary.record_marginal_probability(
torch.nn.functional.softmax(summary_info['coarse_logit'].detach(), dim=1).mean(dim=0))
return loss
def record_step_summary(self, batch, model_output):
logits_and_scopes = model_output
prediction, label, label_offset = _train_harness.compute_and_aggregate_predictions(
logits_and_scopes, batch, self._keys)
if self.summary:
self.summary.record_statistics(prediction, label, label_offset)
def main(parameters=None):
if parameters is None:
parameters = {}
task = parameters.get('task', 'train')
batch_size = parameters.get('batch_size', 256)
dataset_path = parameters.get('dataset_path')
if dataset_path is None:
dataset_path = '../data/laman/low_decomp_dataset_sample.pkl'
dataset = corruption_dataset.LamanCorruptionDataset(dataset_path, transform=_transform)
dataloader = make_dataloader(dataset, batch_size, num_workers=parameters.get('num_workers', 2))
config = joint_network.JointClassificationNetworkConfig(
5, message_size=256)
model = joint_network.JointClassificationNetwork(config)
if 'model_path' in parameters and parameters['model_path'] is not None:
model.load_state_dict(torch.load(parameters['model_path'], map_location='cpu'))
model = model.cuda()
if task != 'train':
model = model.eval()
if task == 'train':
optimizer, schedulers = _train_utils.init_optimizer(model, parameters)
else:
optimizer = None
schedulers = []
summary = LamanClassificationSummary()
harness = LamanJointHarness(model, optimizer, summary, task)
harness.hooks.extend([
_train_harness.LogLossTimeHook(batch_size),
_train_harness.PrintAccuracyHook(summary, None)
])
savedir = _train_utils.get_save_dir(parameters)
for epoch in range(30):
dataset.set_epoch(epoch)
harness.set_epoch(epoch)
if task == 'train':
for scheduler in schedulers:
scheduler.step()
harness.train_epoch(dataloader)
if task == 'train':
torch.save(
model.state_dict(),
os.path.join(savedir, 'laman_joint_ep_{0}.pth'.format(epoch)))
if __name__ == '__main__':
args = _train_utils.parse_arguments()
main(vars(args))
|
the-stack_0_698 | import numpy as np
import matplotlib.pyplot as plt
from sympy import solve, Eq, symbols
import sys
import pandas
import math
import os
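# This function calculates the COP degradation caused by entropy generation S relative
# to the total heat input Qall, scaled by the Carnot factor of the evaporator and
# condenser temperatures.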
def degradeCOP(Tevap, Tcond, Qall, S):
degraded = ((Tevap * Tcond)/(Tcond - Tevap)) * (S/Qall)
return degraded
# This function calculates the reversible COP of an ES refrigerator based on thermal reservoirs
def reversibleCOP(Tevap, Tcond, Tgen):
revCOP = (((Tgen - Tcond)/(Tgen))/((Tcond-Tevap)/Tevap))
return revCOP
# This function solves the system of equations to calculate the mass flowrates of
# the combined absorber-evaporator system
def massabsorberevaporator(m6, m4, xa4, ya3, xa6):
m3, m5= symbols(['m3', 'm5'])
system = [
#Eq((xa4*m4)+ (ya3 * m3) - (0 * m5) - (xa6 * m6), 0),
Eq(m5 - (1-ya3)*m3,0),
Eq(m4 + m3 - m5 - m6, 0),
#Eq((1-ya3)*m3-m5, 0)
#Eq(m4 - (ya3*m3) - (m5) + (ya3 * m6), 0)
]
soln = solve(system, [m4, m3, m5, m6])
return float(m4), float(soln[m3]), float(soln[m5]), float(m6)
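# Illustrative call (the flow rates and compositions below are made-up numbers, not
# design data):
#   m4, m3, m5, m6 = massabsorberevaporator(m6=1.0, m4=2.0, xa4=0.3, ya3=0.9, xa6=0.4)
# which returns the four stream flow rates consistent with the balances above.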
# This is an interpolate helper function to be used in other functions.
# targetcomp refers to ammonia composition. All CSV files are in ammonia composition.
def interpolateAW(targetcomp, T, var):
# must use the entropy-ammonia-water csv, entropy-ammonia-butane csv, or enthalpy-ammonia-water csv
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = 'data/mixed/'
abs_file_path = os.path.join(script_dir, rel_path)
colnames = ['pressure', 'ammoniacomp', 'prop']
propertyname = ''
    if var == 1:
propertyname = 'enthalpy'
else:
propertyname = 'entropy'
filename = propertyname + '-kjmol-' + str(T) + 'K-ammoniawater'
data = pandas.read_csv(str(abs_file_path) + '%s.csv' %filename, names=colnames)
ammoniacomp = data.ammoniacomp.tolist()
prop = data.prop.tolist()
lower = prop[int(math.floor(targetcomp /0.05))]
higher = prop[(int((math.floor(targetcomp /0.05))+1))]
theta = (targetcomp - int(math.floor(targetcomp /0.05))*0.05 ) / ((int((math.floor(targetcomp /0.05))+1))*0.05 - int(math.floor(targetcomp /0.05))*0.05 )
return (theta * higher) + (1-theta)*lower
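# Example use (a sketch; it assumes data/mixed/enthalpy-kjmol-325K-ammoniawater.csv exists):
#   h_mix = interpolateAW(0.37, 325, 1)   # enthalpy (var=1) at 0.37 ammonia fraction, 325 K
#   s_mix = interpolateAW(0.37, 325, 2)   # entropy (any other var value)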
def interpolateAB(targetcomp, T, var):
# must use the entropy-ammonia-water csv, entropy-ammonia-butane csv, or enthalpy-ammonia-water csv
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = 'data/mixed/'
abs_file_path = os.path.join(script_dir, rel_path)
colnames = ['pressure', 'ammoniacomp', 'prop']
propertyname = ''
    if var == 1:
propertyname = 'enthalpy'
else:
propertyname = 'entropy'
filename = propertyname + '-kjmol-' + str(T) + 'K-ammoniabutane'
data = pandas.read_csv(str(abs_file_path) + '%s.csv' %filename, names=colnames)
ammoniacomp = data.ammoniacomp.tolist()
prop = data.prop.tolist()
lower = prop[int(math.floor(targetcomp /0.05))]
higher = prop[(int((math.floor(targetcomp /0.05))+1))]
theta = (targetcomp - int(math.floor(targetcomp /0.05))*0.05 ) / ((int((math.floor(targetcomp /0.05))+1))*0.05 - int(math.floor(targetcomp /0.05))*0.05 )
return (theta * higher) + (1-theta)*lower
# This calculates the two mass flowrates and the compositions coming out of the flash drum
# given a mass flowrate and composition of ammonia coming in
# inputcomp is the ammonia composition
# temp is the temperature that the flash drum flashes at
def leverrule(inputflow, temp, inputcomp):
#t-xy of ammonia-water
#input composition of ammonia
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = "data/txy/"
abs_file_path = os.path.join(script_dir, rel_path)
colnames = ['pressure', 'ammoniacomp', 'temperature', 'vaporwater', 'vaporammonia', 'liquidwater', 'liquidammonia']
filename = 'txy-ammonia-4bar'
data = pandas.read_csv( str(abs_file_path) + '%s.csv' %filename, names = colnames)
ammoniacomp = data.ammoniacomp.tolist()
temperature = data.temperature.tolist()
vaporammonia = data.vaporammonia.tolist()
liquidammonia = data.liquidammonia.tolist()
index, valuetemp = min(enumerate(temperature), key=lambda x: abs(x[1]-temp))
liquiddistance = inputcomp - liquidammonia[index]
vapordistance = vaporammonia[index] - inputcomp
vaporflow = symbols('vaporflow')
system = [
Eq((vapordistance * vaporflow) + (-1.0*liquiddistance*(float(inputflow) - vaporflow)), 0)
]
soln = solve(system, [vaporflow])
# the order is: vapor flow, liquid flow, liquid ammonia composition. vapor ammonia composition
return soln[vaporflow], (inputflow - soln[vaporflow]) ,liquidammonia[index], vaporammonia[index]
# This calculates the Q of the generator
# compin is the ammonia composition
def Qgenerator(massin, compin, Tgen):
massout = massin
enthalpyin = interpolateAW(compin, 325, 1 )
enthalpyout = interpolateAW(compin, Tgen, 1)
Qgen = -1*(massin*enthalpyin - massout*enthalpyout)
return Qgen
# This calculates the S of the generator
# compin is the ammonia composition
def Sgenerator(massin, compin, Qgen, Tgen):
massout = massin
entropyin = interpolateAW(compin, 325, 2)
#RAHUL fixed Line 95 - wrong entropy values read in
entropyout = interpolateAW(compin, Tgen, 2)
Sgen = symbols('Sgen')
system = Eq((-1 * massin * entropyin ) + (massout*entropyout) + (Qgen/Tgen) - Sgen, 0)
soln = solve([system], Sgen)
return soln[Sgen]
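# This calculates the Q of the flash drum from the inlet stream and the vapor and
# liquid product streams at Tgen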
def Qflash(massin, massvapor, massliquid, compin, vaporammonia, liquidammonia, Tgen):
enthalpyin = interpolateAW( compin, Tgen, 1)
enthalpyoutvapor = interpolateAW(vaporammonia, Tgen, 1)
enthalpyoutliquid = interpolateAW( liquidammonia, Tgen, 1)
Qflash = symbols('Qflash')
system = Eq(( massin* enthalpyin ) + (-1* massvapor*enthalpyoutvapor) + (-1* massliquid*enthalpyoutliquid) + Qflash, 0)
soln = solve([system], [Qflash])
return soln[Qflash]
# This calculates the S of the flash
# compin is the ammonia composition fed to the flash
def Sflash(massin, massvapor, massliquid, compin, vaporammonia, liquidammonia, Qflash, Tgen):
entropyin = interpolateAW( compin, Tgen, 2)
entropyoutvapor = interpolateAW(vaporammonia, Tgen, 2)
entropyoutliquid = interpolateAW(liquidammonia, Tgen, 2)
Sflash = symbols('Sflash')
system = Eq(( massin* entropyin ) + (-1* massvapor*entropyoutvapor) + (-1* massliquid*entropyoutliquid) + (Sflash/Tgen) - Qflash, 0)
soln = solve([system], Sflash)
return soln[Sflash]
def Qevaporator(m2, m3, m5, ya3, ya2, xa5, Tgen):
enthalpym2 = interpolateAW(ya2, Tgen, 1)
enthalpym3 = interpolateAB( ya3, 266, 1)
#print(enthalpym3)
enthalpym5 = interpolateAB( xa5, 325, 1)
#print(enthalpym5)
# print(enthalpym2 + enthalpym3 + enthalpym5)
Qevap = symbols('Qevap')
system = Eq(( m2 * enthalpym2 ) + (-1* m3*enthalpym3) + (m5*enthalpym5) + Qevap, 0)
soln = solve([system], Qevap)
#print(type(soln))
return soln[Qevap]
def Sevaporator(m2, m3, m5, ya3, ya2, xa5, Qevap, Tgen):
entropym2 = interpolateAW(ya2, Tgen, 2)
entropym3 = interpolateAB( ya3, 266, 2)
entropym5 = interpolateAB( xa5, 325, 2)
Sevap = symbols('Sevap')
system = Eq(( m2 * entropym2 ) + (-1* m3*entropym3) + (m5*entropym5) + (Qevap/266) - Sevap, 0)
soln = solve([system], Sevap)
return soln[Sevap]
def Qabsorber(m3, m4, m5, m6, ya3, xa4, xa5, xa6, Tgen):
enthalpym3 = interpolateAB( ya3, 266, 1)
enthalpym4 = interpolateAW( xa4, Tgen, 1)
enthalpym5 = interpolateAB(xa5, 325, 1)
enthalpym6 = interpolateAW( xa6, 325, 1)
Qabs = symbols('Qabs')
system = (m3 * enthalpym3 ) + (m4 * enthalpym4) + (-1*m5 * enthalpym5) + (-1 * m6 * enthalpym6) + Qabs
soln = solve([system], Qabs)
return soln[Qabs]
def Sabsorber(m3, m4, m5, m6, ya3, xa4, xa5, xa6, Qabs, Tgen):
entropym3 = interpolateAB( ya3, 266, 2)
entropym4 = interpolateAW( xa4, Tgen, 2)
entropym5 = interpolateAB( xa5, 325, 2)
entropym6 = interpolateAW(xa6, 325, 2)
Sabs = symbols('Sabs')
system = Eq((m3*entropym3) + (m4 * entropym4) + (-1*m5 * entropym5) + (-1*m6*entropym6) + (Qabs/325)- Sabs, 0)
soln = solve([system], Sabs)
return soln[Sabs] |
the-stack_0_700 | # SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# [MS-VDS]: Virtual Disk Service (VDS) Protocol
# This was used as a way to test the DCOM runtime. Further
# testing is needed to verify it is working as expected
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC
#
# Since DCOM is like an OO RPC, instead of helper functions you will see the
# classes described in the standards developed.
# There are test cases for them too.
#
from impacket.dcerpc.v5.ndr import NDRSTRUCT, NDRUniConformantVaryingArray, NDRENUM
from impacket.dcerpc.v5.dcomrt import DCOMCALL, DCOMANSWER, IRemUnknown2, PMInterfacePointer, INTERFACE
from impacket.dcerpc.v5.dtypes import LPWSTR, ULONG, DWORD, SHORT, GUID
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.dcerpc.v5.enum import Enum
from impacket import hresult_errors
from impacket.uuid import string_to_bin
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
if self.error_code in hresult_errors.ERROR_MESSAGES:
error_msg_short = hresult_errors.ERROR_MESSAGES[self.error_code][0]
error_msg_verbose = hresult_errors.ERROR_MESSAGES[self.error_code][1]
return 'VDS SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'VDS SessionError: unknown error code: 0x%x' % (self.error_code)
################################################################################
# CONSTANTS
################################################################################
# 1.9 Standards Assignments
CLSID_VirtualDiskService = string_to_bin('7D1933CB-86F6-4A98-8628-01BE94C9A575')
IID_IEnumVdsObject = string_to_bin('118610B7-8D94-4030-B5B8-500889788E4E')
IID_IVdsAdviseSink = string_to_bin('8326CD1D-CF59-4936-B786-5EFC08798E25')
IID_IVdsAsync = string_to_bin('D5D23B6D-5A55-4492-9889-397A3C2D2DBC')
IID_IVdsServiceInitialization = string_to_bin('4AFC3636-DB01-4052-80C3-03BBCB8D3C69')
IID_IVdsService = string_to_bin('0818A8EF-9BA9-40D8-A6F9-E22833CC771E')
IID_IVdsSwProvider = string_to_bin('9AA58360-CE33-4F92-B658-ED24B14425B8')
IID_IVdsProvider = string_to_bin('10C5E575-7984-4E81-A56B-431F5F92AE42')
error_status_t = ULONG
# 2.2.1.1.3 VDS_OBJECT_ID
VDS_OBJECT_ID = GUID
################################################################################
# STRUCTURES
################################################################################
# 2.2.2.1.3.1 VDS_SERVICE_PROP
class VDS_SERVICE_PROP(NDRSTRUCT):
structure = (
('pwszVersion',LPWSTR),
('ulFlags',ULONG),
)
class OBJECT_ARRAY(NDRUniConformantVaryingArray):
item = PMInterfacePointer
# 2.2.2.7.1.1 VDS_PROVIDER_TYPE
class VDS_PROVIDER_TYPE(NDRENUM):
class enumItems(Enum):
VDS_PT_UNKNOWN = 0
VDS_PT_SOFTWARE = 1
VDS_PT_HARDWARE = 2
VDS_PT_VIRTUALDISK = 3
VDS_PT_MAX = 4
# 2.2.2.7.2.1 VDS_PROVIDER_PROP
class VDS_PROVIDER_PROP(NDRSTRUCT):
structure = (
('id',VDS_OBJECT_ID),
('pwszName',LPWSTR),
('guidVersionId',GUID),
('pwszVersion',LPWSTR),
('type',VDS_PROVIDER_TYPE),
('ulFlags',ULONG),
('ulStripeSizeFlags',ULONG),
('sRebuildPriority',SHORT),
)
################################################################################
# RPC CALLS
################################################################################
# 3.4.5.2.5.1 IVdsServiceInitialization::Initialize (Opnum 3)
class IVdsServiceInitialization_Initialize(DCOMCALL):
opnum = 3
structure = (
('pwszMachineName', LPWSTR),
)
class IVdsServiceInitialization_InitializeResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
# 3.4.5.2.4.1 IVdsService::IsServiceReady (Opnum 3)
class IVdsService_IsServiceReady(DCOMCALL):
opnum = 3
structure = (
)
class IVdsService_IsServiceReadyResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
# 3.4.5.2.4.2 IVdsService::WaitForServiceReady (Opnum 4)
class IVdsService_WaitForServiceReady(DCOMCALL):
opnum = 4
structure = (
)
class IVdsService_WaitForServiceReadyResponse(DCOMANSWER):
structure = (
('ErrorCode', error_status_t),
)
# 3.4.5.2.4.3 IVdsService::GetProperties (Opnum 5)
class IVdsService_GetProperties(DCOMCALL):
opnum = 5
structure = (
)
class IVdsService_GetPropertiesResponse(DCOMANSWER):
structure = (
('pServiceProp', VDS_SERVICE_PROP),
('ErrorCode', error_status_t),
)
# 3.4.5.2.4.4 IVdsService::QueryProviders (Opnum 6)
class IVdsService_QueryProviders(DCOMCALL):
opnum = 6
structure = (
('masks', DWORD),
)
class IVdsService_QueryProvidersResponse(DCOMANSWER):
structure = (
('ppEnum', PMInterfacePointer),
('ErrorCode', error_status_t),
)
# 3.1.1.1 IEnumVdsObject Interface
# 3.4.5.2.1.1 IEnumVdsObject::Next (Opnum 3)
class IEnumVdsObject_Next(DCOMCALL):
opnum = 3
structure = (
('celt', ULONG),
)
class IEnumVdsObject_NextResponse(DCOMANSWER):
structure = (
('ppObjectArray', OBJECT_ARRAY),
('pcFetched', ULONG),
('ErrorCode', error_status_t),
)
# 3.4.5.2.14.1 IVdsProvider::GetProperties (Opnum 3)
class IVdsProvider_GetProperties(DCOMCALL):
opnum = 3
structure = (
)
class IVdsProvider_GetPropertiesResponse(DCOMANSWER):
structure = (
('pProviderProp', VDS_PROVIDER_PROP),
('ErrorCode', error_status_t),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
}
################################################################################
# HELPER FUNCTIONS AND INTERFACES
################################################################################
class IEnumVdsObject(IRemUnknown2):
def Next(self, celt=0xffff):
request = IEnumVdsObject_Next()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['celt'] = celt
try:
resp = self.request(request, uuid = self.get_iPid())
except Exception as e:
resp = e.get_packet()
# If it is S_FALSE(1) means less items were returned
if resp['ErrorCode'] != 1:
raise
interfaces = list()
for interface in resp['ppObjectArray']:
interfaces.append(IRemUnknown2(INTERFACE(self.get_cinstance(), ''.join(interface['abData']), self.get_ipidRemUnknown(), target = self.get_target())))
return interfaces
class IVdsProvider(IRemUnknown2):
def GetProperties(self):
request = IVdsProvider_GetProperties()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
resp = self.request(request, uuid = self.get_iPid())
return resp
class IVdsServiceInitialization(IRemUnknown2):
def __init__(self, interface):
IRemUnknown2.__init__(self, interface)
def Initialize(self):
request = IVdsServiceInitialization_Initialize()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['pwszMachineName'] = '\x00'
resp = self.request(request, uuid = self.get_iPid())
return resp
class IVdsService(IRemUnknown2):
def __init__(self, interface):
IRemUnknown2.__init__(self, interface)
def IsServiceReady(self):
request = IVdsService_IsServiceReady()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
try:
resp = self.request(request, uuid = self.get_iPid())
except Exception as e:
resp = e.get_packet()
return resp
def WaitForServiceReady(self):
request = IVdsService_WaitForServiceReady()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
resp = self.request(request, uuid = self.get_iPid())
return resp
def GetProperties(self):
request = IVdsService_GetProperties()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
resp = self.request(request, uuid = self.get_iPid())
return resp
def QueryProviders(self, masks):
request = IVdsService_QueryProviders()
request['ORPCthis'] = self.get_cinstance().get_ORPCthis()
request['ORPCthis']['flags'] = 0
request['masks'] = masks
resp = self.request(request, uuid = self.get_iPid())
return IEnumVdsObject(INTERFACE(self.get_cinstance(), ''.join(resp['ppEnum']['abData']), self.get_ipidRemUnknown(), target = self.get_target()))
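# Rough client sketch (a hedged illustration only, not part of this module; the exact
# DCOMConnection arguments and the RemQueryInterface call should be checked against the
# SMB_RPC test cases referenced in the header):
#   dcom = DCOMConnection(target, username, password)
#   iInterface = dcom.CoCreateInstanceEx(CLSID_VirtualDiskService, IID_IVdsServiceInitialization)
#   vds_init = IVdsServiceInitialization(iInterface)
#   vds_init.Initialize()
#   vds_service = IVdsService(vds_init.RemQueryInterface(1, (IID_IVdsService,)))
#   vds_service.WaitForServiceReady()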
|
the-stack_0_702 | import os
from unittest import TestCase
from configservice import Config, MissingEnviron, ErrorFlagTrue
class TestCore(TestCase):
def test__load_env(self):
# set an env to work with.
os.environ['TEST_ME_X'] = '1'
c = Config()
# Test simple recall.
res = c.get_env('TEST_ME_X')
self.assertEqual('1', res)
# Test default value
res = c.get_env('THIS_DOESNT_EXIST',
default_value='A')
self.assertEqual('A', res)
# Test default value where the key does exist (should take the key instead)
res = c.get_env('TEST_ME_X',
default_value='A')
self.assertEqual('1', res)
# Test test mode responses section.
######### TEST MODES ############
c._test_mode = True
# Test simple recall.
res = c.get_env('TEST_ME_X', test_response='test_res')
self.assertEqual('test_res', res)
# Test assigned value where no value assigned
res = c.get_env('TEST_ME_X',
default_value=24,
test_response='test_res')
self.assertEqual('1', res)
c._test_mode = False
######### End Test Mode Section ############
######## Check error states. ############
with self.assertRaises(MissingEnviron) as e:
res = c.get_env('THIS_DOESNT_EXIST', error_flag=True)
with self.assertRaises(ErrorFlagTrue) as e:
res = c.get_env('THIS_DOESNT_EXIST', error_flag=True, default_value='1')
###### Check data conversion ###########
# Test integer
os.environ['TEST_ME_X'] = '1'
res = c.get_env('TEST_ME_X', data_type_convert='int')
self.assertEqual(1, res)
# Test float
os.environ['TEST_ME_X'] = '1.11'
res = c.get_env('TEST_ME_X', data_type_convert='float')
self.assertEqual(1.11, res)
# Test Bool
os.environ['TEST_ME_X'] = '1'
res = c.get_env('TEST_ME_X', data_type_convert='bool')
self.assertTrue(res)
os.environ['TEST_ME_X'] = 'True'
res = c.get_env('TEST_ME_X', data_type_convert='bool')
self.assertTrue(res)
os.environ['TEST_ME_X'] = '0'
res = c.get_env('TEST_ME_X', data_type_convert='bool')
self.assertFalse(res)
os.environ['TEST_ME_X'] = 'false'
res = c.get_env('TEST_ME_X', data_type_convert='bool')
self.assertFalse(res)
# Test list
os.environ['TEST_ME_X'] = 'a,b,c,d'
res = c.get_env('TEST_ME_X', data_type_convert='list')
golden = ['a', 'b', 'c', 'd']
self.assertListEqual(golden, res)
# Test list int
os.environ['TEST_ME_X'] = '1,2,3,4,5'
res = c.get_env('TEST_ME_X', data_type_convert='list_int')
golden = [1, 2, 3, 4, 5]
self.assertListEqual(golden, res)
# Test list float
os.environ['TEST_ME_X'] = '1.2,2,3.6,4.6,5'
res = c.get_env('TEST_ME_X', data_type_convert='list_float')
golden = [1.2, 2, 3.6, 4.6, 5]
self.assertListEqual(golden, res)
# Test default value int
res = c.get_env('TEST_ME_NO', default_value='3', data_type_convert='int')
self.assertEqual(3, res)
        # Test that default_value takes precedence over test_response in test mode
c._test_mode = True
res = c.get_env('TEST_ME_NO', test_response='2', default_value='3', data_type_convert='int')
self.assertEqual(3, res)
        # Test test_response conversion to int in test mode
c._test_mode = True
res = c.get_env('TEST_ME_NO', test_response='2', data_type_convert='int')
self.assertEqual(2, res)
|
the-stack_0_705 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
str = """ACS3004 湖南新永利交通科工贸有限公司
ACS3005 三一帕尔菲格特种车装备有限公司
ACS3006 湖南新永利交通科工贸有限公司"""
print(str)
items = str.split(sep='\n')
for i, e in enumerate(items, 1):
print(i, '. ', e.split(sep=' ')[0])
for i in range(1):
print(i)
|
the-stack_0_706 | """
Contains abstract functionality for learning locally linear sparse model.
"""
import numpy as np
import scipy as sp
from sklearn.linear_model import Ridge, lars_path
from sklearn.utils import check_random_state
class LimeBase(object):
"""Class for learning a locally linear sparse model from perturbed data"""
def __init__(self,
kernel_fn,
verbose=False,
random_state=None):
"""Init function
Args:
kernel_fn: function that transforms an array of distances into an
array of proximity values (floats).
verbose: if true, print local prediction values from linear model.
random_state: an integer or numpy.RandomState that will be used to
generate random numbers. If None, the random state will be
initialized using the internal numpy seed.
"""
self.kernel_fn = kernel_fn
self.verbose = verbose
self.random_state = check_random_state(random_state)
@staticmethod
def generate_lars_path(weighted_data, weighted_labels):
"""Generates the lars path for weighted data.
Args:
weighted_data: data that has been weighted by kernel
weighted_label: labels, weighted by kernel
Returns:
(alphas, coefs), both are arrays corresponding to the
regularization parameter and coefficients, respectively
"""
x_vector = weighted_data
alphas, _, coefs = lars_path(x_vector,
weighted_labels,
method='lasso',
verbose=False)
return alphas, coefs
def forward_selection(self, data, labels, weights, num_features):
"""Iteratively adds features to the model"""
clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)
used_features = []
for _ in range(min(num_features, data.shape[1])):
max_ = -100000000
best = 0
for feature in range(data.shape[1]):
if feature in used_features:
continue
clf.fit(data[:, used_features + [feature]], labels,
sample_weight=weights)
score = clf.score(data[:, used_features + [feature]],
labels,
sample_weight=weights)
if score > max_:
best = feature
max_ = score
used_features.append(best)
return np.array(used_features)
def feature_selection(self, data, labels, weights, num_features, method):
"""Selects features for the model. see explain_instance_with_data to
understand the parameters."""
if method == 'none':
return np.array(range(data.shape[1]))
elif method == 'forward_selection':
return self.forward_selection(data, labels, weights, num_features)
elif method == 'highest_weights':
clf = Ridge(alpha=0.01, fit_intercept=True,
random_state=self.random_state)
clf.fit(data, labels, sample_weight=weights)
coef = clf.coef_
if sp.sparse.issparse(data):
coef = sp.sparse.csr_matrix(clf.coef_)
weighted_data = coef.multiply(data[0])
# Note: most efficient to slice the data before reversing
sdata = len(weighted_data.data)
argsort_data = np.abs(weighted_data.data).argsort()
# Edge case where data is more sparse than requested number of feature importances
# In that case, we just pad with zero-valued features
if sdata < num_features:
nnz_indexes = argsort_data[::-1]
indices = weighted_data.indices[nnz_indexes]
num_to_pad = num_features - sdata
indices = np.concatenate((indices, np.zeros(num_to_pad, dtype=indices.dtype)))
indices_set = set(indices)
pad_counter = 0
for i in range(data.shape[1]):
if i not in indices_set:
indices[pad_counter + sdata] = i
pad_counter += 1
if pad_counter >= num_to_pad:
break
else:
nnz_indexes = argsort_data[sdata - num_features:sdata][::-1]
indices = weighted_data.indices[nnz_indexes]
return indices
else:
weighted_data = coef * data[0]
feature_weights = sorted(
zip(range(data.shape[1]), weighted_data),
key=lambda x: np.abs(x[1]),
reverse=True)
return np.array([x[0] for x in feature_weights[:num_features]])
elif method == 'lasso_path':
weighted_data = ((data - np.average(data, axis=0, weights=weights))
* np.sqrt(weights[:, np.newaxis]))
weighted_labels = ((labels - np.average(labels, weights=weights))
* np.sqrt(weights))
nonzero = range(weighted_data.shape[1])
_, coefs = self.generate_lars_path(weighted_data,
weighted_labels)
for i in range(len(coefs.T) - 1, 0, -1):
nonzero = coefs.T[i].nonzero()[0]
if len(nonzero) <= num_features:
break
used_features = nonzero
return used_features
elif method == 'auto':
if num_features <= 6:
n_method = 'forward_selection'
else:
n_method = 'highest_weights'
return self.feature_selection(data, labels, weights,
num_features, n_method)
def explain_instance_with_data(self,
neighborhood_data,
neighborhood_labels,
distances,
label,
num_features,
feature_selection='auto',
model_regressor=None):
"""Takes perturbed data, labels and distances, returns explanation.
Args:
neighborhood_data: perturbed data, 2d array. first element is
assumed to be the original data point.
neighborhood_labels: corresponding perturbed labels. should have as
many columns as the number of possible labels.
distances: distances to original data point.
label: label for which we want an explanation
num_features: maximum number of features in explanation
feature_selection: how to select num_features. options are:
'forward_selection': iteratively add features to the model.
This is costly when num_features is high
'highest_weights': selects the features that have the highest
product of absolute weight * original data point when
learning with all the features
'lasso_path': chooses features based on the lasso
regularization path
'none': uses all features, ignores num_features
'auto': uses forward_selection if num_features <= 6, and
'highest_weights' otherwise.
model_regressor: sklearn regressor to use in explanation.
Defaults to Ridge regression if None. Must have
model_regressor.coef_ and 'sample_weight' as a parameter
to model_regressor.fit()
Returns:
(intercept, exp, score, local_pred):
intercept is a float.
exp is a sorted list of tuples, where each tuple (x,y) corresponds
to the feature id (x) and the local weight (y). The list is sorted
by decreasing absolute value of y.
score is the R^2 value of the returned explanation
local_pred is the prediction of the explanation model on the original instance
"""
weights = self.kernel_fn(distances)
labels_column = neighborhood_labels[:, label]
used_features = self.feature_selection(neighborhood_data,
labels_column,
weights,
num_features,
feature_selection)
if model_regressor is None:
model_regressor = Ridge(alpha=1, fit_intercept=True,
random_state=self.random_state)
easy_model = model_regressor
easy_model.fit(neighborhood_data[:, used_features],
labels_column, sample_weight=weights)
prediction_score = easy_model.score(
neighborhood_data[:, used_features],
labels_column, sample_weight=weights)
local_pred = easy_model.predict(neighborhood_data[0, used_features].reshape(1, -1))
if self.verbose:
print('Intercept', easy_model.intercept_)
print('Prediction_local', local_pred,)
print('Right:', neighborhood_labels[0, label])
return (easy_model.intercept_,
sorted(zip(used_features, easy_model.coef_),
key=lambda x: np.abs(x[1]), reverse=True),
prediction_score, local_pred), easy_model
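# Illustrative use (the data shapes and the kernel width are assumptions for this sketch):
#   import numpy as np
#   kernel = lambda d: np.sqrt(np.exp(-(d ** 2) / 0.75 ** 2))
#   base = LimeBase(kernel_fn=kernel)
#   (intercept, exp, score, local_pred), model = base.explain_instance_with_data(
#       neighborhood_data, neighborhood_labels, distances, label=1, num_features=5)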
|
the-stack_0_707 | # Attempts to verify the solutions of discrete mathematics CW1
import random
def listUpTo(num):
"""
Returns a lists of integers from 1 up to num
"""
return list(range(1, num + 1))
def countMultiples(dividendList, divisor):
"""
Returns the total number of multiples of the divisor in dividendList
"""
multNum = 0
for dividend in dividendList:
if dividend % divisor == 0:
multNum += 1
return multNum
def solveQ1(myList, divisor, selectAmount, n):
"""
    Let X denote the number of successful trials in a given n trials.
    Selects 'selectAmount' random elements from 'myList', checks whether each
    is a multiple of 'divisor', performs this for 'n' trials, then prints an
    estimated probability point of X from its binomial distribution.
"""
X = 0
for _ in range(n):
random.shuffle(myList)
for i, selected in enumerate(myList, start=1):
if i == selectAmount:
break
else:
if selected % divisor == 0:
X += 1
p = X / (len(myList) * n * selectAmount)
print(p)
if __name__ == "__main__":
list40 = listUpTo(40)
# print(list40)
# print(countMultiples(list40, 4))
# print()
solveQ1(list40, 4, 2, 10000)
|
the-stack_0_708 | #!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
import locale
try: locale.setlocale( locale.LC_ALL, '' )
except: pass
try:
import os
import argparse
import sys
from hydrus.core import HydrusBoot
HydrusBoot.AddBaseDirToEnvPath()
# initialise Qt here, important it is done early
from hydrus.client.gui import QtPorting as QP
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusLogger
from hydrus.core import HydrusPaths
from hydrus.core import HydrusTemp
argparser = argparse.ArgumentParser( description = 'hydrus network client' )
argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
argparser.add_argument( '--temp_dir', help = 'override the program\'s temporary directory' )
argparser.add_argument( '--db_journal_mode', default = 'WAL', choices = [ 'WAL', 'TRUNCATE', 'PERSIST', 'MEMORY' ], help = 'change db journal mode (default=WAL)' )
argparser.add_argument( '--db_cache_size', type = int, help = 'override SQLite cache_size per db file, in MB (default=256)' )
argparser.add_argument( '--db_transaction_commit_period', type = int, help = 'override how often (in seconds) database changes are saved to disk (default=30,min=10)' )
argparser.add_argument( '--db_synchronous_override', type = int, choices = range(4), help = 'override SQLite Synchronous PRAGMA (default=2)' )
argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run db temp operations entirely in memory' )
argparser.add_argument( '--boot_debug', action='store_true', help = 'print additional bootup information to the log' )
argparser.add_argument( '--no_wal', action='store_true', help = 'OBSOLETE: run using TRUNCATE db journaling' )
argparser.add_argument( '--db_memory_journaling', action='store_true', help = 'OBSOLETE: run using MEMORY db journaling (DANGEROUS)' )
result = argparser.parse_args()
if result.db_dir is None:
db_dir = HC.DEFAULT_DB_DIR
if not HydrusPaths.DirectoryIsWriteable( db_dir ) or HC.RUNNING_FROM_MACOS_APP:
if HC.USERPATH_DB_DIR is None:
raise Exception( 'The default db path "{}" was not writeable, and the userpath could not be determined!'.format( HC.DEFAULT_DB_DIR ) )
db_dir = HC.USERPATH_DB_DIR
else:
db_dir = result.db_dir
db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
if not HydrusPaths.DirectoryIsWriteable( db_dir ):
raise Exception( 'The given db path "{}" is not a writeable-to!'.format( db_dir ) )
try:
HydrusPaths.MakeSureDirectoryExists( db_dir )
except:
raise Exception( 'Could not ensure db path "{}" exists! Check the location is correct and that you have permission to write to it!'.format( db_dir ) )
if not os.path.isdir( db_dir ):
raise Exception( 'The given db path "{}" is not a directory!'.format( db_dir ) )
HG.db_journal_mode = result.db_journal_mode
if result.no_wal:
HG.db_journal_mode = 'TRUNCATE'
if result.db_memory_journaling:
HG.db_journal_mode = 'MEMORY'
if result.db_cache_size is not None:
HG.db_cache_size = result.db_cache_size
else:
HG.db_cache_size = 256
if result.db_transaction_commit_period is not None:
HG.db_transaction_commit_period = max( 10, result.db_transaction_commit_period )
else:
HG.db_transaction_commit_period = 30
if result.db_synchronous_override is not None:
HG.db_synchronous = int( result.db_synchronous_override )
else:
if HG.db_journal_mode == 'WAL':
HG.db_synchronous = 1
else:
HG.db_synchronous = 2
HG.no_db_temp_files = result.no_db_temp_files
HG.boot_debug = result.boot_debug
try:
from twisted.internet import reactor
except:
HG.twisted_is_broke = True
except Exception as e:
try:
HydrusData.DebugPrint( 'Critical boot error occurred! Details written to crash.log!' )
HydrusData.PrintException( e )
except:
pass
import traceback
error_trace = traceback.format_exc()
print( error_trace )
if 'db_dir' in locals() and os.path.exists( db_dir ):
emergency_dir = db_dir
else:
emergency_dir = os.path.expanduser( '~' )
possible_desktop = os.path.join( emergency_dir, 'Desktop' )
if os.path.exists( possible_desktop ) and os.path.isdir( possible_desktop ):
emergency_dir = possible_desktop
dest_path = os.path.join( emergency_dir, 'hydrus_crash.log' )
with open( dest_path, 'w', encoding = 'utf-8' ) as f:
f.write( error_trace )
print( 'Critical boot error occurred! Details written to hydrus_crash.log in either db dir or user dir!' )
sys.exit( 1 )
def boot():
if result.temp_dir is not None:
HydrusTemp.SetEnvTempDir( result.temp_dir )
controller = None
with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:
try:
HydrusData.Print( 'hydrus client started' )
if not HG.twisted_is_broke:
import threading
threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()
from hydrus.client import ClientController
controller = ClientController.Controller( db_dir )
controller.Run()
except:
HydrusData.Print( 'hydrus client failed' )
import traceback
HydrusData.Print( traceback.format_exc() )
finally:
HG.started_shutdown = True
HG.view_shutdown = True
HG.model_shutdown = True
if controller is not None:
controller.pubimmediate( 'wake_daemons' )
if not HG.twisted_is_broke:
reactor.callFromThread( reactor.stop )
HydrusData.Print( 'hydrus client shut down' )
HG.shutdown_complete = True
if HG.restart:
HydrusData.RestartProcess()
|
the-stack_0_710 | print('-*-' * 15)
print('SISTEMA CAIXA ELETRONICO')
print('-*-' * 15)
valor = float(input('Qual será o valor sacado? '))
cedula = 100
qtd = 0
total = valor
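# Greedy banknote breakdown: keep subtracting the current note value (cedula) while it
# still fits in the remaining amount, print the count for that note, then step down to
# the next note until the requested amount has been fully dispensed.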
if valor < 1:
print('Saque somente acima de R$1! ')
while True:
if valor >= cedula:
valor = valor - cedula
qtd += 1
else:
if qtd > 0:
print(F'Total de {qtd} de cedulas de R${cedula}')
if cedula == 100:
cedula = 50
elif cedula == 50:
cedula = 20
elif cedula == 20:
cedula = 10
elif cedula == 10:
cedula = 5
elif cedula == 5:
cedula = 2
elif cedula == 2:
cedula = 1
qtd = 0
        if valor == 0:
break |
the-stack_0_711 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Base tests that all storage providers should implement in their own tests.
They handle the storage-based assertions, internally.
All tests return true if assertions pass to indicate that the code ran to completion, passing internal assertions.
Therefore, all tests using theses static tests should strictly check that the method returns true.
Note: Python cannot have dicts with properties with a None value like other SDKs can have properties with null values.
Because of this, StoreItem tests have "e_tag: *" where the tests in the other SDKs do not.
This has also caused us to comment out some parts of these tests where we assert that "e_tag"
is None for the same reason. A null e_tag should work just like a * e_tag when writing,
as far as the storage adapters are concerened, so this shouldn't cause issues.
:Example:
async def test_handle_null_keys_when_reading(self):
await reset()
test_ran = await StorageBaseTests.handle_null_keys_when_reading(get_storage())
assert test_ran
"""
import pytest
from botbuilder.azure import CosmosDbStorage
from botbuilder.core import (
ConversationState,
TurnContext,
MessageFactory,
MemoryStorage,
)
from botbuilder.core.adapters import TestAdapter
from botbuilder.dialogs import (
DialogSet,
DialogTurnStatus,
TextPrompt,
PromptValidatorContext,
WaterfallStepContext,
Dialog,
WaterfallDialog,
PromptOptions,
)
class StorageBaseTests:
# pylint: disable=pointless-string-statement
@staticmethod
async def return_empty_object_when_reading_unknown_key(storage) -> bool:
result = await storage.read(["unknown"])
assert result is not None
assert len(result) == 0
return True
@staticmethod
async def handle_null_keys_when_reading(storage) -> bool:
if isinstance(storage, (CosmosDbStorage, MemoryStorage)):
result = await storage.read(None)
assert len(result.keys()) == 0
# Catch-all
else:
with pytest.raises(Exception) as err:
await storage.read(None)
assert err.value.args[0] == "Keys are required when reading"
return True
@staticmethod
async def handle_null_keys_when_writing(storage) -> bool:
with pytest.raises(Exception) as err:
await storage.write(None)
assert err.value.args[0] == "Changes are required when writing"
return True
@staticmethod
async def does_not_raise_when_writing_no_items(storage) -> bool:
# noinspection PyBroadException
try:
await storage.write([])
except:
pytest.fail("Should not raise")
return True
@staticmethod
async def create_object(storage) -> bool:
store_items = {
"createPoco": {"id": 1},
"createPocoStoreItem": {"id": 2, "e_tag": "*"},
}
await storage.write(store_items)
read_store_items = await storage.read(store_items.keys())
assert store_items["createPoco"]["id"] == read_store_items["createPoco"]["id"]
assert (
store_items["createPocoStoreItem"]["id"]
== read_store_items["createPocoStoreItem"]["id"]
)
# If decided to validate e_tag integrity again, uncomment this code
# assert read_store_items["createPoco"]["e_tag"] is not None
assert read_store_items["createPocoStoreItem"]["e_tag"] is not None
return True
@staticmethod
async def handle_crazy_keys(storage) -> bool:
key = '!@#$%^&*()_+??><":QASD~`'
store_item = {"id": 1}
store_items = {key: store_item}
await storage.write(store_items)
read_store_items = await storage.read(store_items.keys())
assert read_store_items[key] is not None
assert read_store_items[key]["id"] == 1
return True
@staticmethod
async def update_object(storage) -> bool:
original_store_items = {
"pocoItem": {"id": 1, "count": 1},
"pocoStoreItem": {"id": 1, "count": 1, "e_tag": "*"},
}
# 1st write should work
await storage.write(original_store_items)
loaded_store_items = await storage.read(["pocoItem", "pocoStoreItem"])
update_poco_item = loaded_store_items["pocoItem"]
update_poco_item["e_tag"] = None
update_poco_store_item = loaded_store_items["pocoStoreItem"]
assert update_poco_store_item["e_tag"] is not None
# 2nd write should work
update_poco_item["count"] += 1
update_poco_store_item["count"] += 1
await storage.write(loaded_store_items)
reloaded_store_items = await storage.read(loaded_store_items.keys())
reloaded_update_poco_item = reloaded_store_items["pocoItem"]
reloaded_update_poco_store_item = reloaded_store_items["pocoStoreItem"]
assert reloaded_update_poco_item["count"] == 2
assert reloaded_update_poco_store_item["count"] == 2
# Write with old e_tag should succeed for non-storeItem
update_poco_item["count"] = 123
await storage.write({"pocoItem": update_poco_item})
# Write with old eTag should FAIL for storeItem
update_poco_store_item["count"] = 123
"""
        This assert exists in the other SDKs but can't be used in Python, currently
due to using "e_tag: *" above (see comment near the top of this file for details).
with pytest.raises(Exception) as err:
await storage.write({"pocoStoreItem": update_poco_store_item})
assert err.value is not None
"""
reloaded_store_items2 = await storage.read(["pocoItem", "pocoStoreItem"])
reloaded_poco_item2 = reloaded_store_items2["pocoItem"]
reloaded_poco_item2["e_tag"] = None
reloaded_poco_store_item2 = reloaded_store_items2["pocoStoreItem"]
assert reloaded_poco_item2["count"] == 123
assert reloaded_poco_store_item2["count"] == 2
# write with wildcard etag should work
reloaded_poco_item2["count"] = 100
reloaded_poco_store_item2["count"] = 100
reloaded_poco_store_item2["e_tag"] = "*"
wildcard_etag_dict = {
"pocoItem": reloaded_poco_item2,
"pocoStoreItem": reloaded_poco_store_item2,
}
await storage.write(wildcard_etag_dict)
reloaded_store_items3 = await storage.read(["pocoItem", "pocoStoreItem"])
assert reloaded_store_items3["pocoItem"]["count"] == 100
assert reloaded_store_items3["pocoStoreItem"]["count"] == 100
# Write with empty etag should not work
reloaded_store_items4 = await storage.read(["pocoStoreItem"])
reloaded_store_item4 = reloaded_store_items4["pocoStoreItem"]
assert reloaded_store_item4 is not None
reloaded_store_item4["e_tag"] = ""
dict2 = {"pocoStoreItem": reloaded_store_item4}
with pytest.raises(Exception) as err:
await storage.write(dict2)
assert err.value is not None
final_store_items = await storage.read(["pocoItem", "pocoStoreItem"])
assert final_store_items["pocoItem"]["count"] == 100
assert final_store_items["pocoStoreItem"]["count"] == 100
return True
@staticmethod
async def delete_object(storage) -> bool:
store_items = {"delete1": {"id": 1, "count": 1, "e_tag": "*"}}
await storage.write(store_items)
read_store_items = await storage.read(["delete1"])
assert read_store_items["delete1"]["e_tag"]
assert read_store_items["delete1"]["count"] == 1
await storage.delete(["delete1"])
reloaded_store_items = await storage.read(["delete1"])
assert reloaded_store_items.get("delete1", None) is None
return True
@staticmethod
async def delete_unknown_object(storage) -> bool:
# noinspection PyBroadException
try:
await storage.delete(["unknown_key"])
except:
pytest.fail("Should not raise")
return True
@staticmethod
async def perform_batch_operations(storage) -> bool:
await storage.write(
{"batch1": {"count": 10}, "batch2": {"count": 20}, "batch3": {"count": 30},}
)
result = await storage.read(["batch1", "batch2", "batch3"])
assert result.get("batch1", None) is not None
assert result.get("batch2", None) is not None
assert result.get("batch3", None) is not None
assert result["batch1"]["count"] == 10
assert result["batch2"]["count"] == 20
assert result["batch3"]["count"] == 30
"""
        If decided to validate e_tag integrity again, uncomment this code
assert result["batch1"].get("e_tag", None) is not None
assert result["batch2"].get("e_tag", None) is not None
assert result["batch3"].get("e_tag", None) is not None
"""
await storage.delete(["batch1", "batch2", "batch3"])
result = await storage.read(["batch1", "batch2", "batch3"])
assert result.get("batch1", None) is None
assert result.get("batch2", None) is None
assert result.get("batch3", None) is None
return True
@staticmethod
async def proceeds_through_waterfall(storage) -> bool:
convo_state = ConversationState(storage)
dialog_state = convo_state.create_property("dialogState")
dialogs = DialogSet(dialog_state)
async def exec_test(turn_context: TurnContext) -> None:
dialog_context = await dialogs.create_context(turn_context)
await dialog_context.continue_dialog()
if not turn_context.responded:
await dialog_context.begin_dialog(WaterfallDialog.__name__)
await convo_state.save_changes(turn_context)
adapter = TestAdapter(exec_test)
async def prompt_validator(prompt_context: PromptValidatorContext):
result = prompt_context.recognized.value
if len(result) > 3:
succeeded_message = MessageFactory.text(
f"You got it at the {prompt_context.options.number_of_attempts}rd try!"
)
await prompt_context.context.send_activity(succeeded_message)
return True
reply = MessageFactory.text(
f"Please send a name that is longer than 3 characters. {prompt_context.options.number_of_attempts}"
)
await prompt_context.context.send_activity(reply)
return False
async def step_1(step_context: WaterfallStepContext) -> DialogTurnStatus:
assert isinstance(step_context.active_dialog.state["stepIndex"], int)
await step_context.context.send_activity("step1")
return Dialog.end_of_turn
async def step_2(step_context: WaterfallStepContext) -> None:
assert isinstance(step_context.active_dialog.state["stepIndex"], int)
await step_context.prompt(
TextPrompt.__name__,
PromptOptions(prompt=MessageFactory.text("Please type your name")),
)
async def step_3(step_context: WaterfallStepContext) -> DialogTurnStatus:
assert isinstance(step_context.active_dialog.state["stepIndex"], int)
await step_context.context.send_activity("step3")
return Dialog.end_of_turn
steps = [step_1, step_2, step_3]
dialogs.add(WaterfallDialog(WaterfallDialog.__name__, steps))
dialogs.add(TextPrompt(TextPrompt.__name__, prompt_validator))
step1 = await adapter.send("hello")
step2 = await step1.assert_reply("step1")
step3 = await step2.send("hello")
step4 = await step3.assert_reply("Please type your name") # None
step5 = await step4.send("hi")
step6 = await step5.assert_reply(
"Please send a name that is longer than 3 characters. 0"
)
step7 = await step6.send("hi")
step8 = await step7.assert_reply(
"Please send a name that is longer than 3 characters. 1"
)
step9 = await step8.send("hi")
step10 = await step9.assert_reply(
"Please send a name that is longer than 3 characters. 2"
)
step11 = await step10.send("Kyle")
step12 = await step11.assert_reply("You got it at the 3rd try!")
await step12.assert_reply("step3")
return True
|
the-stack_0_714 | """
Phong Material
For phong shading
"""
from .material import Material
from ..math import Vec3, Ray, HitRecord, dot3, reflect3, normalize3, clamp3
from ..camera import Camera
class PhongMaterial(Material):
"""Base Material Class"""
def __init__(self, color: Vec3 = Vec3(1.,1.,1.), shininess: float = 10.0, reflectivity: float = 0.0, refraction: float = 1.0):
Material.__init__(self, color, shininess, reflectivity, refraction)
def shade(self, camera: Camera, ray: Ray, hitrecord: HitRecord, lights: list) -> Vec3:
"""
Shade method: Phong
phong shader
"""
colorsum = Vec3(0.,0.,0.)
if len(lights)>0:
for light in lights:
                N = hitrecord.normal_g  # surface normal at the hit point
                L = normalize3(hitrecord.point - light.position)  # light -> hit point direction
                E = normalize3(camera.position - hitrecord.point)  # hit point -> eye direction
                R = normalize3(-reflect3(L, N))  # reflected light direction
                diffuse = max(1. - dot3(N, L), 0.0)  # diffuse term, clamped at zero
                specular = pow(max(dot3(R, E), 0.0), 0.3 * self.shininess)  # Phong specular lobe
                color = self.color * 0.5 * (diffuse + specular) * hitrecord.color
colorsum += color
colorsum /= len(lights)
colorsum = clamp3(colorsum, Vec3(0.,0.,0.), Vec3(1.,1.,1.))
else:
# no light in scene, use material color
colorsum = self.color * hitrecord.color
return colorsum
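# Minimal usage sketch (not part of the original module). It assumes a Camera,
# a Ray, a HitRecord and a list of lights coming from the tracer core; the
# names below are illustrative only:
#
#     mat = PhongMaterial(color=Vec3(1., 0., 0.), shininess=32.0)
#     rgb = mat.shade(camera, ray, hitrecord, lights)  # -> Vec3 clamped to [0, 1]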
|
the-stack_0_715 | import numpy as np
import os
import pickle
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import rotate, resize
from skimage import exposure
import skimage.io as io
from config import FLAGS
def load_facegreyreduxshuffled_set(batch_size, is_training=True):
path = os.path.join('data', 'facegreyredux')
if is_training:
fd = open(os.path.join(path, 'facegreyredux'), 'rb')
# loaded = np.fromfile(file=fd, dtype=np.uint8)
loaded = np.asarray(pickle.load(fd))
trainX = loaded.reshape((50000, 28, 28, 1)).astype(np.float32)
fd = open(os.path.join(path, 'facegreyreduxcat'), 'rb')
# loaded = np.fromfile(file=fd, dtype=np.uint8)
loaded = np.asarray(pickle.load(fd))
trainY = loaded.reshape((50000)).astype(np.int32)
data_set = list(zip(trainX,trainY))
np.random.shuffle(data_set)
trainX, trainY = list(zip(*data_set))
trainX = np.asarray(trainX).reshape((50000, 28, 28, 1)).astype(np.float32)
trainY = np.asarray(trainY).reshape((50000)).astype(np.int32)
trX = trainX[:40000] / 255.
trY = trainY[:40000]
valX = trainX[40000:, ] / 255.
valY = trainY[40000:]
num_tr_batch = 40000 // batch_size
num_val_batch = 10000 // batch_size
return trX, trY, num_tr_batch, valX, valY, num_val_batch
else:
        if FLAGS.flickr:
fd = open(os.path.join(path, 'flickrsetgreyredux'), 'rb')
loaded = np.asarray(pickle.load(fd))
trainX = loaded.reshape((10000, 28, 28)).astype(np.float32) / 255.
else:
fd = open(os.path.join(path, 'facegreyreduxeval'), 'rb')
loaded = np.asarray(pickle.load(fd))
trainX = loaded.reshape((10000, 28, 28)).astype(np.float32) / 255.
fd = open(os.path.join(path, 'facegreyreduxevalcat'), 'rb')
loaded = np.asarray(pickle.load(fd))
trainY = loaded.reshape((10000)).astype(np.int32)
rotatedlist = []
for image in trainX:
image = rotate(image, FLAGS.rotation, preserve_range=True)
            if FLAGS.mooney:
v_min, v_max = np.percentile(image, (49.99999999, 51))
image = exposure.rescale_intensity(image, in_range=(v_min, v_max))
rotatedlist.append(image)
            if len(rotatedlist) == 1000:
I = resize(image.reshape(28, 28), (128, 128))
io.imsave("rotate" + str(FLAGS.rotation) + "example.jpg", I, cmap='gray')
rotatedlist = np.asarray(rotatedlist)
plt.imshow(rotatedlist[33], cmap='gray')
plt.show()
trainX = rotatedlist.reshape((10000, 28, 28, 1)).astype(np.float32)
return trainX, trainY
def create_inputs_norb(path, is_train: bool):
"""Get a batch from the input pipeline.
Author:
Ashley Gritzman 15/11/2018
Args:
is_train:
Returns:
img, lab:
"""
if is_train:
trX, trY, num_tr_batch, valX, valY, num_val_batch = load_facegreyreduxshuffled_set(FLAGS.batch_size, is_train)
else:
trX, trY = load_facegreyreduxshuffled_set(FLAGS.batch_size, is_train)
def generator():
for e1, e2 in zip(trX, trY):
yield e1, e2
capacity = 2000 + 3 * FLAGS.batch_size
# Create batched dataset
    tf_dataset = (
        tf.data.Dataset.from_generator(
            generator,
            output_types=(tf.float32, tf.int32),
            output_shapes=(tf.TensorShape(list(trX[0].shape)), ()))
        .repeat()
        .shuffle(capacity)
        .batch(batch_size=FLAGS.batch_size, drop_remainder=True)
        .prefetch(1)
    )
# dataset = input_fn(path, is_train)
# Create one-shot iterator
iterator = tf.compat.v1.data.make_one_shot_iterator(tf_dataset)
img, lab = iterator.get_next()
output_dict = {'image': img,
'label': lab}
return output_dict
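# Hedged usage sketch (not part of the original module). Assumes the pickled
# face data files exist under data/facegreyredux, that FLAGS.batch_size is set
# in config.py, and that TF1-style graph execution is used:
#
#     batch = create_inputs_norb(path="data/facegreyredux", is_train=True)
#     # batch["image"]: (batch_size, 28, 28, 1), batch["label"]: (batch_size,)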
|
the-stack_0_717 | import csv
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from typing import Any, List, Mapping
from dmutils.formats import DATE_FORMAT, DATETIME_FORMAT
from dmutils.s3 import S3
from dmscripts.helpers.s3_helpers import get_bucket_name
# This URL is framework agnostic
PUBLIC_BRIEF_URL = "https://www.digitalmarketplace.service.gov.uk/digital-outcomes-and-specialists/opportunities/{}"
DOS_OPPORTUNITY_HEADERS = [
"ID", "Opportunity", "Link", "Framework", "Category", "Specialist",
"Organisation Name", "Buyer Domain", "Location Of The Work",
"Published At", "Open For", "Expected Contract Length", "Applications from SMEs",
"Applications from Large Organisations", "Total Organisations", "Status", "Winning supplier",
"Size of supplier", "Contract amount", "Contract start date", "Clarification questions", "Employment status"
]
DOWNLOAD_FILE_NAME = "opportunity-data.csv"
def format_datetime_string_as_date(dt):
return datetime.strptime(dt, DATETIME_FORMAT).strftime(DATE_FORMAT) if dt else None
def remove_username_from_email_address(ea):
return '{}'.format(ea.split('@').pop()) if ea else None
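# Doctest-style sketch (hypothetical address):
# >>> remove_username_from_email_address('buyer@example.gov.uk')
# 'example.gov.uk'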
def _build_row(
brief: dict, brief_responses: List[dict], include_buyer_user_details: bool = False
) -> OrderedDict:
winner = None
applications_from_sme_suppliers = 0
applications_from_large_suppliers = 0
for brief_response in brief_responses:
if brief_response['supplierOrganisationSize'] == 'large':
applications_from_large_suppliers += 1
else:
applications_from_sme_suppliers += 1
if brief_response['status'] == 'awarded':
winner = brief_response
row = OrderedDict(zip(DOS_OPPORTUNITY_HEADERS, [
brief['id'],
brief['title'],
PUBLIC_BRIEF_URL.format(brief['id']),
brief['frameworkSlug'],
brief['lotSlug'],
brief.get('specialistRole', ""),
brief['organisation'],
remove_username_from_email_address(brief['users'][0]['emailAddress']),
brief['location'],
format_datetime_string_as_date(brief['publishedAt']),
brief.get('requirementsLength', '2 weeks'), # only briefs on the specialists lot include 'requirementsLength'
brief.get('contractLength', ''),
applications_from_sme_suppliers,
applications_from_large_suppliers,
applications_from_sme_suppliers + applications_from_large_suppliers,
brief['status'],
winner['supplierName'] if winner else '',
winner['supplierOrganisationSize'] if winner else '',
winner['awardDetails']['awardedContractValue'] if winner else '',
winner['awardDetails']['awardedContractStartDate'] if winner else '',
len(brief['clarificationQuestions']),
brief.get('employmentStatus', ''),
]))
if include_buyer_user_details:
buyer_user = brief["users"][0]
row.update([
("Buyer user name", buyer_user["name"]),
("Buyer email address", buyer_user["emailAddress"]),
("Buyer phone number", buyer_user.get("phoneNumber", "")),
])
return row
def get_latest_dos_framework(client) -> str:
frameworks = client.find_frameworks()['frameworks']
for framework in frameworks:
# Should be maximum of 1 live DOS framework
if framework['family'] == 'digital-outcomes-and-specialists' and framework['status'] == 'live':
return framework['slug']
return 'digital-outcomes-and-specialists'
def get_brief_data(client, logger, include_buyer_user_details: bool = False) -> list:
logger.info("Fetching closed briefs from API")
briefs = client.find_briefs_iter(status="closed,awarded,unsuccessful,cancelled", with_users=True,
with_clarification_questions=True)
rows = []
for brief in briefs:
logger.info(f"Fetching brief responses for Brief ID {brief['id']}")
brief_responses = client.find_brief_responses_iter(brief_id=brief['id'])
rows.append(_build_row(brief, brief_responses, include_buyer_user_details))
return rows
def write_rows_to_csv(rows: List[Mapping[str, Any]], file_path: Path, logger) -> None:
logger.info(f"Writing rows to {file_path}")
# assumes all rows have the same keys
fieldnames = list(rows[0].keys())
with open(file_path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames, delimiter=',', quotechar='"')
writer.writeheader()
for row in rows:
writer.writerow(row)
def upload_file_to_s3(
file_path,
bucket,
remote_key_name: str,
download_name: str,
*,
public: bool = True,
dry_run: bool = False,
logger,
):
with open(file_path, 'br') as source_file:
acl = "public-read" if public else "bucket-owner-full-control"
logger.info("{}UPLOAD: {} to s3://{}/{} with acl {}".format(
'[Dry-run]' if dry_run else '',
file_path,
bucket.bucket_name,
remote_key_name,
acl
))
if not dry_run:
# Save file
bucket.save(
remote_key_name,
source_file,
acl=acl,
download_filename=download_name
)
def export_dos_opportunities(
client,
logger,
stage: str,
output_dir,
dry_run: bool = False
):
output_dir = Path(output_dir)
if not output_dir.exists():
logger.info(f"Creating {output_dir} directory")
output_dir.mkdir(parents=True)
latest_framework_slug = get_latest_dos_framework(client)
communications_bucket = S3(get_bucket_name(stage, "communications"))
reports_bucket = S3(get_bucket_name(stage, "reports"))
logger.info("Exporting DOS opportunity data to CSV")
# Get the data
rows = get_brief_data(client, logger, include_buyer_user_details=True)
# Construct CSV for admins
write_rows_to_csv(rows, output_dir / "opportunity-data-for-admins.csv", logger)
# Construct public CSV (filter out buyer details)
write_rows_to_csv(
[
OrderedDict((k, v) for k, v in row.items() if k in DOS_OPPORTUNITY_HEADERS)
for row in rows
],
output_dir / DOWNLOAD_FILE_NAME,
logger
)
# Upload admin CSV to reports bucket
upload_file_to_s3(
output_dir / "opportunity-data-for-admins.csv",
reports_bucket,
f"{latest_framework_slug}/reports/{DOWNLOAD_FILE_NAME}",
DOWNLOAD_FILE_NAME,
public=False,
dry_run=dry_run,
logger=logger
)
# Upload public CSV to S3
upload_file_to_s3(
output_dir / DOWNLOAD_FILE_NAME,
communications_bucket,
f"{latest_framework_slug}/communications/data/{DOWNLOAD_FILE_NAME}",
DOWNLOAD_FILE_NAME,
public=True,
dry_run=dry_run,
logger=logger
)
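# Hedged invocation sketch (not part of the original script). Assumes a
# configured Data API client and a standard logger; `stage` selects the S3
# buckets via get_bucket_name:
#
#     export_dos_opportunities(client, logger, stage="preview",
#                              output_dir="data/dos-opportunities", dry_run=True)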
|
the-stack_0_718 | """Utility functions."""
import logging
import numpy as np
from scipy.signal import periodogram
from tensorpac.methods.meth_pac import _kl_hr
from tensorpac.pac import _PacObj, _PacVisual
from tensorpac.io import set_log_level
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
logger = logging.getLogger('tensorpac')
def pac_vec(f_pha='mres', f_amp='mres'):
"""Generate cross-frequency coupling vectors.
Parameters
----------
    f_pha, f_amp : list, tuple, array_like or string | 'mres'
        Frequency vector for the phase and amplitude. Here you can use
        several forms to define those vectors :
* Basic list/tuple (ex: [2, 4] or [8, 12]...)
* List of frequency bands (ex: [[2, 4], [5, 7]]...)
* Dynamic definition : (start, stop, width, step)
* Range definition (ex : np.arange(3) => [[0, 1], [1, 2]])
* Using a string. `f_pha` and `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors. In that
case, it uses the definition proposed by Bahramisharif et al. 2013
:cite:`bahramisharif2013propagating` i.e
f_pha = [f - f / 4, f + f / 4] and f_amp = [f - f / 8, f + f / 8]
Returns
-------
f_pha, f_amp : array_like
Arrays containing the pairs of phase and amplitude frequencies. Each
vector have a shape of (N, 2).
"""
nb_fcy = dict(lres=10, mres=30, hres=50, demon=70, hulk=100)
if isinstance(f_pha, str):
# get where phase frequencies start / finish / number
f_pha_start, f_pha_end = 2, 20
f_pha_nb = nb_fcy[f_pha]
# f_pha = [f - f / 4, f + f / 4]
f_pha_mid = np.linspace(f_pha_start, f_pha_end, f_pha_nb)
f_pha = np.c_[f_pha_mid - f_pha_mid / 4., f_pha_mid + f_pha_mid / 4.]
if isinstance(f_amp, str):
# get where amplitude frequencies start / finish / number
f_amp_start, f_amp_end = 60, 160
f_amp_nb = nb_fcy[f_amp]
# f_amp = [f - f / 8, f + f / 8]
f_amp_mid = np.linspace(f_amp_start, f_amp_end, f_amp_nb)
f_amp = np.c_[f_amp_mid - f_amp_mid / 8., f_amp_mid + f_amp_mid / 8.]
return _check_freq(f_pha), _check_freq(f_amp)
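# Example (sketch): with the 'mres' presets both vectors hold 30 bands of shape
# (30, 2); phase bands span 2-20 Hz and amplitude bands 60-160 Hz.
# >>> f_pha, f_amp = pac_vec('mres', 'mres')
# >>> f_pha.shape, f_amp.shape
# ((30, 2), (30, 2))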
def _check_freq(f):
"""Check the frequency definition."""
f = np.atleast_2d(np.asarray(f))
#
if len(f.reshape(-1)) == 1:
raise ValueError("The length of f should at least be 2.")
elif 2 in f.shape: # f of shape (N, 2) or (2, N)
        if f.shape[1] != 2:
f = f.T
elif np.squeeze(f).shape == (4,): # (f_start, f_end, f_width, f_step)
f = _pair_vectors(*tuple(np.squeeze(f)))
else: # Sequential
f = f.reshape(-1)
f.sort()
f = np.c_[f[0:-1], f[1::]]
return f
def _pair_vectors(f_start, f_end, f_width, f_step):
# Generate two array for phase and amplitude :
fdown = np.arange(f_start, f_end - f_width, f_step)
fup = np.arange(f_start + f_width, f_end, f_step)
return np.c_[fdown, fup]
def pac_trivec(f_start=60., f_end=160., f_width=10.):
"""Generate triangular vector.
By contrast with the pac_vec function, this function generate frequency
vector with an increasing frequency bandwidth.
Parameters
----------
f_start : float | 60.
Starting frequency.
f_end : float | 160.
Ending frequency.
f_width : float | 10.
Frequency bandwidth increase between each band.
Returns
-------
f : array_like
The triangular vector.
tridx : array_like
The triangular index for the reconstruction.
"""
starting = np.arange(f_start, f_end + f_width, f_width)
f, tridx = np.array([]), np.array([])
for num, k in enumerate(starting[0:-1]):
        # Length of the vector to build:
le = len(starting) - (num + 1)
# Create the frequency vector for this starting frequency :
fst = np.c_[np.full(le, k), starting[num + 1::]]
nfst = fst.shape[0]
# Create the triangular index for this vector of frequencies :
idx = np.c_[np.flipud(np.arange(nfst)), np.full(nfst, num)]
tridx = np.concatenate((tridx, idx), axis=0) if tridx.size else idx
f = np.concatenate((f, fst), axis=0) if f.size else fst
return f, tridx
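# Example (sketch): increasing-bandwidth amplitude bands starting at 60 Hz.
# >>> f, tridx = pac_trivec(f_start=60., f_end=160., f_width=10.)
# >>> f[:2]
# array([[60., 70.],
#        [60., 80.]])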
class PSD(object):
"""Power Spectrum Density for electrophysiological brain data.
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency.
"""
def __init__(self, x, sf):
"""Init."""
assert isinstance(x, np.ndarray) and (x.ndim == 2), (
"x should be a 2d array of shape (n_epochs, n_times)")
self._n_trials, self._n_times = x.shape
logger.info(f"Compute PSD over {self._n_trials} trials and "
f"{self._n_times} time points")
self._freqs, self._psd = periodogram(x, fs=sf, window=None,
nfft=self._n_times,
detrend='constant',
return_onesided=True,
scaling='density', axis=1)
def plot(self, f_min=None, f_max=None, confidence=95, interp=None,
log=False, grid=True, fz_title=18, fz_labels=15):
"""Plot the PSD.
Parameters
----------
f_min, f_max : (int, float) | None
Frequency bounds to use for plotting
confidence : (int, float) | None
Light gray confidence interval. If None, no interval will be
displayed
interp : int | None
Line interpolation integer. For example, if interp is 10 the number
            of points is multiplied by 10
log : bool | False
Use a log scale representation
grid : bool | True
Add a grid to the plot
fz_title : int | 18
Font size for the title
fz_labels : int | 15
Font size the x/y labels
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
import matplotlib.pyplot as plt
f_types = (int, float)
# interpolation
xvec, yvec = self._freqs, self._psd
if isinstance(interp, int) and (interp > 1):
# from scipy.interpolate import make_interp_spline, BSpline
from scipy.interpolate import interp1d
xnew = np.linspace(xvec[0], xvec[-1], len(xvec) * interp)
f = interp1d(xvec, yvec, kind='quadratic', axis=1)
yvec = f(xnew)
xvec = xnew
# (f_min, f_max)
f_min = xvec[0] if not isinstance(f_min, f_types) else f_min
f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max
# plot main psd
plt.plot(xvec, yvec.mean(0), color='black',
label='mean PSD over trials')
# plot confidence interval
if isinstance(confidence, (int, float)) and (0 < confidence < 100):
logger.info(f" Add {confidence}th confidence interval")
interval = (100. - confidence) / 2
kw = dict(axis=0, interpolation='nearest')
psd_min = np.percentile(yvec, interval, **kw)
psd_max = np.percentile(yvec, 100. - interval, **kw)
plt.fill_between(xvec, psd_max, psd_min, color='lightgray',
alpha=0.5,
label=f"{confidence}th confidence interval")
plt.legend(fontsize=fz_labels)
plt.xlabel("Frequencies (Hz)", fontsize=fz_labels)
plt.ylabel("Power (V**2/Hz)", fontsize=fz_labels)
plt.title(f"PSD mean over {self._n_trials} trials", fontsize=fz_title)
plt.xlim(f_min, f_max)
if log:
from matplotlib.ticker import ScalarFormatter
plt.xscale('log', basex=10)
plt.gca().xaxis.set_major_formatter(ScalarFormatter())
if grid:
plt.grid(color='grey', which='major', linestyle='-',
linewidth=1., alpha=0.5)
plt.grid(color='lightgrey', which='minor', linestyle='--',
linewidth=0.5, alpha=0.5)
return plt.gca()
def plot_st_psd(self, f_min=None, f_max=None, log=False, grid=True,
fz_title=18, fz_labels=15, fz_cblabel=15, **kw):
"""Single-trial PSD plot.
Parameters
----------
f_min, f_max : (int, float) | None
Frequency bounds to use for plotting
log : bool | False
Use a log scale representation
grid : bool | True
Add a grid to the plot
fz_title : int | 18
Font size for the title
fz_labels : int | 15
Font size the x/y labels
fz_cblabel : int | 15
Font size the colorbar label labels
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
# manage input variables
kw['fz_labels'] = kw.get('fz_labels', fz_labels)
kw['fz_title'] = kw.get('fz_title', fz_title)
        kw['fz_cblabel'] = kw.get('fz_cblabel', fz_cblabel)
kw['xlabel'] = kw.get('xlabel', "Frequencies (Hz)")
kw['ylabel'] = kw.get('ylabel', "Trials")
kw['title'] = kw.get('title', "Single-trial PSD")
kw['cblabel'] = kw.get('cblabel', "Power (V**2/Hz)")
# (f_min, f_max)
xvec, psd = self._freqs, self._psd
f_types = (int, float)
f_min = xvec[0] if not isinstance(f_min, f_types) else f_min
f_max = xvec[-1] if not isinstance(f_max, f_types) else f_max
# locate (f_min, f_max) indices
f_min_idx = np.abs(xvec - f_min).argmin()
f_max_idx = np.abs(xvec - f_max).argmin()
sl_freq = slice(f_min_idx, f_max_idx)
xvec = xvec[sl_freq]
psd = psd[:, sl_freq]
# make the 2D plot
_viz = _PacVisual()
trials = np.arange(self._n_trials)
_viz.pacplot(psd, xvec, trials, **kw)
if log:
from matplotlib.ticker import ScalarFormatter
plt.xscale('log', basex=10)
plt.gca().xaxis.set_major_formatter(ScalarFormatter())
if grid:
plt.grid(color='grey', which='major', linestyle='-',
linewidth=1., alpha=0.5)
plt.grid(color='lightgrey', which='minor', linestyle='--',
linewidth=0.5, alpha=0.5)
return plt.gca()
def show(self):
"""Display the PSD figure."""
import matplotlib.pyplot as plt
plt.show()
@property
def freqs(self):
"""Get the frequency vector."""
return self._freqs
@property
def psd(self):
"""Get the psd value."""
return self._psd
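# Minimal usage sketch (not part of the original module), e.g. on random data:
#
#     x = np.random.rand(10, 1000)   # (n_epochs, n_times)
#     psd = PSD(x, sf=512.)
#     psd.plot(f_min=1., f_max=100., confidence=95)
#     psd.show()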
class BinAmplitude(_PacObj):
"""Bin the amplitude according to the phase.
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
f_amp : tuple, list | [60, 80]
List of two floats describing the frequency bounds for extracting the
amplitude
n_bins : int | 18
Number of bins to use to binarize the phase and the amplitude
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | (3, 6)
Control the number of cycles for filtering (only if dcomplex is
'hilbert'). Should be a tuple of integers where the first one
refers to the number of cycles for the phase and the second for the
amplitude :cite:`bahramisharif2013propagating`.
width : int | 7
Width of the Morlet's wavelet.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
"""
def __init__(self, x, sf, f_pha=[2, 4], f_amp=[60, 80], n_bins=18,
dcomplex='hilbert', cycle=(3, 6), width=7, edges=None,
n_jobs=-1):
"""Init."""
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex=dcomplex,
cycle=cycle, width=width)
# check
x = np.atleast_2d(x)
assert x.ndim <= 2, ("`x` input should be an array of shape "
"(n_epochs, n_times)")
assert isinstance(sf, (int, float)), ("`sf` input should be a integer "
"or a float")
assert all([isinstance(k, (int, float)) for k in f_pha]), (
"`f_pha` input should be a list of two integers / floats")
assert all([isinstance(k, (int, float)) for k in f_amp]), (
"`f_amp` input should be a list of two integers / floats")
assert isinstance(n_bins, int), "`n_bins` should be an integer"
logger.info(f"Binning {f_amp}Hz amplitude according to {f_pha}Hz "
"phase")
# extract phase and amplitude
kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', **kw)
amp = self.filter(sf, x, 'amplitude', **kw)
# binarize amplitude according to phase
self._amplitude = _kl_hr(pha, amp, n_bins, mean_bins=False).squeeze()
self.n_bins = n_bins
def plot(self, unit='rad', normalize=False, **kw):
"""Plot the amplitude.
Parameters
----------
unit : {'rad', 'deg'}
The unit to use for the phase. Use either 'deg' for degree or 'rad'
for radians
normalize : bool | None
Normalize the histogram by the maximum
kw : dict | {}
Additional inputs are passed to the matplotlib.pyplot.bar function
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
import matplotlib.pyplot as plt
assert unit in ['rad', 'deg']
if unit == 'rad':
self._phase = np.linspace(-np.pi, np.pi, self.n_bins)
width = 2 * np.pi / self.n_bins
elif unit == 'deg':
self._phase = np.linspace(-180, 180, self.n_bins)
width = 360 / self.n_bins
amp_mean = self._amplitude.mean(1)
if normalize:
amp_mean /= amp_mean.max()
plt.bar(self._phase, amp_mean, width=width, **kw)
plt.xlabel(f"Frequency phase ({self.n_bins} bins)", fontsize=18)
plt.ylabel("Amplitude", fontsize=18)
plt.title("Binned amplitude")
plt.autoscale(enable=True, axis='x', tight=True)
def show(self):
"""Show the figure."""
import matplotlib.pyplot as plt
plt.show()
@property
def amplitude(self):
"""Get the amplitude value."""
return self._amplitude
@property
def phase(self):
"""Get the phase value."""
return self._phase
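# Minimal usage sketch (illustrative frequency bands, not part of the original
# module):
#
#     b = BinAmplitude(x, sf, f_pha=[2, 4], f_amp=[60, 80], n_bins=18)
#     b.plot(unit='deg', normalize=True, color='C0')
#     b.show()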
class ITC(_PacObj, _PacVisual):
"""Compute the Inter-Trials Coherence (ITC).
The Inter-Trials Coherence (ITC) is a measure of phase consistency over
trials for a single recording site (electrode / sensor etc.).
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
dcomplex : {'wavelet', 'hilbert'}
Method for the complex definition. Use either 'hilbert' or
'wavelet'.
cycle : tuple | 3
Control the number of cycles for filtering the phase (only if dcomplex
is 'hilbert').
width : int | 7
Width of the Morlet's wavelet.
edges : int | None
Number of samples to discard to avoid edge effects due to filtering
"""
def __init__(self, x, sf, f_pha=[2, 4], dcomplex='hilbert', cycle=3,
width=7, edges=None, n_jobs=-1, verbose=None):
"""Init."""
set_log_level(verbose)
_PacObj.__init__(self, f_pha=f_pha, f_amp=[60, 80], dcomplex=dcomplex,
cycle=(cycle, 6), width=width)
_PacVisual.__init__(self)
# check
x = np.atleast_2d(x)
assert x.ndim <= 2, ("`x` input should be an array of shape "
"(n_epochs, n_times)")
self._n_trials = x.shape[0]
logger.info("Inter-Trials Coherence (ITC)")
logger.info(f" extracting {len(self.xvec)} phases")
# extract phase and amplitude
kw = dict(keepfilt=False, edges=edges, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', **kw)
# compute itc
self._itc = np.abs(np.exp(1j * pha).mean(1)).squeeze()
self._sf = sf
def plot(self, times=None, **kw):
"""Plot the Inter-Trials Coherence.
Parameters
----------
times : array_like | None
Custom time vector to use
kw : dict | {}
Additional inputs are either pass to the matplotlib.pyplot.plot
function if a single phase band is used, otherwise to the
matplotlib.pyplot.pcolormesh function
Returns
-------
ax : Matplotlib axis
The matplotlib axis that contains the figure
"""
import matplotlib.pyplot as plt
n_pts = self._itc.shape[-1]
if not isinstance(times, np.ndarray):
times = np.arange(n_pts) / self._sf
times = times[self._edges]
        assert len(times) == n_pts, ("The length of the time vector should be "
                                     f"{n_pts}")
xlab = 'Time'
title = f"Inter-Trials Coherence ({self._n_trials} trials)"
if self._itc.ndim == 1:
plt.plot(times, self._itc, **kw)
elif self._itc.ndim == 2:
vmin = kw.get('vmin', np.percentile(self._itc, 1))
vmax = kw.get('vmax', np.percentile(self._itc, 99))
self.pacplot(self._itc, times, self.xvec, vmin=vmin, vmax=vmax,
ylabel="Frequency for phase (Hz)", xlabel=xlab,
title=title, **kw)
return plt.gca()
def show(self):
"""Show the figure."""
import matplotlib.pyplot as plt
plt.show()
@property
def itc(self):
"""Get the itc value."""
return self._itc
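# Minimal usage sketch (single illustrative phase band, not part of the
# original module):
#
#     itc = ITC(x, sf, f_pha=[8, 12])
#     itc.plot(times=np.arange(x.shape[1]) / sf)
#     itc.show()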
class PeakLockedTF(_PacObj, _PacVisual):
"""Peak-Locked Time-frequency representation.
This class can be used in order to re-align time-frequency representations
around a time-point (cue) according to the closest phase peak. This type
of visualization can bring out a cyclic behavior of the amplitude at a
given phase, potentially indicating the presence of a phase-amplitude
coupling. Here's the detailed pipeline :
* Filter around a single phase frequency bands and across multiple
amplitude frequencies
* Use a `cue` which define the time-point to use for the realignment
* Detect in the filtered phase the closest peak to the cue. This step
is repeated to each trial in order to get a list of length (n_epochs)
that contains the number of sample (shift) so that if the phase is
moved, the peak fall onto the cue. A positive shift indicates that
the phase is moved forward while a negative shift is for a backward
move
* Apply, to each trial, this shift to the amplitude
* Plot the mean re-aligned amplitudes
Parameters
----------
x : array_like
Array of data of shape (n_epochs, n_times)
sf : float
The sampling frequency
cue : int, float
Time-point to use in order to detect the closest phase peak. This
parameter works in conjunction with the `times` input below. Use
either :
* An integer and `times` is None to indicate that you want to
realign according to a time-point in sample
* A integer or a float with `times` the time vector if you want
that Tensorpac automatically infer the sample number around which
to align
times : array_like | None
Time vector
f_pha : tuple, list | [2, 4]
List of two floats describing the frequency bounds for extracting the
phase
f_amp : tuple, list | [60, 80]
Frequency vector for the amplitude. Here you can use several forms to
define those vectors :
* Dynamic definition : (start, stop, width, step)
* Using a string : `f_amp` can be 'lres', 'mres', 'hres'
respectively for low, middle and high resolution vectors
cycle : tuple | (3, 6)
Control the number of cycles for filtering. Should be a tuple of
integers where the first one refers to the number of cycles for the
phase and the second for the amplitude
:cite:`bahramisharif2013propagating`.
"""
def __init__(self, x, sf, cue, times=None, f_pha=[5, 7], f_amp='hres',
cycle=(3, 6), n_jobs=-1, verbose=None):
"""Init."""
set_log_level(verbose)
# initialize to retrieve filtering methods
_PacObj.__init__(self, f_pha=f_pha, f_amp=f_amp, dcomplex='hilbert',
cycle=cycle)
_PacVisual.__init__(self)
logger.info("PeakLockedTF object defined")
# inputs checking
x = np.atleast_2d(x)
assert isinstance(x, np.ndarray) and (x.ndim == 2)
assert isinstance(sf, (int, float))
assert isinstance(cue, (int, float))
assert isinstance(f_pha, (list, tuple)) and (len(f_pha) == 2)
n_epochs, n_times = x.shape
        # manage cue conversion
if times is None:
cue = int(cue)
times = np.arange(n_times)
logger.info(f" align on sample cue={cue}")
else:
assert isinstance(times, np.ndarray) and (len(times) == n_times)
cue_time = cue
cue = np.abs(times - cue).argmin() - 1
logger.info(f" align on time-point={cue_time} (sample={cue})")
self.cue, self._times = cue, times
# extract phase and amplitudes
logger.info(f" extract phase and amplitudes "
f"(n_amps={len(self.yvec)})")
kw = dict(keepfilt=False, n_jobs=n_jobs)
pha = self.filter(sf, x, 'phase', n_jobs=n_jobs, keepfilt=True)
amp = self.filter(sf, x, 'amplitude', n_jobs=n_jobs)
self._pha, self._amp = pha, amp ** 2
# peak detection
logger.info(f" running peak detection around sample={cue}")
self.shifts = self._peak_detection(self._pha.squeeze(), cue)
# realign phases and amplitudes
logger.info(f" realign the {n_epochs} phases and amplitudes")
self.amp_a = self._shift_signals(self._amp, self.shifts, fill_with=0.)
self.pha_a = self._shift_signals(self._pha, self.shifts, fill_with=0.)
@staticmethod
def _peak_detection(pha, cue):
"""Single trial closest to a cue peak detection.
Parameters
----------
pha : array_like
Array of single trial phases of shape (n_trials, n_times)
cue : int
Cue to use as a reference (in sample unit)
Returns
-------
peaks : array_like
Array of length (n_trials,) describing each delay to apply
to each trial in order to realign the phases. In detail :
* Positive delays means that zeros should be prepend
* Negative delays means that zeros should be append
"""
n_trials, n_times = pha.shape
peaks = []
for tr in range(n_trials):
# select the single trial phase
st_pha = pha[tr, :]
# detect all peaks across time points
st_peaks = []
for t in range(n_times - 1):
if (st_pha[t - 1] < st_pha[t]) and (st_pha[t] > st_pha[t + 1]):
st_peaks += [t]
# detect the minimum peak
min_peak = st_peaks[np.abs(np.array(st_peaks) - cue).argmin()]
peaks += [cue - min_peak]
return np.array(peaks)
@staticmethod
def _shift_signals(sig, n_shifts, fill_with=0):
"""Shift an array of signals according to an array of delays.
Parameters
----------
sig : array_like
Array of signals of shape (n_freq, n_trials, n_times)
n_shifts : array_like
Array of delays to apply to each trial of shape (n_trials,)
fill_with : int
Value to prepend / append to each shifted time-series
Returns
-------
sig_shifted : array_like
Array of shifted signals with the same shape as the input
"""
# prepare the needed variables
n_freqs, n_trials, n_pts = sig.shape
sig_shifted = np.zeros_like(sig)
# shift each trial
for tr in range(n_trials):
# select the data of a specific trial
st_shift = n_shifts[tr]
st_sig = sig[:, tr, :]
fill = np.full((n_freqs, abs(st_shift)), fill_with,
dtype=st_sig.dtype)
# shift this specific trial
if st_shift > 0: # move forward = prepend zeros
sig_shifted[:, tr, :] = np.c_[fill, st_sig][:, 0:-st_shift]
elif st_shift < 0: # move backward = append zeros
sig_shifted[:, tr, :] = np.c_[st_sig, fill][:, abs(st_shift):]
return sig_shifted
def plot(self, zscore=False, baseline=None, edges=0, **kwargs):
"""Integrated Peak-Locked TF plotting function.
Parameters
----------
zscore : bool | False
Normalize the power by using a z-score normalization. This can be
useful in order to compensate the 1 / f effect in the power
spectrum. If True, the mean and deviation are computed at the
single trial level and across all time points
baseline : tuple | None
Baseline period to use in order to apply the z-score correction.
Should be in samples.
edges : int | 0
Number of pixels to discard to compensate filtering edge effect
(`power[edges:-edges]`).
kwargs : dict | {}
Additional arguments are sent to the
:class:`tensorpac.utils.PeakLockedTF.pacplot` method
"""
# manage additional arguments
kwargs['colorbar'] = False
kwargs['ylabel'] = 'Frequency for amplitude (hz)'
kwargs['xlabel'] = ''
kwargs['fz_labels'] = kwargs.get('fz_labels', 14)
kwargs['fz_cblabel'] = kwargs.get('fz_cblabel', 14)
kwargs['fz_title'] = kwargs.get('fz_title', 16)
sl_times = slice(edges, len(self._times) - edges)
times = self._times[sl_times]
pha_n = self.pha_a[..., sl_times].squeeze()
# z-score normalization
if zscore:
if baseline is None:
bsl_idx = sl_times
else:
assert len(baseline) == 2
bsl_idx = slice(baseline[0], baseline[1])
_mean = self.amp_a[..., bsl_idx].mean(2, keepdims=True)
_std = self.amp_a[..., bsl_idx].std(2, keepdims=True)
_std[_std == 0.] = 1. # correction from NaN
amp_n = (self.amp_a[..., sl_times] - _mean) / _std
else:
amp_n = self.amp_a[..., sl_times]
# grid definition
gs = GridSpec(8, 8)
# image plot
plt.subplot(gs[slice(0, 6), 0:-1])
self.pacplot(amp_n.mean(1), times, self.yvec, **kwargs)
plt.axvline(times[self.cue], color='w', lw=2)
plt.tick_params(bottom=False, labelbottom=False)
ax_1 = plt.gca()
# external colorbar
plt.subplot(gs[slice(1, 5), -1])
cb = plt.colorbar(self._plt_im, pad=0.01, cax=plt.gca())
cb.set_label('Power (V**2/Hz)', fontsize=kwargs['fz_cblabel'])
cb.outline.set_visible(False)
# phase plot
plt.subplot(gs[slice(6, 8), 0:-1])
plt.plot(times, pha_n.T, color='lightgray', alpha=.2, lw=1.)
plt.plot(times, pha_n.mean(0), label='single trial phases', alpha=.2,
lw=1.) # legend tweaking
plt.plot(times, pha_n.mean(0), label='mean phases',
color='#1f77b4')
plt.axvline(times[self.cue], color='k', lw=2)
plt.autoscale(axis='both', tight=True, enable=True)
plt.xlabel("Times", fontsize=kwargs['fz_labels'])
plt.ylabel("V / Hz", fontsize=kwargs['fz_labels'])
# bottom legend
plt.legend(loc='center', bbox_to_anchor=(.5, -.5),
fontsize='x-large', ncol=2)
ax_2 = plt.gca()
return [ax_1, ax_2]
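# Minimal usage sketch (illustrative parameters, not part of the original
# module): re-align high-frequency amplitudes on the theta peak closest to the
# cue.
#
#     plk = PeakLockedTF(x, sf, cue=0., times=times, f_pha=[5, 7], f_amp='hres')
#     axes = plk.plot(zscore=True, baseline=(0, 100), edges=10)
#     plt.show()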
|
the-stack_0_719 | #-*- coding: utf-8 -*-
# pysqlite2/dbapi.py: pysqlite DB-API module
#
# Copyright (C) 2007-2008 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
# Note: This software has been modified for use in PyPy.
from collections import OrderedDict
from functools import wraps
import datetime
import string
import sys
import weakref
from threading import _get_ident as _thread_get_ident
try:
from __pypy__ import newlist_hint
except ImportError:
assert '__pypy__' not in sys.builtin_module_names
newlist_hint = lambda sizehint: []
if sys.version_info[0] >= 3:
StandardError = Exception
cmp = lambda x, y: (x > y) - (x < y)
long = int
xrange = range
basestring = unicode = str
buffer = memoryview
_BLOB_TYPE = bytes
else:
_BLOB_TYPE = buffer
from _sqlite3_cffi import ffi as _ffi, lib as _lib
exported_sqlite_symbols = [
'SQLITE_ALTER_TABLE',
'SQLITE_ANALYZE',
'SQLITE_ATTACH',
'SQLITE_CREATE_INDEX',
'SQLITE_CREATE_TABLE',
'SQLITE_CREATE_TEMP_INDEX',
'SQLITE_CREATE_TEMP_TABLE',
'SQLITE_CREATE_TEMP_TRIGGER',
'SQLITE_CREATE_TEMP_VIEW',
'SQLITE_CREATE_TRIGGER',
'SQLITE_CREATE_VIEW',
'SQLITE_DELETE',
'SQLITE_DENY',
'SQLITE_DETACH',
'SQLITE_DROP_INDEX',
'SQLITE_DROP_TABLE',
'SQLITE_DROP_TEMP_INDEX',
'SQLITE_DROP_TEMP_TABLE',
'SQLITE_DROP_TEMP_TRIGGER',
'SQLITE_DROP_TEMP_VIEW',
'SQLITE_DROP_TRIGGER',
'SQLITE_DROP_VIEW',
'SQLITE_IGNORE',
'SQLITE_INSERT',
'SQLITE_OK',
'SQLITE_PRAGMA',
'SQLITE_READ',
'SQLITE_REINDEX',
'SQLITE_SELECT',
'SQLITE_TRANSACTION',
'SQLITE_UPDATE',
]
for symbol in exported_sqlite_symbols:
globals()[symbol] = getattr(_lib, symbol)
_SQLITE_TRANSIENT = _lib.SQLITE_TRANSIENT
# pysqlite version information
version = "2.6.0"
# pysqlite constants
PARSE_COLNAMES = 1
PARSE_DECLTYPES = 2
# SQLite version information
sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii'))
_STMT_TYPE_UPDATE = 0
_STMT_TYPE_DELETE = 1
_STMT_TYPE_INSERT = 2
_STMT_TYPE_REPLACE = 3
_STMT_TYPE_OTHER = 4
_STMT_TYPE_SELECT = 5
_STMT_TYPE_INVALID = 6
class Error(StandardError):
pass
class Warning(StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
def connect(database, timeout=5.0, detect_types=0, isolation_level="",
check_same_thread=True, factory=None, cached_statements=100):
factory = Connection if not factory else factory
return factory(database, timeout, detect_types, isolation_level,
check_same_thread, factory, cached_statements)
def _unicode_text_factory(x):
return unicode(x, 'utf-8')
if sys.version_info[0] < 3:
def OptimizedUnicode(s):
try:
val = unicode(s, "ascii").encode("ascii")
except UnicodeDecodeError:
val = unicode(s, "utf-8")
return val
else:
OptimizedUnicode = _unicode_text_factory
class _StatementCache(object):
def __init__(self, connection, maxcount):
self.connection = connection
self.maxcount = maxcount
self.cache = OrderedDict()
def get(self, sql):
try:
stat = self.cache[sql]
except KeyError:
stat = Statement(self.connection, sql)
self.cache[sql] = stat
if len(self.cache) > self.maxcount:
self.cache.popitem(0)
else:
if stat._in_use:
stat = Statement(self.connection, sql)
self.cache[sql] = stat
return stat
class Connection(object):
__initialized = False
_db = None
def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="",
check_same_thread=True, factory=None, cached_statements=100):
self.__initialized = True
db_star = _ffi.new('sqlite3 **')
if isinstance(database, unicode):
database = database.encode('utf-8')
if _lib.sqlite3_open(database, db_star) != _lib.SQLITE_OK:
raise OperationalError("Could not open database")
self._db = db_star[0]
if timeout is not None:
timeout = int(timeout * 1000) # pysqlite2 uses timeout in seconds
_lib.sqlite3_busy_timeout(self._db, timeout)
self.row_factory = None
self.text_factory = _unicode_text_factory
self._detect_types = detect_types
self._in_transaction = False
self.isolation_level = isolation_level
self.__cursors = []
self.__cursors_counter = 0
self.__statements = []
self.__statements_counter = 0
self.__rawstatements = set()
self._statement_cache = _StatementCache(self, cached_statements)
self.__func_cache = {}
self.__aggregates = {}
self.__aggregate_instances = {}
self.__collations = {}
if check_same_thread:
self.__thread_ident = _thread_get_ident()
self.Error = Error
self.Warning = Warning
self.InterfaceError = InterfaceError
self.DatabaseError = DatabaseError
self.InternalError = InternalError
self.OperationalError = OperationalError
self.ProgrammingError = ProgrammingError
self.IntegrityError = IntegrityError
self.DataError = DataError
self.NotSupportedError = NotSupportedError
def __del__(self):
if self._db:
_lib.sqlite3_close(self._db)
def close(self):
self._check_thread()
self.__do_all_statements(Statement._finalize, True)
# depending on when this close() is called, the statements' weakrefs
# may be already dead, even though Statement.__del__() was not called
# yet. In this case, self.__rawstatements is not empty.
if self.__rawstatements is not None:
for stmt in list(self.__rawstatements):
self._finalize_raw_statement(stmt)
self.__rawstatements = None
if self._db:
ret = _lib.sqlite3_close(self._db)
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
self._db = None
def _check_closed(self):
if not self.__initialized:
raise ProgrammingError("Base Connection.__init__ not called.")
if not self._db:
raise ProgrammingError("Cannot operate on a closed database.")
def _check_closed_wrap(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self._check_closed()
return func(self, *args, **kwargs)
return wrapper
def _check_thread(self):
try:
if self.__thread_ident == _thread_get_ident():
return
except AttributeError:
pass
else:
raise ProgrammingError(
"SQLite objects created in a thread can only be used in that "
"same thread. The object was created in thread id %d and this "
"is thread id %d" % (self.__thread_ident, _thread_get_ident()))
def _check_thread_wrap(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self._check_thread()
return func(self, *args, **kwargs)
return wrapper
def _get_exception(self, error_code=None):
if error_code is None:
error_code = _lib.sqlite3_errcode(self._db)
error_message = _ffi.string(_lib.sqlite3_errmsg(self._db)).decode('utf-8')
if error_code == _lib.SQLITE_OK:
raise ValueError("error signalled but got SQLITE_OK")
elif error_code in (_lib.SQLITE_INTERNAL, _lib.SQLITE_NOTFOUND):
exc = InternalError
elif error_code == _lib.SQLITE_NOMEM:
exc = MemoryError
elif error_code in (
_lib.SQLITE_ERROR, _lib.SQLITE_PERM, _lib.SQLITE_ABORT,
_lib.SQLITE_BUSY, _lib.SQLITE_LOCKED, _lib.SQLITE_READONLY,
_lib.SQLITE_INTERRUPT, _lib.SQLITE_IOERR, _lib.SQLITE_FULL,
_lib.SQLITE_CANTOPEN, _lib.SQLITE_PROTOCOL, _lib.SQLITE_EMPTY,
_lib.SQLITE_SCHEMA):
exc = OperationalError
elif error_code == _lib.SQLITE_CORRUPT:
exc = DatabaseError
elif error_code == _lib.SQLITE_TOOBIG:
exc = DataError
elif error_code in (_lib.SQLITE_CONSTRAINT, _lib.SQLITE_MISMATCH):
exc = IntegrityError
elif error_code == _lib.SQLITE_MISUSE:
exc = ProgrammingError
else:
exc = DatabaseError
exc = exc(error_message)
exc.error_code = error_code
return exc
def _remember_cursor(self, cursor):
self.__cursors.append(weakref.ref(cursor))
self.__cursors_counter += 1
if self.__cursors_counter < 200:
return
self.__cursors_counter = 0
self.__cursors = [r for r in self.__cursors if r() is not None]
def _remember_statement(self, statement):
self.__rawstatements.add(statement._statement)
self.__statements.append(weakref.ref(statement))
self.__statements_counter += 1
if self.__statements_counter < 200:
return
self.__statements_counter = 0
self.__statements = [r for r in self.__statements if r() is not None]
def _finalize_raw_statement(self, _statement):
if self.__rawstatements is not None:
try:
self.__rawstatements.remove(_statement)
except KeyError:
return # rare case: already finalized, see issue #2097
_lib.sqlite3_finalize(_statement)
def __do_all_statements(self, action, reset_cursors):
for weakref in self.__statements:
statement = weakref()
if statement is not None:
action(statement)
if reset_cursors:
for weakref in self.__cursors:
cursor = weakref()
if cursor is not None:
cursor._reset = True
@_check_thread_wrap
@_check_closed_wrap
def __call__(self, sql):
return self._statement_cache.get(sql)
def cursor(self, factory=None):
self._check_thread()
self._check_closed()
if factory is None:
factory = Cursor
cur = factory(self)
if self.row_factory is not None:
cur.row_factory = self.row_factory
return cur
def execute(self, *args):
cur = self.cursor()
return cur.execute(*args)
def executemany(self, *args):
cur = self.cursor()
return cur.executemany(*args)
def executescript(self, *args):
cur = self.cursor()
return cur.executescript(*args)
def iterdump(self):
from sqlite3.dump import _iterdump
return _iterdump(self)
def _begin(self):
statement_star = _ffi.new('sqlite3_stmt **')
ret = _lib.sqlite3_prepare_v2(self._db, self.__begin_statement, -1,
statement_star, _ffi.NULL)
try:
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
ret = _lib.sqlite3_step(statement_star[0])
if ret != _lib.SQLITE_DONE:
raise self._get_exception(ret)
self._in_transaction = True
finally:
_lib.sqlite3_finalize(statement_star[0])
def commit(self):
self._check_thread()
self._check_closed()
if not self._in_transaction:
return
self.__do_all_statements(Statement._reset, False)
statement_star = _ffi.new('sqlite3_stmt **')
ret = _lib.sqlite3_prepare_v2(self._db, b"COMMIT", -1,
statement_star, _ffi.NULL)
try:
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
ret = _lib.sqlite3_step(statement_star[0])
if ret != _lib.SQLITE_DONE:
raise self._get_exception(ret)
self._in_transaction = False
finally:
_lib.sqlite3_finalize(statement_star[0])
def rollback(self):
self._check_thread()
self._check_closed()
if not self._in_transaction:
return
self.__do_all_statements(Statement._reset, True)
statement_star = _ffi.new('sqlite3_stmt **')
ret = _lib.sqlite3_prepare_v2(self._db, b"ROLLBACK", -1,
statement_star, _ffi.NULL)
try:
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
ret = _lib.sqlite3_step(statement_star[0])
if ret != _lib.SQLITE_DONE:
raise self._get_exception(ret)
self._in_transaction = False
finally:
_lib.sqlite3_finalize(statement_star[0])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if exc_type is None and exc_value is None and exc_tb is None:
self.commit()
else:
self.rollback()
@_check_thread_wrap
@_check_closed_wrap
def create_function(self, name, num_args, callback):
try:
closure = self.__func_cache[callback]
except KeyError:
@_ffi.callback("void(sqlite3_context*, int, sqlite3_value**)")
def closure(context, nargs, c_params):
_function_callback(callback, context, nargs, c_params)
self.__func_cache[callback] = closure
if isinstance(name, unicode):
name = name.encode('utf-8')
ret = _lib.sqlite3_create_function(self._db, name, num_args,
_lib.SQLITE_UTF8, _ffi.NULL,
closure, _ffi.NULL, _ffi.NULL)
if ret != _lib.SQLITE_OK:
raise self.OperationalError("Error creating function")
@_check_thread_wrap
@_check_closed_wrap
def create_aggregate(self, name, num_args, cls):
try:
step_callback, final_callback = self.__aggregates[cls]
except KeyError:
@_ffi.callback("void(sqlite3_context*, int, sqlite3_value**)")
def step_callback(context, argc, c_params):
res = _lib.sqlite3_aggregate_context(context,
_ffi.sizeof("size_t"))
aggregate_ptr = _ffi.cast("size_t[1]", res)
if not aggregate_ptr[0]:
try:
aggregate = cls()
except Exception:
msg = (b"user-defined aggregate's '__init__' "
b"method raised error")
_lib.sqlite3_result_error(context, msg, len(msg))
return
aggregate_id = id(aggregate)
self.__aggregate_instances[aggregate_id] = aggregate
aggregate_ptr[0] = aggregate_id
else:
aggregate = self.__aggregate_instances[aggregate_ptr[0]]
params = _convert_params(context, argc, c_params)
try:
aggregate.step(*params)
except Exception:
msg = (b"user-defined aggregate's 'step' "
b"method raised error")
_lib.sqlite3_result_error(context, msg, len(msg))
@_ffi.callback("void(sqlite3_context*)")
def final_callback(context):
res = _lib.sqlite3_aggregate_context(context,
_ffi.sizeof("size_t"))
aggregate_ptr = _ffi.cast("size_t[1]", res)
if aggregate_ptr[0]:
aggregate = self.__aggregate_instances[aggregate_ptr[0]]
try:
val = aggregate.finalize()
except Exception:
msg = (b"user-defined aggregate's 'finalize' "
b"method raised error")
_lib.sqlite3_result_error(context, msg, len(msg))
else:
_convert_result(context, val)
finally:
del self.__aggregate_instances[aggregate_ptr[0]]
self.__aggregates[cls] = (step_callback, final_callback)
if isinstance(name, unicode):
name = name.encode('utf-8')
ret = _lib.sqlite3_create_function(self._db, name, num_args,
_lib.SQLITE_UTF8, _ffi.NULL,
_ffi.NULL,
step_callback,
final_callback)
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
@_check_thread_wrap
@_check_closed_wrap
def create_collation(self, name, callback):
name = name.upper()
if not all(c in string.ascii_uppercase + string.digits + '_' for c in name):
raise ProgrammingError("invalid character in collation name")
if callback is None:
del self.__collations[name]
collation_callback = _ffi.NULL
else:
if not callable(callback):
raise TypeError("parameter must be callable")
@_ffi.callback("int(void*, int, const void*, int, const void*)")
def collation_callback(context, len1, str1, len2, str2):
text1 = _ffi.buffer(str1, len1)[:]
text2 = _ffi.buffer(str2, len2)[:]
try:
ret = callback(text1, text2)
assert isinstance(ret, (int, long))
return cmp(ret, 0)
except Exception:
return 0
self.__collations[name] = collation_callback
if isinstance(name, unicode):
name = name.encode('utf-8')
ret = _lib.sqlite3_create_collation(self._db, name,
_lib.SQLITE_UTF8,
_ffi.NULL,
collation_callback)
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
@_check_thread_wrap
@_check_closed_wrap
def set_authorizer(self, callback):
try:
authorizer = self.__func_cache[callback]
except KeyError:
@_ffi.callback("int(void*, int, const char*, const char*, "
"const char*, const char*)")
def authorizer(userdata, action, arg1, arg2, dbname, source):
try:
ret = callback(action, arg1, arg2, dbname, source)
assert isinstance(ret, int)
# try to detect cases in which cffi would swallow
# OverflowError when casting the return value
assert int(_ffi.cast('int', ret)) == ret
return ret
except Exception:
return _lib.SQLITE_DENY
self.__func_cache[callback] = authorizer
ret = _lib.sqlite3_set_authorizer(self._db, authorizer, _ffi.NULL)
if ret != _lib.SQLITE_OK:
raise self._get_exception(ret)
@_check_thread_wrap
@_check_closed_wrap
def set_progress_handler(self, callable, nsteps):
if callable is None:
progress_handler = _ffi.NULL
else:
try:
progress_handler = self.__func_cache[callable]
except KeyError:
@_ffi.callback("int(void*)")
def progress_handler(userdata):
try:
return bool(callable())
except Exception:
# abort query if error occurred
return 1
self.__func_cache[callable] = progress_handler
_lib.sqlite3_progress_handler(self._db, nsteps, progress_handler,
_ffi.NULL)
if sys.version_info[0] >= 3:
def __get_in_transaction(self):
return self._in_transaction
in_transaction = property(__get_in_transaction)
def __get_total_changes(self):
self._check_closed()
return _lib.sqlite3_total_changes(self._db)
total_changes = property(__get_total_changes)
def __get_isolation_level(self):
return self._isolation_level
def __set_isolation_level(self, val):
if val is None:
self.commit()
else:
self.__begin_statement = str("BEGIN " + val).encode('utf-8')
self._isolation_level = val
isolation_level = property(__get_isolation_level, __set_isolation_level)
if hasattr(_lib, 'sqlite3_enable_load_extension'):
@_check_thread_wrap
@_check_closed_wrap
def enable_load_extension(self, enabled):
rc = _lib.sqlite3_enable_load_extension(self._db, int(enabled))
if rc != _lib.SQLITE_OK:
raise OperationalError("Error enabling load extension")
class Cursor(object):
__initialized = False
__statement = None
def __init__(self, con):
if not isinstance(con, Connection):
raise TypeError
self.__connection = con
self.arraysize = 1
self.row_factory = None
self._reset = False
self.__locked = False
self.__closed = False
self.__lastrowid = None
self.__rowcount = -1
con._check_thread()
con._remember_cursor(self)
self.__initialized = True
def close(self):
self.__connection._check_thread()
self.__connection._check_closed()
if self.__statement:
self.__statement._reset()
self.__statement = None
self.__closed = True
def __check_cursor(self):
if not self.__initialized:
raise ProgrammingError("Base Cursor.__init__ not called.")
if self.__closed:
raise ProgrammingError("Cannot operate on a closed cursor.")
if self.__locked:
raise ProgrammingError("Recursive use of cursors not allowed.")
self.__connection._check_thread()
self.__connection._check_closed()
def __check_cursor_wrap(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.__check_cursor()
return func(self, *args, **kwargs)
return wrapper
def __check_reset(self):
if self._reset:
raise InterfaceError(
"Cursor needed to be reset because of commit/rollback "
"and can no longer be fetched from.")
def __build_row_cast_map(self):
if not self.__connection._detect_types:
return
self.__row_cast_map = []
for i in xrange(_lib.sqlite3_column_count(self.__statement._statement)):
converter = None
if self.__connection._detect_types & PARSE_COLNAMES:
colname = _lib.sqlite3_column_name(self.__statement._statement, i)
if colname:
colname = _ffi.string(colname).decode('utf-8')
type_start = -1
key = None
for pos in range(len(colname)):
if colname[pos] == '[':
type_start = pos + 1
elif colname[pos] == ']' and type_start != -1:
key = colname[type_start:pos]
converter = converters[key.upper()]
if converter is None and self.__connection._detect_types & PARSE_DECLTYPES:
decltype = _lib.sqlite3_column_decltype(self.__statement._statement, i)
if decltype:
decltype = _ffi.string(decltype).decode('utf-8')
# if multiple words, use first, eg.
# "INTEGER NOT NULL" => "INTEGER"
decltype = decltype.split()[0]
if '(' in decltype:
decltype = decltype[:decltype.index('(')]
converter = converters.get(decltype.upper(), None)
self.__row_cast_map.append(converter)
def __fetch_one_row(self):
num_cols = _lib.sqlite3_data_count(self.__statement._statement)
row = newlist_hint(num_cols)
for i in xrange(num_cols):
if self.__connection._detect_types:
converter = self.__row_cast_map[i]
else:
converter = None
if converter is not None:
blob = _lib.sqlite3_column_blob(self.__statement._statement, i)
if not blob:
val = None
else:
blob_len = _lib.sqlite3_column_bytes(self.__statement._statement, i)
val = _ffi.buffer(blob, blob_len)[:]
val = converter(val)
else:
typ = _lib.sqlite3_column_type(self.__statement._statement, i)
if typ == _lib.SQLITE_NULL:
val = None
elif typ == _lib.SQLITE_INTEGER:
val = _lib.sqlite3_column_int64(self.__statement._statement, i)
val = int(val)
elif typ == _lib.SQLITE_FLOAT:
val = _lib.sqlite3_column_double(self.__statement._statement, i)
elif typ == _lib.SQLITE_TEXT:
text = _lib.sqlite3_column_text(self.__statement._statement, i)
text_len = _lib.sqlite3_column_bytes(self.__statement._statement, i)
val = _ffi.buffer(text, text_len)[:]
try:
val = self.__connection.text_factory(val)
except Exception:
column_name = _lib.sqlite3_column_name(
self.__statement._statement, i)
if column_name:
column_name = _ffi.string(column_name).decode('utf-8')
else:
column_name = "<unknown column name>"
val = val.decode('ascii', 'replace')
raise OperationalError(
"Could not decode to UTF-8 column '%s' with text '%s'" % (
column_name, val))
elif typ == _lib.SQLITE_BLOB:
blob = _lib.sqlite3_column_blob(self.__statement._statement, i)
blob_len = _lib.sqlite3_column_bytes(self.__statement._statement, i)
val = _BLOB_TYPE(_ffi.buffer(blob, blob_len)[:])
row.append(val)
return tuple(row)
def __execute(self, multiple, sql, many_params):
self.__locked = True
self._reset = False
try:
del self.__next_row
except AttributeError:
pass
try:
if not isinstance(sql, basestring):
raise ValueError("operation parameter must be str or unicode")
try:
del self.__description
except AttributeError:
pass
self.__rowcount = -1
self.__statement = self.__connection._statement_cache.get(sql)
if self.__connection._isolation_level is not None:
if self.__statement._type in (
_STMT_TYPE_UPDATE,
_STMT_TYPE_DELETE,
_STMT_TYPE_INSERT,
_STMT_TYPE_REPLACE
):
if not self.__connection._in_transaction:
self.__connection._begin()
elif self.__statement._type == _STMT_TYPE_OTHER:
if self.__connection._in_transaction:
self.__connection.commit()
elif self.__statement._type == _STMT_TYPE_SELECT:
if multiple:
raise ProgrammingError("You cannot execute SELECT "
"statements in executemany().")
for params in many_params:
self.__statement._set_params(params)
# Actually execute the SQL statement
ret = _lib.sqlite3_step(self.__statement._statement)
if ret == _lib.SQLITE_ROW:
if multiple:
raise ProgrammingError("executemany() can only execute DML statements.")
self.__build_row_cast_map()
self.__next_row = self.__fetch_one_row()
elif ret == _lib.SQLITE_DONE:
if not multiple:
self.__statement._reset()
else:
self.__statement._reset()
raise self.__connection._get_exception(ret)
if self.__statement._type in (
_STMT_TYPE_UPDATE,
_STMT_TYPE_DELETE,
_STMT_TYPE_INSERT,
_STMT_TYPE_REPLACE
):
if self.__rowcount == -1:
self.__rowcount = 0
self.__rowcount += _lib.sqlite3_changes(self.__connection._db)
if not multiple and self.__statement._type == _STMT_TYPE_INSERT:
self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db)
else:
self.__lastrowid = None
if multiple:
self.__statement._reset()
finally:
self.__connection._in_transaction = \
not _lib.sqlite3_get_autocommit(self.__connection._db)
self.__locked = False
return self
@__check_cursor_wrap
def execute(self, sql, params=[]):
return self.__execute(False, sql, [params])
@__check_cursor_wrap
def executemany(self, sql, many_params):
return self.__execute(True, sql, many_params)
def executescript(self, sql):
self.__check_cursor()
self._reset = False
if isinstance(sql, unicode):
sql = sql.encode('utf-8')
elif not isinstance(sql, str):
raise ValueError("script argument must be unicode or string.")
statement_star = _ffi.new('sqlite3_stmt **')
next_char = _ffi.new('char **')
self.__connection.commit()
while True:
c_sql = _ffi.new("char[]", sql)
rc = _lib.sqlite3_prepare(self.__connection._db, c_sql, -1,
statement_star, next_char)
if rc != _lib.SQLITE_OK:
raise self.__connection._get_exception(rc)
rc = _lib.SQLITE_ROW
while rc == _lib.SQLITE_ROW:
if not statement_star[0]:
rc = _lib.SQLITE_OK
else:
rc = _lib.sqlite3_step(statement_star[0])
if rc != _lib.SQLITE_DONE:
_lib.sqlite3_finalize(statement_star[0])
if rc == _lib.SQLITE_OK:
break
else:
raise self.__connection._get_exception(rc)
rc = _lib.sqlite3_finalize(statement_star[0])
if rc != _lib.SQLITE_OK:
raise self.__connection._get_exception(rc)
sql = _ffi.string(next_char[0])
if not sql:
break
return self
def __iter__(self):
return self
def __next__(self):
self.__check_cursor()
self.__check_reset()
if not self.__statement:
raise StopIteration
try:
next_row = self.__next_row
except AttributeError:
raise StopIteration
del self.__next_row
if self.row_factory is not None:
next_row = self.row_factory(self, next_row)
ret = _lib.sqlite3_step(self.__statement._statement)
if ret == _lib.SQLITE_ROW:
self.__next_row = self.__fetch_one_row()
else:
self.__statement._reset()
if ret != _lib.SQLITE_DONE:
raise self.__connection._get_exception(ret)
return next_row
if sys.version_info[0] < 3:
next = __next__
del __next__
def fetchone(self):
return next(self, None)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
lst = []
for row in self:
lst.append(row)
if len(lst) == size:
break
return lst
def fetchall(self):
return list(self)
def __get_connection(self):
return self.__connection
connection = property(__get_connection)
def __get_rowcount(self):
return self.__rowcount
rowcount = property(__get_rowcount)
def __get_description(self):
try:
return self.__description
except AttributeError:
if self.__statement:
self.__description = self.__statement._get_description()
return self.__description
description = property(__get_description)
def __get_lastrowid(self):
return self.__lastrowid
lastrowid = property(__get_lastrowid)
def setinputsizes(self, *args):
pass
def setoutputsize(self, *args):
pass
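# Usage sketch (not part of the original module): the Cursor above follows the
# standard DB-API 2.0 flow. The top-level connect() factory is assumed here, as
# in the stdlib sqlite3 module; only the cursor calls are defined in this excerpt.
def _cursor_usage_example():
    con = connect(":memory:")
    cur = con.cursor()
    cur.execute("CREATE TABLE t (x INTEGER)")
    cur.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])
    con.commit()
    cur.execute("SELECT x FROM t ORDER BY x")
    return cur.fetchall()          # [(1,), (2,), (3,)]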
class Statement(object):
_statement = None
def __init__(self, connection, sql):
self.__con = connection
self._in_use = False
if not isinstance(sql, basestring):
raise Warning("SQL is of wrong type. Must be string or unicode.")
if '\0' in sql:
raise ValueError("the query contains a null character")
first_word = sql.lstrip().split(" ")[0].upper()
if first_word == "":
self._type = _STMT_TYPE_INVALID
elif first_word == "SELECT":
self._type = _STMT_TYPE_SELECT
elif first_word == "INSERT":
self._type = _STMT_TYPE_INSERT
elif first_word == "UPDATE":
self._type = _STMT_TYPE_UPDATE
elif first_word == "DELETE":
self._type = _STMT_TYPE_DELETE
elif first_word == "REPLACE":
self._type = _STMT_TYPE_REPLACE
else:
self._type = _STMT_TYPE_OTHER
if isinstance(sql, unicode):
sql = sql.encode('utf-8')
statement_star = _ffi.new('sqlite3_stmt **')
next_char = _ffi.new('char **')
c_sql = _ffi.new("char[]", sql)
ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1,
statement_star, next_char)
self._statement = statement_star[0]
if ret == _lib.SQLITE_OK and not self._statement:
# an empty statement, work around that, as it's the least trouble
self._type = _STMT_TYPE_SELECT
c_sql = _ffi.new("char[]", b"select 42")
ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1,
statement_star, next_char)
self._statement = statement_star[0]
if ret != _lib.SQLITE_OK:
raise self.__con._get_exception(ret)
self.__con._remember_statement(self)
tail = _ffi.string(next_char[0]).decode('utf-8')
if _check_remaining_sql(tail):
raise Warning("You can only execute one statement at a time.")
def __del__(self):
if self._statement:
self.__con._finalize_raw_statement(self._statement)
def _finalize(self):
if self._statement:
self.__con._finalize_raw_statement(self._statement)
self._statement = None
self._in_use = False
def _reset(self):
if self._in_use and self._statement:
_lib.sqlite3_reset(self._statement)
self._in_use = False
if sys.version_info[0] < 3:
def __check_decodable(self, param):
if self.__con.text_factory in (unicode, OptimizedUnicode,
_unicode_text_factory):
for c in param:
if ord(c) & 0x80 != 0:
raise self.__con.ProgrammingError(
"You must not use 8-bit bytestrings unless "
"you use a text_factory that can interpret "
"8-bit bytestrings (like text_factory = str). "
"It is highly recommended that you instead "
"just switch your application to Unicode strings.")
def __set_param(self, idx, param):
cvt = converters.get(type(param))
if cvt is not None:
param = cvt(param)
try:
param = adapt(param)
except:
pass # And use previous value
if param is None:
rc = _lib.sqlite3_bind_null(self._statement, idx)
elif isinstance(param, (bool, int, long)):
if -2147483648 <= param <= 2147483647:
rc = _lib.sqlite3_bind_int(self._statement, idx, param)
else:
rc = _lib.sqlite3_bind_int64(self._statement, idx, param)
elif isinstance(param, float):
rc = _lib.sqlite3_bind_double(self._statement, idx, param)
elif isinstance(param, unicode):
param = param.encode("utf-8")
rc = _lib.sqlite3_bind_text(self._statement, idx, param,
len(param), _SQLITE_TRANSIENT)
elif isinstance(param, str):
self.__check_decodable(param)
rc = _lib.sqlite3_bind_text(self._statement, idx, param,
len(param), _SQLITE_TRANSIENT)
elif isinstance(param, (buffer, bytes)):
param = bytes(param)
rc = _lib.sqlite3_bind_blob(self._statement, idx, param,
len(param), _SQLITE_TRANSIENT)
else:
rc = -1
return rc
def _set_params(self, params):
self._in_use = True
num_params_needed = _lib.sqlite3_bind_parameter_count(self._statement)
if isinstance(params, (tuple, list)) or \
not isinstance(params, dict) and \
hasattr(params, '__getitem__'):
try:
num_params = len(params)
except TypeError:
num_params = -1
if num_params != num_params_needed:
raise ProgrammingError("Incorrect number of bindings supplied. "
"The current statement uses %d, and "
"there are %d supplied." %
(num_params_needed, num_params))
for i in range(num_params):
rc = self.__set_param(i + 1, params[i])
if rc != _lib.SQLITE_OK:
raise InterfaceError("Error binding parameter %d - "
"probably unsupported type." % i)
elif isinstance(params, dict):
for i in range(1, num_params_needed + 1):
param_name = _lib.sqlite3_bind_parameter_name(self._statement, i)
if not param_name:
raise ProgrammingError("Binding %d has no name, but you "
"supplied a dictionary (which has "
"only names)." % i)
param_name = _ffi.string(param_name).decode('utf-8')[1:]
try:
param = params[param_name]
except KeyError:
raise ProgrammingError("You did not supply a value for "
"binding %d." % i)
rc = self.__set_param(i, param)
if rc != _lib.SQLITE_OK:
raise InterfaceError("Error binding parameter :%s - "
"probably unsupported type." %
param_name)
else:
raise ValueError("parameters are of unsupported type")
def _get_description(self):
if self._type in (
_STMT_TYPE_INSERT,
_STMT_TYPE_UPDATE,
_STMT_TYPE_DELETE,
_STMT_TYPE_REPLACE
):
return None
desc = []
for i in xrange(_lib.sqlite3_column_count(self._statement)):
name = _lib.sqlite3_column_name(self._statement, i)
if name:
name = _ffi.string(name).decode('utf-8').split("[")[0].strip()
desc.append((name, None, None, None, None, None, None))
return desc
class Row(object):
def __init__(self, cursor, values):
self.description = cursor.description
self.values = values
def __len__(self):
return len(self.values)
def __getitem__(self, item):
if isinstance(item, (int, long)):
return self.values[item]
else:
item = item.lower()
for idx, desc in enumerate(self.description):
if desc[0].lower() == item:
return self.values[idx]
raise IndexError("No item with that key")
def keys(self):
return [desc[0] for desc in self.description]
def __eq__(self, other):
if not isinstance(other, Row):
return NotImplemented
if self.description != other.description:
return False
if self.values != other.values:
return False
return True
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(tuple(self.description)) ^ hash(tuple(self.values))
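# Illustrative sketch of how Row behaves once installed as a row factory; the
# connection passed in is an assumption, not something defined in this excerpt.
def _row_usage_example(con):
    cur = con.cursor()
    cur.row_factory = Row
    cur.execute("SELECT 1 AS answer, 'two' AS label")
    row = cur.fetchone()
    # columns are reachable by position or by case-insensitive name
    return row[0], row["ANSWER"], row.keys()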
def _check_remaining_sql(s):
state = "NORMAL"
for char in s:
if char == chr(0):
return 0
elif char == '-':
if state == "NORMAL":
state = "LINECOMMENT_1"
elif state == "LINECOMMENT_1":
state = "IN_LINECOMMENT"
elif char in (' ', '\t'):
pass
elif char == '\n':
if state == "IN_LINECOMMENT":
state = "NORMAL"
elif char == '/':
if state == "NORMAL":
state = "COMMENTSTART_1"
elif state == "COMMENTEND_1":
state = "NORMAL"
elif state == "COMMENTSTART_1":
return 1
elif char == '*':
if state == "NORMAL":
return 1
elif state == "LINECOMMENT_1":
return 1
elif state == "COMMENTSTART_1":
state = "IN_COMMENT"
elif state == "IN_COMMENT":
state = "COMMENTEND_1"
else:
if state == "COMMENTEND_1":
state = "IN_COMMENT"
elif state == "IN_LINECOMMENT":
pass
elif state == "IN_COMMENT":
pass
else:
return 1
return 0
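# Quick illustration (not part of the original module) of what the state machine
# above accepts: trailing whitespace and comments are fine, a second statement is not.
def _check_remaining_sql_examples():
    assert _check_remaining_sql("") == 0
    assert _check_remaining_sql("   -- trailing line comment") == 0
    assert _check_remaining_sql(" /* block comment */ ") == 0
    assert _check_remaining_sql("; SELECT 1") == 1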
def _convert_params(con, nargs, params):
_params = []
for i in range(nargs):
typ = _lib.sqlite3_value_type(params[i])
if typ == _lib.SQLITE_NULL:
val = None
elif typ == _lib.SQLITE_INTEGER:
val = _lib.sqlite3_value_int64(params[i])
val = int(val)
elif typ == _lib.SQLITE_FLOAT:
val = _lib.sqlite3_value_double(params[i])
elif typ == _lib.SQLITE_TEXT:
val = _lib.sqlite3_value_text(params[i])
val = _ffi.string(val).decode('utf-8')
elif typ == _lib.SQLITE_BLOB:
blob = _lib.sqlite3_value_blob(params[i])
blob_len = _lib.sqlite3_value_bytes(params[i])
val = _BLOB_TYPE(_ffi.buffer(blob, blob_len)[:])
else:
raise NotImplementedError
_params.append(val)
return _params
def _convert_result(con, val):
if val is None:
_lib.sqlite3_result_null(con)
elif isinstance(val, (bool, int, long)):
_lib.sqlite3_result_int64(con, int(val))
elif isinstance(val, float):
_lib.sqlite3_result_double(con, val)
elif isinstance(val, unicode):
val = val.encode('utf-8')
_lib.sqlite3_result_text(con, val, len(val), _SQLITE_TRANSIENT)
elif isinstance(val, str):
_lib.sqlite3_result_text(con, val, len(val), _SQLITE_TRANSIENT)
elif isinstance(val, (buffer, bytes)):
_lib.sqlite3_result_blob(con, bytes(val), len(val), _SQLITE_TRANSIENT)
else:
raise NotImplementedError
def _function_callback(real_cb, context, nargs, c_params):
params = _convert_params(context, nargs, c_params)
try:
val = real_cb(*params)
except Exception:
msg = b"user-defined function raised exception"
_lib.sqlite3_result_error(context, msg, len(msg))
else:
_convert_result(context, val)
converters = {}
adapters = {}
class PrepareProtocol(object):
pass
def register_adapter(typ, callable):
adapters[typ, PrepareProtocol] = callable
def register_converter(name, callable):
converters[name.upper()] = callable
def register_adapters_and_converters():
def adapt_date(val):
return val.isoformat()
def adapt_datetime(val):
return val.isoformat(" ")
def convert_date(val):
return datetime.date(*map(int, val.split("-")))
def convert_timestamp(val):
datepart, timepart = val.split(" ")
year, month, day = map(int, datepart.split("-"))
timepart_full = timepart.split(".")
hours, minutes, seconds = map(int, timepart_full[0].split(":"))
if len(timepart_full) == 2:
microseconds = int(timepart_full[1])
else:
microseconds = 0
return datetime.datetime(year, month, day, hours, minutes, seconds,
microseconds)
register_adapter(datetime.date, adapt_date)
register_adapter(datetime.datetime, adapt_datetime)
register_converter("date", convert_date)
register_converter("timestamp", convert_timestamp)
def adapt(val, proto=PrepareProtocol):
# look for an adapter in the registry
adapter = adapters.get((type(val), proto), None)
if adapter is not None:
return adapter(val)
# try to have the protocol adapt this object
if hasattr(proto, '__adapt__'):
try:
adapted = proto.__adapt__(val)
except TypeError:
pass
else:
if adapted is not None:
return adapted
# and finally try to have the object adapt itself
if hasattr(val, '__conform__'):
try:
adapted = val.__conform__(proto)
except TypeError:
pass
else:
if adapted is not None:
return adapted
return val
register_adapters_and_converters()
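# Sketch of the adapter/converter machinery wired up above (illustrative only):
# adapters turn Python objects into SQLite-storable values on the way in, and
# converters rebuild Python objects from declared column type names on the way out.
def _adapter_converter_example():
    import datetime
    stored = adapt(datetime.date(2020, 1, 31))       # -> '2020-01-31' via adapt_date
    restored = converters["DATE"]("2020-01-31")      # -> datetime.date(2020, 1, 31)
    return stored, restored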
|
the-stack_0_720 | # -*- coding: utf-8 -*-
from serial.serialutil import SerialException
from struct import unpack
from .serial_wrapper import SerialPort
from .constants import NO_KEY_DETECTED
from .internal import XidConnection
from .keymaps import (rb_530_keymap, rb_730_keymap, rb_830_keymap,
rb_834_keymap, lumina_keymap)
class XidScanner(object):
"""
Scan the computer for connected XID devices
"""
def __init__(self):
self.__com_ports = SerialPort.available_ports()
self.__xid_cons = []
self.detect_xid_devices()
def detect_xid_devices(self):
"""
For all of the com ports connected to the computer, send an
        XID command '_c1'. If the device responds with '_xid', it is
        an XID device.
"""
self.__xid_cons = []
for c in self.__com_ports:
device_found = False
for b in [115200, 19200, 9600, 57600, 38400]:
con = XidConnection(c, b)
try:
con.open()
except SerialException:
continue
con.flush_input()
con.flush_output()
returnval = con.send_xid_command("_c1", 5).decode('ASCII')
if returnval.startswith('_xid'):
device_found = True
self.__xid_cons.append(con)
if(returnval != '_xid0'):
# set the device into XID mode
con.send_xid_command('c10')
con.flush_input()
con.flush_output()
# be sure to reset the timer to avoid the 4.66 hours
# problem. (refer to XidConnection.xid_input_found to
# read about the 4.66 hours)
con.send_xid_command('e1')
con.send_xid_command('e5')
con.close()
if device_found:
break
def device_at_index(self, index):
"""
Returns the device at the specified index
"""
if index >= len(self.__xid_cons):
raise ValueError("Invalid device index")
return self.__xid_cons[index]
def device_count(self):
"""
Number of XID devices connected to the computer
"""
return len(self.__xid_cons)
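# Minimal usage sketch (not part of the original file): scan the serial ports
# and collect whatever XID devices answered the '_c1' probe.
def _scan_example():
    scanner = XidScanner()
    found = []
    for i in range(scanner.device_count()):
        found.append(scanner.device_at_index(i))
    return found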
class BaseDevice(object):
def __init__(self, connection, name="Unknown XID Device"):
self.con = connection
self.device_name = name
def reset_rt_timer(self):
"""
Resets the Reaction Time timer.
"""
self.con.send_xid_command("e5")
def reset_base_timer(self):
"""
Resets the base timer
"""
self.con.send_xid_command("e1")
def query_base_timer(self):
"""
gets the value from the device's base timer
"""
(_, _, time) = unpack('<ccI', self.con.send_xid_command("e3", 6))
return time
class ResponseDevice(BaseDevice):
def __init__(self, connection,
name='Unknown XID Device',
keymap=None,
trigger_prefix="Button"):
BaseDevice.__init__(self, connection, name)
self.keymap = keymap
self.trigger_name_prefix = trigger_prefix
self.response_queue = []
def poll_for_response(self):
"""
Polls the device for user input
If there is a keymapping for the device, the key map is applied
to the key reported from the device.
If a response is waiting to be processed, the response is appended
to the internal response_queue
"""
key_state = self.con.check_for_keypress()
if key_state != NO_KEY_DETECTED:
response = self.con.get_current_response()
if self.keymap is not None:
response['key'] = self.keymap[response['key']]
else:
response['key'] -= 1
self.response_queue.append(response)
def response_queue_size(self):
"""
Number of responses in the response queue
"""
return len(self.response_queue)
def get_next_response(self):
"""
Pops the response at the beginning of the response queue
and returns it.
This function returns a dict object with the following keys:
pressed: A boolean value of whether the event was a keypress
or key release.
key: The key on the device that was pressed. This is a
0 based index.
port: Device port the response came from. Typically this
is 0 on RB-series devices, and 2 on SV-1 voice key
devices.
            time: For the time being, this just returns 0. There is
                  currently an issue with clock drift in the Cedrus XID
                  devices. Once we have this issue resolved, time will
                  report the value of the RT timer in milliseconds.
"""
return self.response_queue.pop(0)
def clear_response_queue(self):
"""
Clears the response queue
"""
self.response_queue = []
def __repr__(self):
return '<ResponseDevice "%s">' % self.device_name
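# Sketch of the polling pattern a ResponseDevice expects (illustrative only);
# a real experiment would run this inside its trial loop.
def _poll_example(device, trial_seconds=2.0):
    import time
    device.clear_response_queue()
    deadline = time.time() + trial_seconds
    while time.time() < deadline:
        device.poll_for_response()
    responses = []
    while device.response_queue_size() > 0:
        responses.append(device.get_next_response())
    return responses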
class StimTracker(BaseDevice):
"""
Class that encapsulates the StimTracker device.
The pulse duration defaults to 100ms. To change this, call
    StimTracker.set_pulse_duration(duration_in_milliseconds)
"""
_lines = {1: 1,
2: 2,
3: 4,
4: 8,
5: 16,
6: 32,
7: 64,
8: 128}
def __init__(self, connection, name="StimTracker"):
BaseDevice.__init__(self, connection, name)
self.con.set_using_stim_tracker(True)
self.con.send_xid_command('a10')
self.con.clear_digital_output_lines(0xff)
self.set_pulse_duration(100)
def set_pulse_duration(self, duration):
"""
        Sets the pulse duration, in milliseconds, for events when activate_line
        is called.
"""
if duration > 4294967295:
raise ValueError('Duration is too long. Please choose a value '
'less than 4294967296.')
big_endian = hex(duration)[2:]
if len(big_endian) % 2 != 0:
big_endian = '0'+big_endian
little_endian = []
for i in range(0, len(big_endian), 2):
little_endian.insert(0, big_endian[i:i+2])
for i in range(0, 4-len(little_endian)):
little_endian.append('00')
command = 'mp'
for i in little_endian:
command += chr(int(i, 16))
self.con.send_xid_command(command, 0)
def activate_line(self, lines=None, bitmask=None,
leave_remaining_lines=False):
"""
Triggers an output line on StimTracker.
There are 8 output lines on StimTracker that can be raised in any
combination. To raise lines 1 and 7, for example, you pass in
the list: activate_line(lines=[1, 7]).
To raise a single line, pass in just an integer, or a list with a
single element to the lines keyword argument:
activate_line(lines=3)
or
activate_line(lines=[3])
The `lines` argument must either be an Integer, list of Integers, or
None.
If you'd rather specify a bitmask for setting the lines, you can use
        the bitmask keyword argument. Bitmask must be an Integer value between
0 and 255 where 0 specifies no lines, and 255 is all lines. For a
mapping between lines and their bit values, see the `_lines` class
variable.
        To use this, call the function like so to activate lines 1 and 6:
activate_line(bitmask=33)
leave_remaining_lines tells the function to only operate on the lines
specified. For example, if lines 1 and 8 are active, and you make
the following function call:
activate_line(lines=4, leave_remaining_lines=True)
This will result in lines 1, 4 and 8 being active.
If you call activate_line(lines=4) with leave_remaining_lines=False
(the default), if lines 1 and 8 were previously active, only line 4
will be active after the call.
"""
if lines is None and bitmask is None:
raise ValueError('Must set one of lines or bitmask')
if lines is not None and bitmask is not None:
raise ValueError('Can only set one of lines or bitmask')
if bitmask is not None:
if bitmask not in range(0, 256):
raise ValueError('bitmask must be an integer between '
'0 and 255')
if lines is not None:
if not isinstance(lines, list):
lines = [lines]
bitmask = 0
for l in lines:
if l < 1 or l > 8:
raise ValueError('Line numbers must be between 1 and 8 '
'(inclusive)')
bitmask |= self._lines[l]
self.con.set_digital_output_lines(bitmask, leave_remaining_lines)
def clear_line(self, lines=None, bitmask=None,
leave_remaining_lines=False):
"""
The inverse of activate_line. If a line is active, it deactivates it.
This has the same parameters as activate_line()
"""
if lines is None and bitmask is None:
raise ValueError('Must set one of lines or bitmask')
if lines is not None and bitmask is not None:
raise ValueError('Can only set one of lines or bitmask')
if bitmask is not None:
if bitmask not in range(0, 256):
raise ValueError('bitmask must be an integer between '
'0 and 255')
if lines is not None:
if not isinstance(lines, list):
lines = [lines]
bitmask = 0
for l in lines:
if l < 1 or l > 8:
raise ValueError('Line numbers must be between 1 and 8 '
'(inclusive)')
bitmask |= self._lines[l]
self.con.clear_digital_output_lines(bitmask, leave_remaining_lines)
def __str__(self):
return '<StimTracker "%s">' % self.device_name
def __repr__(self):
return self.__str__()
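# Sketch showing the two equivalent ways of raising output lines documented in
# activate_line() above; `tracker` is assumed to be a StimTracker instance.
def _stimtracker_example(tracker):
    tracker.set_pulse_duration(50)           # 50 ms pulses
    tracker.activate_line(lines=[1, 7])      # raise lines 1 and 7
    tracker.activate_line(bitmask=65)        # same thing: 1 + 64
    tracker.clear_line(lines=1, leave_remaining_lines=True)   # only line 7 stays up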
class XidError(Exception):
pass
class XidDevice(object):
"""
Class for interfacing with a Cedrus XID device.
At the beginning of an experiment, the developer should call:
XidDevice.reset_base_timer()
Whenever a stimulus is presented, the developer should call:
XidDevice.reset_rt_timer()
Developers Note: Currently there is a known issue of clock drift
in the XID devices. Due to this, the dict returned by
XidDevice.get_next_response() returns 0 for the reaction time value.
This issue will be resolved in a future release of this library.
"""
def __init__(self, xid_connection):
self.con = xid_connection
self._impl = None
self.init_device()
def __del__(self):
self.con.close()
del self.con
def is_stimtracker(self):
return isinstance(self._impl, StimTracker)
def is_response_device(self):
return isinstance(self._impl, ResponseDevice)
def init_device(self):
"""
Initializes the device with the proper keymaps and name
"""
try:
product_id = int(self._send_command('_d2', 1))
except ValueError:
product_id = self._send_command('_d2', 1)
if product_id == 0:
self._impl = ResponseDevice(
self.con,
'Cedrus Lumina LP-400 Response Pad System',
lumina_keymap)
elif product_id == 1:
self._impl = ResponseDevice(
self.con,
'Cedrus SV-1 Voice Key',
None,
'Voice Response')
elif product_id == 2:
model_id = int(self._send_command('_d3', 1))
if model_id == 1:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-530',
rb_530_keymap)
elif model_id == 2:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-730',
rb_730_keymap)
elif model_id == 3:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-830',
rb_830_keymap)
elif model_id == 4:
self._impl = ResponseDevice(
self.con,
'Cedrus RB-834',
rb_834_keymap)
else:
raise XidError('Unknown RB Device')
elif product_id == 4:
self._impl = StimTracker(
self.con,
'Cedrus C-POD')
elif product_id == b'S':
self._impl = StimTracker(
self.con,
'Cedrus StimTracker')
elif product_id == -99:
raise XidError('Invalid XID device')
def _send_command(self, command, expected_bytes):
"""
Send an XID command to the device
"""
response = self.con.send_xid_command(command, expected_bytes)
return response
def __getattr__(self, attrname):
return getattr(self._impl, attrname)
def __repr__(self):
if self._impl is not None:
return str(self._impl)
else:
return 'Uninitialized XID device'
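# End-to-end sketch tying the classes above together (illustrative only): find a
# device, reset its timers as the class docstring recommends, then collect input.
def _xid_session_example():
    scanner = XidScanner()
    if scanner.device_count() == 0:
        return None
    dev = XidDevice(scanner.device_at_index(0))
    dev.reset_base_timer()
    dev.reset_rt_timer()        # call again whenever a stimulus is presented
    if dev.is_response_device():
        dev.poll_for_response()
        if dev.response_queue_size() > 0:
            return dev.get_next_response()
    return None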
|
the-stack_0_722 | # -*- coding: utf-8 -*-
# @Author: yulidong
# @Date: 2018-07-17 10:44:43
# @Last Modified by: yulidong
# @Last Modified time: 2018-08-27 18:45:39
# -*- coding: utf-8 -*-
# @Author: lidong
# @Date: 2018-03-20 18:01:52
# @Last Modified by: yulidong
# @Last Modified time: 2018-07-16 22:16:14
import time
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import math
from math import ceil
from torch.autograd import Variable
from torch.nn.functional import cosine_similarity as cosine_s
from pssm import caffe_pb2
from pssm.models.utils import *
rsn_specs = {
'scene':
{
'n_classes': 9,
'input_size': (540, 960),
'block_config': [3, 4, 23, 3],
},
}
group_dim=8
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
if stride==1:
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
if stride==2:
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=2, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.gn1 = nn.GroupNorm(group_dim,planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.gn2 = nn.GroupNorm(group_dim,planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.gn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.gn2(out)
if self.downsample is not None:
residual = self.downsample(x)
# print(residual.shape)
# print(out.shape)
out += residual
out = self.relu(out)
return out
class feature_extraction(nn.Module):
def __init__(self):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.layer1 = self._make_layer(BasicBlock, 32, 3, 1,1,1)
self.branch1 = nn.Sequential(nn.AvgPool2d((54, 96), stride=(54,96)),
nn.Conv2d(32, 8, 1, 1, 0, 1),
nn.GroupNorm(4,8),
nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(nn.AvgPool2d((27, 48), stride=(27,48)),
nn.Conv2d(32, 8, 1, 1, 0, 1),
nn.GroupNorm(4,8),
nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(nn.AvgPool2d((36, 64), stride=(36,64)),
nn.Conv2d(32, 8, 1, 1, 0, 1),
nn.GroupNorm(4,8),
nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(nn.AvgPool2d((18, 32), stride=(18,32)),
nn.Conv2d(32, 8, 1, 1, 0, 1),
nn.GroupNorm(4,8),
nn.ReLU(inplace=True))
self.branch5 = nn.Sequential(nn.AvgPool2d((9, 16), stride=(9,16)),
nn.Conv2d(32, 8, 1, 1, 0, 1),
nn.GroupNorm(4,8),
nn.ReLU(inplace=True))
self.branch6 = nn.Sequential(nn.AvgPool2d((3, 8), stride=(3,8)),
nn.Conv2d(32, 8, 1, 1, 0, 1),
nn.GroupNorm(4,8),
nn.ReLU(inplace=True))
self.lastconv = nn.Sequential(nn.Conv2d(80, 64, 3, 1, 1, 1),
nn.GroupNorm(group_dim,64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 32, 3, 1, 1, 1),
nn.GroupNorm(group_dim,32),
nn.ReLU(inplace=True),
)
def _make_layer(self, block, planes, blocks, stride, pad, dilation):
downsample = None
layers = []
layers.append(block(self.inplanes, planes, stride))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,1))
return nn.Sequential(*layers)
def forward(self, x):
# output = self.conv1(x)
# output = self.gn1(output)
# output = self.relu1(output)
# output = self.conv2(output)
# output = self.gn2(output)
# output = self.relu2(output)
# output = self.conv3(output)
# output = self.gn3(output)
# output = self.relu3(output)
output_skip = self.layer1(x)
# output_skip=x
output_branch1 = self.branch1(output_skip)
output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)
output_branch2 = self.branch2(output_skip)
output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)
output_branch3 = self.branch3(output_skip)
output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)
output_branch4 = self.branch4(output_skip)
output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)
output_branch5 = self.branch5(output_skip)
output_branch5 = F.interpolate(output_branch5, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)
output_branch6 = self.branch6(output_skip)
output_branch6 = F.interpolate(output_branch6, (output_skip.size()[2],output_skip.size()[3]),mode='bilinear',align_corners=True)
output_feature = torch.cat((output_skip, output_branch6, output_branch5, output_branch4, output_branch3, output_branch2, output_branch1), 1)
output_feature = self.lastconv(output_feature)
#print(output_feature.shape)
return output_feature
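# Shape sanity-check for the pyramid-pooling block above (a sketch, not part of
# the original training code). The branch pool sizes assume 540x960 feature maps
# with 32 channels, as in rsn_specs; the spatial size is preserved end to end.
def _feature_extraction_shape_demo():
    net = feature_extraction()
    x = torch.randn(1, 32, 540, 960)
    with torch.no_grad():
        out = net(x)
    return out.shape          # expected: torch.Size([1, 32, 540, 960])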
class feature_extraction2(nn.Module):
def __init__(self):
super(feature_extraction2, self).__init__()
self.inplanes = 32
self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,
bias=False,dilation=1)
self.gn1 = nn.GroupNorm(group_dim,32)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1,
bias=False,dilation=1)
self.gn2 = nn.GroupNorm(group_dim,32)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(32, 32, kernel_size=7, stride=1, padding=6,
bias=False,dilation=2)
self.gn3 = nn.GroupNorm(group_dim,32)
self.relu3 = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(BasicBlock, 32, 1, 1,1,1)
def _make_layer(self, block, planes, blocks, stride, pad, dilation):
downsample = None
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,1,None,))
return nn.Sequential(*layers)
def forward(self, x):
output = self.conv1(x)
output = self.gn1(output)
output = self.relu1(output)
output = self.conv2(output)
output = self.gn2(output)
output = self.relu2(output)
output = self.conv3(output)
output = self.gn3(output)
output = self.relu3(output)
#print(output.shape)
output = self.layer1(output)
return output
class ss_argmin(nn.Module):
def __init__(self):
super(ss_argmin, self).__init__()
self.softmax = nn.Softmax(dim=-1)
def forward(self,x,min,max):
one=torch.ones(1)
zero=torch.zeros(1)
x=self.softmax(x)
index=torch.ones_like(x)*torch.range(min,max)
disparity= torch.sum(x*index,dim=-1)
v,i=torch.topk(x,k=1,dim=-1)
mask_1=torch.squeeze(torch.where(v>0.7,one,zero))
v,i=torch.topk(x,k=5,dim=-1)
v_sum=torch.sum(v,-1)
        mask_2=torch.squeeze(torch.where(v_sum>0.7,one,zero))
i_dis=torch.max(i,-1)[0]-torch.min(i,-1)[0]
mask_3=torch.squeeze(torch.where(i_dis<6,one,zero))
mask=mask_1+mask_2*mask_3
mask=torch.where(mask>0,one,zero)
return disparity*mask
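# A tiny illustration (not from the original code) of the soft-argmin idea used
# above: softmax turns per-disparity scores into a distribution and the expected
# index is taken as the disparity estimate.
def _soft_argmin_demo():
    scores = torch.tensor([[0.1, 2.0, 0.3, 0.05]])
    probs = torch.nn.functional.softmax(scores, dim=-1)
    index = torch.arange(scores.shape[-1], dtype=probs.dtype)
    return torch.sum(probs * index, dim=-1)      # probability-weighted disparity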
class rstereo(nn.Module):
def __init__(self,
n_classes=9,
block_config=[3, 4, 6, 3],
input_size= (480, 640),
version='scene'):
super(rstereo, self).__init__()
self.feature_extraction=feature_extraction().cuda(0)
self.feature_extraction2=feature_extraction2().cuda(0)
# self.aggregation_sparse=aggregation_sparse()
# self.aggregation_dense=aggregation_dense()
self.ss_argmin=ss_argmin()
# self.refinement_sparse=aggregation_sparse()
# self.refinement_dense=aggregation_dense()
def crop(self,x):
index=(x==1).nonzero()
return torch.min(index[:,0]),torch.max(index[:,0])+1,torch.min(index[:,1]),torch.max(index[:,1]+1)
    def cluster(self, feature, mask):
        ones = torch.ones_like(mask)
        zeros = torch.zeros_like(mask)
        count = torch.sum(mask)
        mean = torch.sum(torch.sum(feature, dim=-1), dim=-1) / count
        # weight each masked location by its feature distance from the mean
        weights = torch.where(mask == ones, torch.norm(feature - mean, dim=1), zeros)
        weights = torch.exp(weights / torch.max(weights)).view(weights.shape[0], weights.shape[1], 1)
        return weights
def forward(self, l,r,P,pre1,pre2):
#self.P=P[1,0]
#0 l to r,1 min,2 max
#[l_box,r_box,match],[min_d,max_d]
self.pre=pre1
self.pre2=pre2
P1=P[...,0]
P2=P[...,3]
P3=P[...,1]
P4=P[...,2]
#feature extraction
l_mask=P2-P1
s_mask=P1
#l_mask=l_mask.byte()
#s_mask=s_mask.byte()
#basic cuda 524
#print(l.type)
#1923
#print(torch.cuda.memory_allocated(1))
#2727
l_sf=self.feature_extraction2(l)
l_lf=self.feature_extraction(l_sf)
#print(torch.cuda.memory_allocated(2))
#the cuda won't copy the volume to the new gpu
# a=l_lf.cuda(1)
# b=l_lf.cuda(2)
# c=l_sf.cuda(3)
r_sf=self.feature_extraction2(r)
r_lf=self.feature_extraction(r_sf)
#print(torch.cuda.memory_allocated(1))
#3267
#print(torch.cuda.memory_allocated(2))
#reshape the mask to batch and channel
disparity=torch.zeros([540,960]).cuda(2)
one=torch.ones(1).cuda(2)
zero=torch.zeros(1).cuda(2)
cost_volume=[]
#5710
#print(value)
l_lf=l_lf.cuda(2)
r_lf=r_lf.cuda(2)
r_sf=r_sf.cuda(2)
l_sf=l_sf.cuda(2)
#985
#feature=torch.masked_select(l_sf,s_mask)
#feature=torch.masked_select(l_lf,l_mask)+torch.masked_select(l_sf,s_mask)
feature=l_lf*l_mask+l_sf*s_mask
feature=torch.where((l_mask+s_mask)>0,feature,l_lf)
s_match=s_mask.long().nonzero()
s_feature=l_sf[...,s_match[:,0],s_match[:,1]]
l_match=l_mask.long().nonzero()
l_feature=l_lf[...,l_match[:,0],l_match[:,1]]
start_time=time.time()
#0.0003
#s_r_o_t=r_sf[...,s_match[:,0],s_match[:,1]]
#1,32,n
#print(time.time()-start_time)
#print(s_match.shape)
#time 10
# for i in range(s_match.shape[0]):
# min_d=torch.max(s_match[i,1]-300,zero.long())
# #print(min_d)
# s_r_o_t=r_sf[...,s_match[i,0],min_d:s_match[i,1]]
# a=s_feature[...,i].view(1,32,1)
# #print(a.shape,s_r_o_t.shape)
# cost_volume.append(torch.where(s_match[i,1]-300>=0,cosine_s(a,s_r_o_t),zero))
#time 0.23,192,0.035,30, the number of the match points won't influence the time,only the iteration
# for i in range(300):
# s_r_o_t=r_sf[...,s_match[:,0],s_match[:,1]-i]
# cost_volume.append(torch.where(s_match[:,1]-i>=0,cosine_s(s_feature,s_r_o_t),zero))
# l_r_o_t=r_sf[...,l_match[:,0],l_match[:,1]-i]
# cost_volume.append(torch.where(l_match[:,1]-i>=0,cosine_s(l_feature,l_r_o_t),zero))
# #cost_volume=torch.stack(cost_volume)
# print(torch.cuda.memory_allocated(2))
# print(time.time()-start_time)
# time.sleep(100)
#promotion
#we can segment with bounding box and divide the whole image into many parts
#each single bounding box will be managed through network not the whole image
#matching cost computation
count=0
start_time=time.time()
for i in range(torch.max(P3).type(torch.int32)+1):
#ground 0-270, sky 0-40
# if i==13 or i == 14:
# continue
# i=60
#print(pre2.shape)
#i=14
min_d=pre1[0,0,i].long()
max_d=pre1[0,1,i].long()
object_mask=torch.where(P3==i,one,zero)
x1,y1,x2,y2,size=pre2[0,i].long()
object_mask=object_mask[0,x1:x2,y1:y2]
s_mask_o=object_mask*s_mask[0,x1:x2,y1:y2]
l_mask_o=object_mask*l_mask[0,x1:x2,y1:y2]
s_match=s_mask_o.long().nonzero()
l_match=l_mask_o.long().nonzero()
if s_match.shape[0]==0:
s_match=object_mask.nonzero()
if l_match.shape[0]==0:
l_match=object_mask.nonzero()
s_l_o=feature[...,s_match[:,0],s_match[:,1]]
l_l_o=feature[...,l_match[:,0],l_match[:,1]]
#print(torch.max(min_d,zero).long())
#s_r_o=feature[...,s_match[:,0],s_match[:,1]]
# s_r_o=r_sf[...,x1:x2,y1-max_d:y2-min_d]
# l_r_o=r_lf[...,x1:x2,y1-max_d:y2-min_d]
cost_s=[]
cost_l=[]
#ground and sky
#print(s_match.shape[0],l_match.shape[0],min_d,max_d)
for j in range(min_d,max_d):
s_r_o_t=r_sf[...,s_match[:,0],s_match[:,1]-j]
cost_s.append(torch.where(s_match[:,1]-j>=0,cosine_s(s_l_o,s_r_o_t),zero))
l_r_o_t=r_lf[...,l_match[:,0],l_match[:,1]-j]
cost_l.append(torch.where(l_match[:,1]-j>=0,cosine_s(l_l_o,l_r_o_t),zero))
cost_s=torch.stack(cost_s,-1)
cost_l=torch.stack(cost_l,-1)
#cost_volume=cost_s+cost_l
#print(torch.cuda.memory_allocated(2)/1e+6)
#time.sleep(30)
print(time.time()-start_time)
time.sleep(100)
return cost_volume
|
the-stack_0_723 | import pytest
import random
import tensorflow as tf
from run import run
from main import main
import os
import json
import shutil
cwd = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(cwd, '..', 'cotk')
def setup_function(function):
import sys
sys.argv = ['python3']
random.seed(0)
import numpy as np
np.random.seed(0)
tf.set_random_seed(0)
try:
shutil.rmtree(cwd + '/output_test')
except Exception:
pass
try:
shutil.rmtree(cwd + '/tensorboard_test')
except Exception:
pass
try:
shutil.rmtree(cwd + '/model_test')
except Exception:
pass
try:
shutil.rmtree(cwd + '/cache_test')
except Exception:
pass
os.mkdir(cwd + '/output_test')
os.mkdir(cwd + '/tensorboard_test')
os.mkdir(cwd + '/model_test')
os.mkdir(cwd + '/cache_test')
def teardown_function(function):
shutil.rmtree(cwd + '/output_test')
shutil.rmtree(cwd + '/tensorboard_test')
shutil.rmtree(cwd + '/model_test')
shutil.rmtree(cwd + '/cache_test')
def modify_args(args):
args.cuda = False
args.restore = None
args.wvclass = 'Glove'
args.wvpath = path + '/tests/wordvector/dummy_glove/300d'
args.embedding_size=300 #must be the same as the dim of wvpath
args.out_dir = cwd + '/output_test'
args.log_dir = cwd + '/tensorboard_test'
args.model_dir = cwd + '/model_test'
args.cache_dir = cwd + '/cache_test'
args.name = 'test_hred_tensorflow'
args.epochs = 1
args.checkpoint_steps = 1
args.datapath = path + '/tests/dataloader/dummy_ubuntucorpus#Ubuntu'
def test_train(mocker):
def side_effect_train(args):
modify_args(args)
args.mode = 'train'
main(args)
def side_effect_restore(args):
modify_args(args)
args.mode = 'train'
args.restore = 'last'
main(args)
def side_effect_cache(args):
modify_args(args)
args.mode = 'train'
args.cache = True
main(args)
mock = mocker.patch('main.main', side_effect=side_effect_train)
run()
tf.reset_default_graph()
mock.side_effect = side_effect_restore
run()
tf.reset_default_graph()
mock.side_effect = side_effect_cache
run()
tf.reset_default_graph()
def test_test(mocker):
def side_effect_test(args):
modify_args(args)
args.mode = 'test'
main(args)
mock = mocker.patch('main.main', side_effect=side_effect_test)
run()
old_res = json.load(open("./result.json", "r"))
tf.reset_default_graph()
run()
new_res = json.load(open("./result.json", "r"))
for key in old_res:
if key[-9:] == 'hashvalue':
assert old_res[key] == new_res[key]
tf.reset_default_graph()
|
the-stack_0_727 | __classification__ = 'UNCLASSIFIED'
__author__ = "Thomas McCullough"
import os
import re
import logging
from typing import List
logger = logging.getLogger('validation')
_the_directory = os.path.split(__file__)[0]
urn_mapping = {
'urn:SIDD:1.0.0': {
'ism_urn': 'urn:us:gov:ic:ism',
'sfa_urn': 'urn:SFA:1.2.0',
'sicommon_urn': 'urn:SICommon:0.1',
'version': '1.0',
'release': '1.0.0',
'date': '2011-08-31T00:00:00Z',
'schema': os.path.join(_the_directory, 'version1', 'SIDD_schema_V1.0.0_2011_08_31.xsd')},
'urn:SIDD:2.0.0': {
'ism_urn': 'urn:us:gov:ic:ism:13',
'sfa_urn': 'urn:SFA:1.2.0',
'sicommon_urn': 'urn:SICommon:1.0',
'version': '2.0',
'release': '2.0.0',
'date': '2019-05-31T00:00:00Z',
'schema': os.path.join(_the_directory, 'version2', 'SIDD_schema_V2.0.0_2019_05_31.xsd')},
}
_SIDD_SPECIFICATION_IDENTIFIER = 'SIDD Volume 1 Design & Implementation Description Document'
def get_specification_identifier():
"""
Get the SIDD specification identifier string.
Returns
-------
str
"""
return _SIDD_SPECIFICATION_IDENTIFIER
def check_urn(urn_string):
"""
    Checks that the urn string follows the correct pattern. This raises an
    exception for a poorly formed SIDD urn.
Parameters
----------
urn_string : str
"""
if not isinstance(urn_string, str):
raise TypeError(
'Expected a urn input of string type, got type {}'.format(type(urn_string)))
the_match = re.match(r'^urn:SIDD:\d.\d.\d$', urn_string)
if the_match is None:
raise ValueError(
'Input provided as `{}`,\nbut should be of the form '
'`urn:SIDD:<major>.<minor>.<release>'.format(urn_string))
def get_urn_details(urn_string):
"""
Gets the associated details for the given SIDD urn, or raise an exception for
poorly formatted or unrecognized urn.
Parameters
----------
urn_string
Returns
-------
dict
"""
check_urn(urn_string)
out = urn_mapping.get(urn_string, None)
if out is None:
raise KeyError(
'Got correctly formatted, but unmapped SIDD urn {}.'.format(urn_string))
return out
def get_schema_path(the_urn):
"""
Gets the path to the proper schema file for the given SIDD urn.
Parameters
----------
the_urn : str
Returns
-------
str
"""
result = get_urn_details(the_urn)
return result['schema']
def get_versions():
"""
Gets a list of recognized SIDD urn.
Returns
-------
List[str]
"""
return list(sorted(urn_mapping.keys()))
def validate_xml_ns(xml_ns, ns_key='default'):
"""
Validate the parsed SIDD xml namespace dictionary. This is expected to
accompany the use of :func:`sarpy.io.general.utils.parse_xml_from_string`.
Parameters
----------
xml_ns : dict
The xml namespace dictionary.
ns_key : str
The main SIDD element or default namespace.
Returns
-------
bool
"""
def validate_ism_urn():
if 'ism' not in xml_ns:
the_val = None
for key in xml_ns:
val = xml_ns[key]
if val.lower().startswith('urn:us:gov:ic:ism'):
the_val = val
xml_ns['ism'] = the_val
valid = True
if 'ism' not in xml_ns:
logger.error('SIDD: No `ism` namespace defined.')
valid = False
elif xml_ns['ism'] != details['ism_urn']:
logger.error(
'SIDD: SIDD {} `ISM` namespace urn is expected to be "{}", but we got "{}".\n\t'
'Differences in standard may lead to deserialization and/or '
'validation errors.'.format(sidd_urn, details['ism_urn'], xml_ns['ism']))
valid = False
return valid
def validate_sfa_urn():
if 'sfa' not in xml_ns:
the_val = None
for key in xml_ns:
val = xml_ns[key]
if val.lower().startswith('urn:sfa:'):
the_val = val
xml_ns['sfa'] = the_val
valid = True
        if 'sfa' not in xml_ns:
logger.error('SIDD: No `sfa` namespace defined.')
valid = False
elif xml_ns['sfa'] != details['sfa_urn']:
logger.error(
'SIDD: SIDD {} `SFA` namespace urn is expected to be "{}", but we got "{}".\n\t'
'Differences in standard may lead to deserialization and/or '
'validation errors.'.format(sidd_urn, details['sfa_urn'], xml_ns['sfa']))
valid = False
return valid
def validate_sicommon_urn():
if 'sicommon' not in xml_ns:
the_val = None
for key in xml_ns:
val = xml_ns[key]
if val.lower().startswith('urn:sicommon:'):
the_val = val
xml_ns['sicommon'] = the_val
valid = True
if 'sicommon' not in xml_ns:
logger.error('SIDD: No `sicommon` namespace defined.')
valid = False
elif xml_ns['sicommon'] != details['sicommon_urn']:
logger.error(
'SIDD: SIDD {} `SICommon` namespace urn is expected to be "{}", but we got "{}".\n\t'
'Differences in standard may lead to deserialization and/or '
'validation errors.'.format(sidd_urn, details['sicommon_urn'], xml_ns['sicommon']))
valid = False
return valid
if not isinstance(xml_ns, dict):
        raise ValueError('xml_ns must be a dictionary for SIDD interpretation.')
if ns_key not in xml_ns:
raise ValueError('ns_key must be a key in xml_ns.')
sidd_urn = xml_ns[ns_key]
try:
details = get_urn_details(sidd_urn)
except KeyError:
logger.error('Got unmapped sidd urn `{}`'.format(sidd_urn))
return False
valid_ns = validate_ism_urn()
valid_ns &= validate_sfa_urn()
valid_ns &= validate_sicommon_urn()
return valid_ns
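# Usage sketch (not part of the original module): look up a known SIDD urn and
# feed a hand-built namespace dictionary through the validator.
def _sidd_urn_example():
    urn = get_versions()[-1]                 # e.g. 'urn:SIDD:2.0.0'
    details = get_urn_details(urn)
    xml_ns = {
        'default': urn,
        'ism': details['ism_urn'],
        'sfa': details['sfa_urn'],
        'sicommon': details['sicommon_urn'],
    }
    return get_schema_path(urn), validate_xml_ns(xml_ns, ns_key='default')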
|
the-stack_0_728 | import math
import numbers
import random
import warnings
from collections.abc import Sequence
from typing import Tuple, List, Optional
import torch
from torch import Tensor
try:
import accimage
except ImportError:
accimage = None
from . import functional as F
from .functional import InterpolationMode, _interpolation_modes_from_int
__all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale",
"CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop",
"RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop",
"LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
"RandomPerspective", "RandomErasing", "GaussianBlur", "InterpolationMode", "RandomInvert", "RandomPosterize",
"RandomSolarize", "RandomAdjustSharpness", "RandomAutocontrast", "RandomEqualize"]
class Compose:
"""Composes several transforms together. This transform does not support torchscript.
Please, see the note below.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
.. note::
In order to script the transformations, please use ``torch.nn.Sequential`` as below.
>>> transforms = torch.nn.Sequential(
>>> transforms.CenterCrop(10),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> )
>>> scripted_transforms = torch.jit.script(transforms)
Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require
`lambda` functions or ``PIL.Image``.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class ToTensor:
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/master/references/segmentation
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class PILToTensor:
"""Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.pil_to_tensor(pic)
def __repr__(self):
return self.__class__.__name__ + '()'
class ConvertImageDtype(torch.nn.Module):
"""Convert a tensor image to the given ``dtype`` and scale the values accordingly
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
def __init__(self, dtype: torch.dtype) -> None:
super().__init__()
self.dtype = dtype
def forward(self, image):
return F.convert_image_dtype(image, self.dtype)
class ToPILImage:
"""Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
def __repr__(self):
format_string = self.__class__.__name__ + '('
if self.mode is not None:
format_string += 'mode={0}'.format(self.mode)
format_string += ')'
return format_string
class Normalize(torch.nn.Module):
"""Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
channels, this transform will normalize each channel of the input
``torch.*Tensor`` i.e.,
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation in-place.
"""
def __init__(self, mean, std, inplace=False):
super().__init__()
self.mean = mean
self.std = std
self.inplace = inplace
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (Tensor): Tensor image to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std, self.inplace)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
class Resize(torch.nn.Module):
"""Resize the input image to the given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size).
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
"""
def __init__(self, size, interpolation=InterpolationMode.BILINEAR):
super().__init__()
if not isinstance(size, (int, Sequence)):
raise TypeError("Size should be int or sequence. Got {}".format(type(size)))
if isinstance(size, Sequence) and len(size) not in (1, 2):
raise ValueError("If size is a sequence, it should have 1 or 2 values")
self.size = size
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be scaled.
Returns:
PIL Image or Tensor: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation)
def __repr__(self):
interpolate_str = self.interpolation.value
return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
class Scale(Resize):
"""
Note: This transform is deprecated in favor of Resize.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.Scale transform is deprecated, " +
"please use transforms.Resize instead.")
super(Scale, self).__init__(*args, **kwargs)
class CenterCrop(torch.nn.Module):
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
"""
def __init__(self, size):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
return F.center_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class Pad(torch.nn.Module):
"""Pad the given image on all sides with the given "pad" value.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or str or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image
- reflect: pads with reflection of image without repeating the last value on the edge
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
def __init__(self, padding, fill=0, padding_mode="constant"):
super().__init__()
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (numbers.Number, str, tuple)):
raise TypeError("Got inappropriate fill arg")
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be padded.
Returns:
PIL Image or Tensor: Padded image.
"""
return F.pad(img, self.padding, self.fill, self.padding_mode)
def __repr__(self):
return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
format(self.padding, self.fill, self.padding_mode)
class Lambda:
"""Apply a user-defined lambda as a transform. This transform does not support torchscript.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
if not callable(lambd):
raise TypeError("Argument lambd should be callable, got {}".format(repr(type(lambd).__name__)))
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self):
return self.__class__.__name__ + '()'
class RandomTransforms:
"""Base class for a list of transformations with randomness
Args:
transforms (sequence): list of transformations
"""
def __init__(self, transforms):
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence")
self.transforms = transforms
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomApply(torch.nn.Module):
"""Apply randomly a list of transformations with a given probability.
.. note::
In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of
transforms as shown below:
>>> transforms = transforms.RandomApply(torch.nn.ModuleList([
>>> transforms.ColorJitter(),
>>> ]), p=0.3)
>>> scripted_transforms = torch.jit.script(transforms)
Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require
`lambda` functions or ``PIL.Image``.
Args:
transforms (sequence or torch.nn.Module): list of transformations
p (float): probability
"""
def __init__(self, transforms, p=0.5):
super().__init__()
self.transforms = transforms
self.p = p
def forward(self, img):
if self.p < torch.rand(1):
return img
for t in self.transforms:
img = t(img)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += '\n p={}'.format(self.p)
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class RandomOrder(RandomTransforms):
"""Apply a list of transformations in a random order. This transform does not support torchscript.
"""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
return img
class RandomChoice(RandomTransforms):
"""Apply single transformation randomly picked from a list. This transform does not support torchscript.
"""
def __call__(self, img):
t = random.choice(self.transforms)
return t(img)
class RandomCrop(torch.nn.Module):
"""Crop the given image at a random location.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception. Since cropping is done
after padding, the padding seems to be done at a random offset.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or str or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value on the edge of the image
- reflect: pads with reflection of image (without repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image (repeating the last value on the edge)
padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
@staticmethod
def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
w, h = F._get_image_size(img)
th, tw = output_size
if h + 1 < th or w + 1 < tw:
raise ValueError(
"Required crop size {} is larger then input image size {}".format((th, tw), (h, w))
)
if w == tw and h == th:
return 0, 0, h, w
i = torch.randint(0, h - th + 1, size=(1, )).item()
j = torch.randint(0, w - tw + 1, size=(1, )).item()
return i, j, th, tw
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
super().__init__()
self.size = tuple(_setup_size(
size, error_msg="Please provide only two dimensions (h, w) for size."
))
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
width, height = F._get_image_size(img)
# pad the width if needed
if self.pad_if_needed and width < self.size[1]:
padding = [self.size[1] - width, 0]
img = F.pad(img, padding, self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and height < self.size[0]:
padding = [0, self.size[0] - height]
img = F.pad(img, padding, self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding)
class RandomHorizontalFlip(torch.nn.Module):
"""Horizontally flip the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
"""
if torch.rand(1) < self.p:
return F.hflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomVerticalFlip(torch.nn.Module):
"""Vertically flip the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
"""
if torch.rand(1) < self.p:
return F.vflip(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomPerspective(torch.nn.Module):
"""Performs a random perspective transformation of the given image with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
Default is 0.5.
p (float): probability of the image being transformed. Default is 0.5.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands.
If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.
"""
def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):
super().__init__()
self.p = p
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.distortion_scale = distortion_scale
self.fill = fill
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be Perspectively transformed.
Returns:
PIL Image or Tensor: Randomly transformed image.
"""
fill = self.fill
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * F._get_image_num_channels(img)
else:
fill = [float(f) for f in fill]
if torch.rand(1) < self.p:
width, height = F._get_image_size(img)
startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
return F.perspective(img, startpoints, endpoints, self.interpolation, fill)
return img
@staticmethod
def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:
"""Get parameters for ``perspective`` for a random perspective transform.
Args:
width (int): width of the image.
height (int): height of the image.
distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
Returns:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
"""
half_height = height // 2
half_width = width // 2
topleft = [
int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),
int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
]
topright = [
int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),
int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())
]
botright = [
int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),
int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
]
botleft = [
int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),
int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())
]
startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
endpoints = [topleft, topright, botright, botleft]
return startpoints, endpoints
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomResizedCrop(torch.nn.Module):
"""Crop the given image to random size and aspect ratio.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
A crop of a random size (default: 0.08 to 1.0 of the original area) and a random
aspect ratio (default: 3/4 to 4/3 of the original aspect ratio) is made. This crop
is finally resized to the given size.
This is popularly used to train the Inception networks.
Args:
size (int or sequence): expected output size of each edge. If size is an
int instead of sequence like (h, w), a square output size ``(size, size)`` is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
scale (tuple of float): scale range of the cropped image before resizing, relative to the original image.
ratio (tuple of float): aspect ratio range of the cropped image before resizing.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
``InterpolationMode.BICUBIC`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
if not isinstance(scale, Sequence):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, Sequence):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(
img: Tensor, scale: List[float], ratio: List[float]
) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image or Tensor): Input image.
scale (list): range of the crop scale, relative to the area of the original image
ratio (list): range of the crop aspect ratio, relative to the original aspect ratio
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
width, height = F._get_image_size(img)
area = height * width
for _ in range(10):
target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
log_ratio = torch.log(torch.tensor(ratio))
aspect_ratio = torch.exp(
torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
).item()
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = torch.randint(0, height - h + 1, size=(1,)).item()
j = torch.randint(0, width - w + 1, size=(1,)).item()
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped and resized.
Returns:
PIL Image or Tensor: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
def __repr__(self):
interpolate_str = self.interpolation.value
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0})'.format(interpolate_str)
return format_string
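# Usage sketch (illustrative; the 224 output size and 0.5-1.0 scale range are
# assumed values): crop 50-100% of the area at a random aspect ratio, then resize.
# >>> rrc = RandomResizedCrop(224, scale=(0.5, 1.0))
# >>> rrc(torch.rand(3, 256, 320)).shape
# torch.Size([3, 224, 224])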
class RandomSizedCrop(RandomResizedCrop):
"""
Note: This transform is deprecated in favor of RandomResizedCrop.
"""
def __init__(self, *args, **kwargs):
warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
"please use transforms.RandomResizedCrop instead.")
super(RandomSizedCrop, self).__init__(*args, **kwargs)
class FiveCrop(torch.nn.Module):
"""Crop the given image into four corners and the central crop.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an ``int``
instead of sequence like (h, w), a square crop of size (size, size) is made.
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Example:
>>> transform = Compose([
>>> FiveCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
tuple of 5 images. Image can be PIL Image or Tensor
"""
return F.five_crop(img, self.size)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class TenCrop(torch.nn.Module):
"""Crop the given image into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default).
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool): Use vertical flipping instead of horizontal
Example:
>>> transform = Compose([
>>> TenCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size, vertical_flip=False):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
self.vertical_flip = vertical_flip
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
tuple of 10 images. Image can be PIL Image or Tensor
"""
return F.ten_crop(img, self.size, self.vertical_flip)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)
class LinearTransformation(torch.nn.Module):
"""Transform a tensor image with a square transformation matrix and a mean_vector computed
offline.
This transform does not support PIL Image.
Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
subtract mean_vector from it which is then followed by computing the dot
product with the transformation matrix and then reshaping the tensor to its
original shape.
Applications:
whitening transformation: Suppose X is a zero-centered data matrix (one sample per row).
Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
perform SVD on this matrix and pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
mean_vector (Tensor): tensor [D], D = C x H x W
"""
def __init__(self, transformation_matrix, mean_vector):
super().__init__()
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError("transformation_matrix should be square. Got " +
"[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
if mean_vector.size(0) != transformation_matrix.size(0):
raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
" as any one of the dimensions of the transformation_matrix [{}]"
.format(tuple(transformation_matrix.size())))
if transformation_matrix.device != mean_vector.device:
raise ValueError("Input tensors should be on the same device. Got {} and {}"
.format(transformation_matrix.device, mean_vector.device))
self.transformation_matrix = transformation_matrix
self.mean_vector = mean_vector
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (Tensor): Tensor image to be whitened.
Returns:
Tensor: Transformed image.
"""
shape = tensor.shape
n = shape[-3] * shape[-2] * shape[-1]
if n != self.transformation_matrix.shape[0]:
raise ValueError("Input tensor and transformation matrix have incompatible shape." +
"[{} x {} x {}] != ".format(shape[-3], shape[-2], shape[-1]) +
"{}".format(self.transformation_matrix.shape[0]))
if tensor.device.type != self.mean_vector.device.type:
raise ValueError("Input tensor should be on the same device as transformation matrix and mean vector. "
"Got {} vs {}".format(tensor.device, self.mean_vector.device))
flat_tensor = tensor.view(-1, n) - self.mean_vector
transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
tensor = transformed_tensor.view(shape)
return tensor
def __repr__(self):
format_string = self.__class__.__name__ + '(transformation_matrix='
format_string += (str(self.transformation_matrix.tolist()) + ')')
format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')')
return format_string
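# Usage sketch (illustrative): building a ZCA whitening matrix from a synthetic,
# zero-centered data matrix as outlined in the docstring above. The data shape,
# the 1e-5 regularizer and the use of torch.svd are assumptions for this example.
# >>> X = torch.randn(1000, 3 * 8 * 8)                      # N x D data matrix
# >>> mean_vector = X.mean(dim=0)
# >>> Xc = X - mean_vector
# >>> cov = torch.mm(Xc.t(), Xc) / Xc.size(0)               # D x D covariance
# >>> U, S, _ = torch.svd(cov)
# >>> W = U @ torch.diag((S + 1e-5).rsqrt()) @ U.t()        # whitening matrix
# >>> whiten = LinearTransformation(W, mean_vector)
# >>> out = whiten(torch.randn(3, 8, 8))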
class ColorJitter(torch.nn.Module):
"""Randomly change the brightness, contrast, saturation and hue of an image.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
super().__init__()
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
clip_first_on_zero=False)
@torch.jit.unused
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
@staticmethod
def get_params(brightness: Optional[List[float]],
contrast: Optional[List[float]],
saturation: Optional[List[float]],
hue: Optional[List[float]]
) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:
"""Get the parameters for the randomized transform to be applied on image.
Args:
brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen
uniformly. Pass None to turn off the transformation.
contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen
uniformly. Pass None to turn off the transformation.
saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen
uniformly. Pass None to turn off the transformation.
hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.
Pass None to turn off the transformation.
Returns:
tuple: The parameters used to apply the randomized transform
along with their random order.
"""
fn_idx = torch.randperm(4)
b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))
c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))
s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))
h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))
return fn_idx, b, c, s, h
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Input image.
Returns:
PIL Image or Tensor: Color jittered image.
"""
fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \
self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
for fn_id in fn_idx:
if fn_id == 0 and brightness_factor is not None:
img = F.adjust_brightness(img, brightness_factor)
elif fn_id == 1 and contrast_factor is not None:
img = F.adjust_contrast(img, contrast_factor)
elif fn_id == 2 and saturation_factor is not None:
img = F.adjust_saturation(img, saturation_factor)
elif fn_id == 3 and hue_factor is not None:
img = F.adjust_hue(img, hue_factor)
return img
def __repr__(self):
format_string = self.__class__.__name__ + '('
format_string += 'brightness={0}'.format(self.brightness)
format_string += ', contrast={0}'.format(self.contrast)
format_string += ', saturation={0}'.format(self.saturation)
format_string += ', hue={0})'.format(self.hue)
return format_string
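# Usage sketch (illustrative; the jitter amounts are assumed values): photometric
# augmentation on a float CHW tensor in [0, 1] or on a PIL Image.
# >>> jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
# >>> out = jitter(torch.rand(3, 224, 224))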
class RandomRotation(torch.nn.Module):
"""Rotate the image by angle.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
Default is the center of the image.
fill (sequence or number, optional): Pixel fill value for the area outside the rotated
image. If given a number, the value is used for all bands.
If input is PIL Image, this option is only available for ``Pillow>=5.2.0``.
resample (int, optional): deprecated argument and will be removed since v0.10.0.
Please use `arg`:interpolation: instead.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(
self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=None, resample=None
):
super().__init__()
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
)
interpolation = _interpolation_modes_from_int(resample)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
if center is not None:
_check_sequence_input(center, "center", req_sizes=(2, ))
self.center = center
self.resample = self.interpolation = interpolation
self.expand = expand
self.fill = fill
@staticmethod
def get_params(degrees: List[float]) -> float:
"""Get parameters for ``rotate`` for a random rotation.
Returns:
float: angle parameter to be passed to ``rotate`` for random rotation.
"""
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
return angle
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be rotated.
Returns:
PIL Image or Tensor: Rotated image.
"""
fill = self.fill
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * F._get_image_num_channels(img)
else:
fill = [float(f) for f in fill]
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center, fill)
def __repr__(self):
interpolate_str = self.interpolation.value
format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
format_string += ', interpolation={0}'.format(interpolate_str)
format_string += ', expand={0}'.format(self.expand)
if self.center is not None:
format_string += ', center={0}'.format(self.center)
if self.fill is not None:
format_string += ', fill={0}'.format(self.fill)
format_string += ')'
return format_string
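# Usage sketch (illustrative; the degree range, interpolation and fill value are
# assumptions): rotate by a random angle drawn from (-30, 30) degrees.
# >>> rot = RandomRotation(degrees=(-30, 30), interpolation=InterpolationMode.BILINEAR, fill=0)
# >>> out = rot(torch.rand(3, 64, 64))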
class RandomAffine(torch.nn.Module):
"""Random affine transformation of the image keeping center invariant.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or number, optional): Range of degrees to select from.
If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the
range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,
a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
Will not apply shear by default.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands.
If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.
fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0.
Please use `arg`:fill: instead.
resample (int, optional): deprecated argument and will be removed since v0.10.0.
Please use `arg`:interpolation: instead.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(
self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0,
fillcolor=None, resample=None
):
super().__init__()
if resample is not None:
warnings.warn(
"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
)
interpolation = _interpolation_modes_from_int(resample)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
if fillcolor is not None:
warnings.warn(
"Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead"
)
fill = fillcolor
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
if translate is not None:
_check_sequence_input(translate, "translate", req_sizes=(2, ))
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
_check_sequence_input(scale, "scale", req_sizes=(2, ))
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4))
else:
self.shear = shear
self.resample = self.interpolation = interpolation
self.fillcolor = self.fill = fill
@staticmethod
def get_params(
degrees: List[float],
translate: Optional[List[float]],
scale_ranges: Optional[List[float]],
shears: Optional[List[float]],
img_size: List[int]
) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:
"""Get parameters for affine transformation
Returns:
params to be passed to the affine transformation
"""
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
if translate is not None:
max_dx = float(translate[0] * img_size[0])
max_dy = float(translate[1] * img_size[1])
tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))
ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))
translations = (tx, ty)
else:
translations = (0, 0)
if scale_ranges is not None:
scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())
else:
scale = 1.0
shear_x = shear_y = 0.0
if shears is not None:
shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())
if len(shears) == 4:
shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())
shear = (shear_x, shear_y)
return angle, translations, scale, shear
def forward(self, img):
"""
img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Affine transformed image.
"""
fill = self.fill
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * F._get_image_num_channels(img)
else:
fill = [float(f) for f in fill]
img_size = F._get_image_size(img)
ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)
return F.affine(img, *ret, interpolation=self.interpolation, fill=fill)
def __repr__(self):
s = '{name}(degrees={degrees}'
if self.translate is not None:
s += ', translate={translate}'
if self.scale is not None:
s += ', scale={scale}'
if self.shear is not None:
s += ', shear={shear}'
if self.interpolation != InterpolationMode.NEAREST:
s += ', interpolation={interpolation}'
if self.fill != 0:
s += ', fill={fill}'
s += ')'
d = dict(self.__dict__)
d['interpolation'] = self.interpolation.value
return s.format(name=self.__class__.__name__, **d)
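# Usage sketch (illustrative; all parameter values are assumptions): combine a
# small rotation, translation, scale jitter and shear in a single affine transform.
# >>> aff = RandomAffine(degrees=10, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=5)
# >>> out = aff(torch.rand(3, 64, 64))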
class Grayscale(torch.nn.Module):
"""Convert image to grayscale.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
Returns:
PIL Image: Grayscale version of the input.
- If ``num_output_channels == 1`` : returned image is single channel
- If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
"""
def __init__(self, num_output_channels=1):
super().__init__()
self.num_output_channels = num_output_channels
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be converted to grayscale.
Returns:
PIL Image or Tensor: Grayscaled image.
"""
return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)
def __repr__(self):
return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)
class RandomGrayscale(torch.nn.Module):
"""Randomly convert image to grayscale with a probability of p (default 0.1).
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
p (float): probability that image should be converted to grayscale.
Returns:
PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged
with probability (1-p).
- If input image is 1 channel: grayscale version is 1 channel
- If input image is 3 channel: grayscale version is 3 channel with r == g == b
"""
def __init__(self, p=0.1):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be converted to grayscale.
Returns:
PIL Image or Tensor: Randomly grayscaled image.
"""
num_output_channels = F._get_image_num_channels(img)
if torch.rand(1) < self.p:
return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={0})'.format(self.p)
class RandomErasing(torch.nn.Module):
""" Randomly selects a rectangle region in an torch Tensor image and erases its pixels.
This transform does not support PIL Image.
'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896
Args:
p: probability that the random erasing operation will be performed.
scale: range of proportion of erased area against input image.
ratio: range of aspect ratio of erased area.
value: erasing value. Default is 0. If a single int, it is used to
erase all pixels. If a tuple of length 3, it is used to erase
R, G, B channels respectively.
If a str of 'random', erasing each pixel with random values.
inplace: boolean to make this transform inplace. Default set to False.
Returns:
Erased Image.
Example:
>>> transform = transforms.Compose([
>>> transforms.RandomHorizontalFlip(),
>>> transforms.ToTensor(),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> transforms.RandomErasing(),
>>> ])
"""
def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
super().__init__()
if not isinstance(value, (numbers.Number, str, tuple, list)):
raise TypeError("Argument value should be either a number or str or a sequence")
if isinstance(value, str) and value != "random":
raise ValueError("If value is str, it should be 'random'")
if not isinstance(scale, (tuple, list)):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, (tuple, list)):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
if scale[0] < 0 or scale[1] > 1:
raise ValueError("Scale should be between 0 and 1")
if p < 0 or p > 1:
raise ValueError("Random erasing probability should be between 0 and 1")
self.p = p
self.scale = scale
self.ratio = ratio
self.value = value
self.inplace = inplace
@staticmethod
def get_params(
img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None
) -> Tuple[int, int, int, int, Tensor]:
"""Get parameters for ``erase`` for a random erasing.
Args:
img (Tensor): Tensor image to be erased.
scale (sequence): range of proportion of erased area against input image.
ratio (sequence): range of aspect ratio of erased area.
value (list, optional): erasing value. If None, it is interpreted as "random"
(erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,
i.e. ``value[0]``.
Returns:
tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
"""
img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]
area = img_h * img_w
for _ in range(10):
erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item()
h = int(round(math.sqrt(erase_area * aspect_ratio)))
w = int(round(math.sqrt(erase_area / aspect_ratio)))
if not (h < img_h and w < img_w):
continue
if value is None:
v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
else:
v = torch.tensor(value)[:, None, None]
i = torch.randint(0, img_h - h + 1, size=(1, )).item()
j = torch.randint(0, img_w - w + 1, size=(1, )).item()
return i, j, h, w, v
# Return original image
return 0, 0, img_h, img_w, img
def forward(self, img):
"""
Args:
img (Tensor): Tensor image to be erased.
Returns:
img (Tensor): Erased Tensor image.
"""
if torch.rand(1) < self.p:
# cast self.value to script acceptable type
if isinstance(self.value, (int, float)):
value = [self.value, ]
elif isinstance(self.value, str):
value = None
elif isinstance(self.value, tuple):
value = list(self.value)
else:
value = self.value
if value is not None and not (len(value) in (1, img.shape[-3])):
raise ValueError(
"If value is a sequence, it should have either a single value or "
"{} (number of input channels)".format(img.shape[-3])
)
x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)
return F.erase(img, x, y, h, w, v, self.inplace)
return img
class GaussianBlur(torch.nn.Module):
"""Blurs image with randomly chosen Gaussian blur.
If the image is torch Tensor, it is expected
to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
kernel_size (int or sequence): Size of the Gaussian kernel.
sigma (float or tuple of float (min, max)): Standard deviation to be used for
creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
of float (min, max), sigma is chosen uniformly at random to lie in the
given range.
Returns:
PIL Image or Tensor: Gaussian blurred version of the input image.
"""
def __init__(self, kernel_size, sigma=(0.1, 2.0)):
super().__init__()
self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
for ks in self.kernel_size:
if ks <= 0 or ks % 2 == 0:
raise ValueError("Kernel size value should be an odd and positive number.")
if isinstance(sigma, numbers.Number):
if sigma <= 0:
raise ValueError("If sigma is a single number, it must be positive.")
sigma = (sigma, sigma)
elif isinstance(sigma, Sequence) and len(sigma) == 2:
if not 0. < sigma[0] <= sigma[1]:
raise ValueError("sigma values should be positive and of the form (min, max).")
else:
raise ValueError("sigma should be a single number or a list/tuple with length 2.")
self.sigma = sigma
@staticmethod
def get_params(sigma_min: float, sigma_max: float) -> float:
"""Choose sigma for random gaussian blurring.
Args:
sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.
Returns:
float: Standard deviation to be passed to calculate kernel for gaussian blurring.
"""
return torch.empty(1).uniform_(sigma_min, sigma_max).item()
def forward(self, img: Tensor) -> Tensor:
"""
Args:
img (PIL Image or Tensor): image to be blurred.
Returns:
PIL Image or Tensor: Gaussian blurred image
"""
sigma = self.get_params(self.sigma[0], self.sigma[1])
return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])
def __repr__(self):
s = '(kernel_size={}, '.format(self.kernel_size)
s += 'sigma={})'.format(self.sigma)
return self.__class__.__name__ + s
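# Usage sketch (illustrative; kernel size and sigma range are assumed values):
# blur with a 5x5 kernel and a sigma drawn uniformly from (0.1, 2.0).
# >>> blur = GaussianBlur(kernel_size=(5, 5), sigma=(0.1, 2.0))
# >>> out = blur(torch.rand(3, 64, 64))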
def _setup_size(size, error_msg):
if isinstance(size, numbers.Number):
return int(size), int(size)
if isinstance(size, Sequence) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
def _check_sequence_input(x, name, req_sizes):
msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes])
if not isinstance(x, Sequence):
raise TypeError("{} should be a sequence of length {}.".format(name, msg))
if len(x) not in req_sizes:
raise ValueError("{} should be sequence of length {}.".format(name, msg))
def _setup_angle(x, name, req_sizes=(2, )):
if isinstance(x, numbers.Number):
if x < 0:
raise ValueError("If {} is a single number, it must be positive.".format(name))
x = [-x, x]
else:
_check_sequence_input(x, name, req_sizes)
return [float(d) for d in x]
class RandomInvert(torch.nn.Module):
"""Inverts the colors of the given image randomly with a given probability.
If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being color inverted. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be inverted.
Returns:
PIL Image or Tensor: Randomly color inverted image.
"""
if torch.rand(1).item() < self.p:
return F.invert(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomPosterize(torch.nn.Module):
"""Posterize the image randomly with a given probability by reducing the
number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,
and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
bits (int): number of bits to keep for each channel (0-8)
p (float): probability of the image being posterized. Default value is 0.5
"""
def __init__(self, bits, p=0.5):
super().__init__()
self.bits = bits
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be posterized.
Returns:
PIL Image or Tensor: Randomly posterized image.
"""
if torch.rand(1).item() < self.p:
return F.posterize(img, self.bits)
return img
def __repr__(self):
return self.__class__.__name__ + '(bits={},p={})'.format(self.bits, self.p)
class RandomSolarize(torch.nn.Module):
"""Solarize the image randomly with a given probability by inverting all pixel
values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
threshold (float): all pixels equal to or above this value are inverted.
p (float): probability of the image being solarized. Default value is 0.5
"""
def __init__(self, threshold, p=0.5):
super().__init__()
self.threshold = threshold
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be solarized.
Returns:
PIL Image or Tensor: Randomly solarized image.
"""
if torch.rand(1).item() < self.p:
return F.solarize(img, self.threshold)
return img
def __repr__(self):
return self.__class__.__name__ + '(threshold={},p={})'.format(self.threshold, self.p)
class RandomAdjustSharpness(torch.nn.Module):
"""Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,
it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
sharpness_factor (float): How much to adjust the sharpness. Can be
any non negative number. 0 gives a blurred image, 1 gives the
original image while 2 increases the sharpness by a factor of 2.
p (float): probability of the sharpness being adjusted. Default value is 0.5
"""
def __init__(self, sharpness_factor, p=0.5):
super().__init__()
self.sharpness_factor = sharpness_factor
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be sharpened.
Returns:
PIL Image or Tensor: Randomly sharpened image.
"""
if torch.rand(1).item() < self.p:
return F.adjust_sharpness(img, self.sharpness_factor)
return img
def __repr__(self):
return self.__class__.__name__ + '(sharpness_factor={},p={})'.format(self.sharpness_factor, self.p)
class RandomAutocontrast(torch.nn.Module):
"""Autocontrast the pixels of the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being autocontrasted. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be autocontrasted.
Returns:
PIL Image or Tensor: Randomly autocontrasted image.
"""
if torch.rand(1).item() < self.p:
return F.autocontrast(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
class RandomEqualize(torch.nn.Module):
"""Equalize the histogram of the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
Args:
p (float): probability of the image being equalized. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be equalized.
Returns:
PIL Image or Tensor: Randomly equalized image.
"""
if torch.rand(1).item() < self.p:
return F.equalize(img)
return img
def __repr__(self):
return self.__class__.__name__ + '(p={})'.format(self.p)
|
the-stack_0_730 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/04 14:23
# @Author : Iydon
# @File : course3.5.py
import numpy as np
from Poly import *
import matplotlib.pyplot as plt
def natural_cubic_spline(xs:list, fxs:list, display:bool=False):
"""
Cubic spline interpolation.
"""
n = len(xs)
hs = [xs[i+1]-xs[i] for i in range(n-1)]
A = np.diag([0]+hs[1:],1) + np.diag(hs[:-1]+[0],-1)
A += np.diag([1]+[2*(hs[i+1]+hs[i]) for i in range(n-2)]+[1])
bs = [0]+[3/hs[i+1]*(fxs[i+2]-fxs[i+1])-3/hs[i]*(fxs[i+1]-fxs[i]) for i in range(n-2)]+[0]
# spline coefficients: bx, cx, dx hold b_i, c_i, d_i (the a_i are just fxs[i]).
cx = [i[0] for i in (np.linalg.inv(A) * np.matrix(bs).transpose()).tolist()]
bx = [1/hs[i]*(fxs[i+1]-fxs[i])-hs[i]/3*(2*cx[i]+cx[i+1]) for i in range(n-1)]
dx = [1/3/hs[i]*(cx[i+1]-cx[i]) for i in range(n-1)]
# S_i(x)
Ss = [fxs[i]+bx[i]*Poly([1,-xs[i]])+cx[i]*Poly([1,-xs[i]])**2+dx[i]*Poly([1,-xs[i]])**3 for i in range(n-1)]
if display: print(fxs, bx, cx, dx, sep="\n\n\n")
return Ss
def clamped_cubic_spline(xs:list, fxs:list, boundray:list=[0,0]):
"""
Cubic spline interpolation.
"""
n = len(xs)
hs = [xs[i+1]-xs[i] for i in range(n-1)]
A = np.diag(hs,1) + np.diag(hs,-1)
A += np.diag([2*hs[0]]+[2*(hs[i+1]+hs[i]) for i in range(n-2)]+[2*hs[-1]])
head = [3/hs[0]*(fxs[1]-fxs[0]) - 3*boundray[0]]
tail = [3*boundray[-1] - 3/hs[-1]*(fxs[-1]-fxs[-2])]
bs = head+[3/hs[i+1]*(fxs[i+2]-fxs[i+1])-3/hs[i]*(fxs[i+1]-fxs[i]) for i in range(n-2)]+tail
# spline coefficients: bx, cx, dx hold b_i, c_i, d_i (the a_i are just fxs[i]).
cx = [i[0] for i in (np.linalg.inv(A) * np.matrix(bs).transpose()).tolist()]
bx = [1/hs[i]*(fxs[i+1]-fxs[i])-hs[i]/3*(2*cx[i]+cx[i+1]) for i in range(n-1)]
dx = [1/3/hs[i]*(cx[i+1]-cx[i]) for i in range(n-1)]
# S_i(x)
Ss = [fxs[i]+bx[i]*Poly([1,-xs[i]])+cx[i]*Poly([1,-xs[i]])**2+dx[i]*Poly([1,-xs[i]])**3 for i in range(n-1)]
return Ss
def cubic_spline_lambdify(S:str, xs:list):
"""
Lambdify the cubic spline function.
"""
f = ["%s[%d].lambdify()(x)*(%s<=x<%s)"%(S, i, xs[i], xs[i+1]) for i in range(len(xs)-1)]
return eval("lambda x: %s"%"+".join(f))
xs = [0.9,1.3,1.9,2.1,2.6,3.0,3.9,4.4,4.7,5.0,6.0,7.0,8.0,9.2,10.5,11.3,11.6,12.0,12.6,13.0,13.3]
fxs = [1.3,1.5,1.85,2.1,2.6,2.7,2.4,2.15,2.05,2.1,2.25,2.3,2.25,1.95,1.4,0.9,0.7,0.6,0.5,0.4,0.25]
S = natural_cubic_spline(xs, fxs)
f = cubic_spline_lambdify("S", xs)
plt.plot(xs, fxs, marker="*", color="orange")
x = np.linspace(0.9, 13.29, 100)
y = [f(xi) for xi in x]
plt.plot(x, y, color="blue")
plt.axis("equal")
plt.grid()
plt.show()
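# Sketch of the clamped variant (illustrative; the end slopes 0.5 and -0.5 are
# assumed boundary values, not derived from the data above):
# >>> S_clamped = clamped_cubic_spline(xs, fxs, boundray=[0.5, -0.5])
# >>> f_clamped = cubic_spline_lambdify("S_clamped", xs)
# >>> y5 = f_clamped(5.0)   # interpolated value near the tabulated data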
|
the-stack_0_732 | import sys
import os.path as op
rpws_folder = op.dirname(op.dirname(__file__))
sys.path.append(rpws_folder)
print('sys.path + {}'.format(rpws_folder))
from rpws import RevitServer
import testconfig as config
rs = RevitServer(config.test_server_name, config.test_server_version)
for parent, folders, files, models in rs.walk(config.test_folder):
print(parent)
for fd in folders:
print('\t@d {}'.format(fd.path))
for f in files:
print('\t@f {}'.format(f.path))
for m in models:
print('\t@m {}'.format(m.path))
|
the-stack_0_740 | from typing import Sequence, Union, Optional, Callable, Dict, Any, Tuple
import torch
from ignite.engine.engine import Engine
from ignite.engine.events import State, Events, EventEnum, CallableEventWithFilter
from ignite.utils import convert_tensor
from ignite.metrics import Metric
__all__ = [
"State",
"create_supervised_trainer",
"create_supervised_evaluator",
"Engine",
"Events",
"EventEnum",
"CallableEventWithFilter",
]
def _prepare_batch(
batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
):
"""Prepare batch for training: pass to a device with options.
"""
x, y = batch
return (
convert_tensor(x, device=device, non_blocking=non_blocking),
convert_tensor(y, device=device, non_blocking=non_blocking),
)
def create_supervised_trainer(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
) -> Engine:
"""
Factory function for creating a trainer for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
optimizer (`torch.optim.Optimizer`): the optimizer to use.
loss_fn (torch.nn loss function): the loss function to use.
device (str, optional): device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU or TPU.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
Note:
`engine.state.output` for this engine is defined by the `output_transform` parameter and is the loss
of the processed batch by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
* `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
* `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
Returns:
Engine: a trainer engine with supervised update function.
"""
device_type = device.type if isinstance(device, torch.device) else device
on_tpu = "xla" in device_type if device_type is not None else False
if on_tpu:
try:
import torch_xla.core.xla_model as xm
except ImportError:
raise RuntimeError("In order to run on TPU, please install PyTorch XLA")
def _update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
if on_tpu:
xm.optimizer_step(optimizer, barrier=True)
else:
optimizer.step()
return output_transform(x, y, y_pred, loss)
trainer = Engine(_update)
return trainer
def create_supervised_evaluator(
model: torch.nn.Module,
metrics: Optional[Dict[str, Metric]] = None,
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred: (y_pred, y),
) -> Engine:
"""
Factory function for creating an evaluator for supervised models.
Args:
model (`torch.nn.Module`): the model to train.
metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
device (str, optional): device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
output expected by metrics. If you change it you should use `output_transform` in metrics.
Note:
`engine.state.output` for this engine is defined by the `output_transform` parameter and is
a tuple of `(batch_pred, batch_y)` by default.
.. warning::
The internal use of `device` has changed.
`device` will now *only* be used to move the input data to the correct device.
The `model` should be moved by the user before creating an optimizer.
For more information see:
* `PyTorch Documentation <https://pytorch.org/docs/stable/optim.html#constructing-it>`_
* `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
Returns:
Engine: an evaluator engine with supervised inference function.
"""
metrics = metrics or {}
def _inference(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.eval()
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
return output_transform(x, y, y_pred)
evaluator = Engine(_inference)
for name, metric in metrics.items():
metric.attach(evaluator, name)
return evaluator
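# Usage sketch (illustrative; the model, optimizer, loaders and metric choice are
# placeholders): wiring the two factories together for a basic train/validate loop.
# >>> from ignite.metrics import Accuracy
# >>> model = torch.nn.Linear(10, 2)
# >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# >>> trainer = create_supervised_trainer(model, optimizer, torch.nn.CrossEntropyLoss())
# >>> evaluator = create_supervised_evaluator(model, metrics={"acc": Accuracy()})
# >>> @trainer.on(Events.EPOCH_COMPLETED)
# ... def run_validation(engine):
# ...     evaluator.run(val_loader)        # val_loader is a placeholder DataLoader
# >>> trainer.run(train_loader, max_epochs=5)   # train_loader is a placeholder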
|
the-stack_0_741 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from cleave import server
class MyServer(server.BaseServer):
"""
My HTTP Server
"""
def client_handler(self, client):
"""
Handles a client connection
:param client: server.BaseClient
:return: None
"""
client.send('HTTP/1.1 200 OK\r\n')
client.send('Content-Type: text/html; charset=utf-8\r\n\r\n')
client.send('<h1>Hello world</h1>')
client.send('<p><strong>My Address:</strong></p>')
client.send('<pre>{}:{}</pre>'.format(client.addr[0], client.addr[1]))
client.send('<p><strong>Request body:</strong></p>')
client.send('<pre>{}</pre>'.format(client.message))
client.send('<hr /><small>By Cleave Server 0.13 Beta</small>')
if __name__ == '__main__':
MyServer(port=80) |
the-stack_0_744 | from unittest import TestCase
from unittest.mock import patch
from pathlib import Path
from click.testing import CliRunner
from ..management.commands import bump_changelog
from hourglass import changelog
from hourglass.tests.test_changelog import UtilTests
def patch_new_version(version):
return patch.object(bump_changelog, '__version__', version)
def patch_changelog_contents(contents):
return patch.object(changelog, 'get_contents', lambda: contents)
class BumpChangelogTests(TestCase):
@patch_new_version('9.0.0')
@patch_changelog_contents(UtilTests.AFTER_BUMP)
def test_it_reports_error_on_no_release_notes(self):
result = CliRunner().invoke(bump_changelog.command)
self.assertIn('The new release has no release notes', result.output)
self.assertNotEqual(result.exit_code, 0)
@patch_new_version('0.0.1')
@patch_changelog_contents(UtilTests.BEFORE_BUMP)
def test_it_reports_error_if_new_version_is_invalid(self):
result = CliRunner().invoke(bump_changelog.command)
self.assertIn('Please change hourglass/version.py', result.output)
self.assertNotEqual(result.exit_code, 0)
@patch_new_version('9.0.0')
@patch_changelog_contents(UtilTests.BEFORE_BUMP)
def test_it_works(self):
runner = CliRunner()
with runner.isolated_filesystem():
fakelog = Path('fake-changelog.md')
with patch.object(changelog, 'PATH', fakelog):
result = CliRunner().invoke(bump_changelog.command)
self.assertIn('Modifying CHANGELOG.md', result.output)
self.assertEqual(result.exit_code, 0)
with fakelog.open('r', encoding=changelog.ENCODING) as f:
self.assertIn('9.0.0', f.read())
tagmsg = Path('tag-message-v9.0.0.txt')
with tagmsg.open('r', encoding=changelog.ENCODING) as f:
self.assertIn('Fixed some stuff', f.read())
del UtilTests # So our test runner doesn't find and run them.
|
the-stack_0_745 | import numpy as np
def vertex_voronoi(mesh):
"""
compute vertex voronoi of a mesh as described in
Meyer, M., Desbrun, M., Schroder, P., Barr, A. (2002).
Discrete differential geometry operators for triangulated 2-manifolds.
Visualization and Mathematics, 1-26.
:param mesh: trimesh object
:return: numpy array of shape (mesh.vertices.shape[0],)
"""
Nbv = mesh.vertices.shape[0]
Nbp = mesh.faces.shape[0]
obt_angs = mesh.face_angles > np.pi / 2
obt_poly = obt_angs[:, 0] | obt_angs[:, 1] | obt_angs[:, 2]
print(' -percentage of faces with an obtuse angle: ',
100.0 * len(np.where(obt_poly)[0]) / Nbp)
cot = 1 / np.tan(mesh.face_angles)
vert_voronoi = np.zeros(Nbv)
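# Mixed-area rule from Meyer et al.: in a triangle with an obtuse angle, half of the
# triangle area is assigned to the obtuse vertex and a quarter to each remaining vertex;
# for non-obtuse triangles the cotangent (circumcentric) formula below is used instead.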
for ind_p, p in enumerate(mesh.faces):
if obt_poly[ind_p]:
obt_verts = p[obt_angs[ind_p, :]]
vert_voronoi[obt_verts] = vert_voronoi[obt_verts] + \
mesh.area_faces[ind_p] / 2.0
non_obt_verts = p[[not x for x in obt_angs[ind_p, :]]]
vert_voronoi[non_obt_verts] = vert_voronoi[non_obt_verts] + \
mesh.area_faces[ind_p] / 4.0
else:
d0 = np.sum(
np.power(mesh.vertices[p[1], :] - mesh.vertices[p[2], :], 2))
d1 = np.sum(
np.power(mesh.vertices[p[2], :] - mesh.vertices[p[0], :], 2))
d2 = np.sum(
np.power(mesh.vertices[p[0], :] - mesh.vertices[p[1], :], 2))
vert_voronoi[p[0]] = vert_voronoi[p[0]] + \
(d1 * cot[ind_p, 1] + d2 * cot[ind_p, 2]) / 8.0
vert_voronoi[p[1]] = vert_voronoi[p[1]] + \
(d2 * cot[ind_p, 2] + d0 * cot[ind_p, 0]) / 8.0
vert_voronoi[p[2]] = vert_voronoi[p[2]] + \
(d0 * cot[ind_p, 0] + d1 * cot[ind_p, 1]) / 8.0
return vert_voronoi
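if __name__ == "__main__":
    # Minimal sanity check (not part of the original module), assuming the `trimesh` package
    # is installed: the per-vertex Voronoi areas partition the surface, so their sum should
    # match the total mesh area.
    import trimesh
    demo_mesh = trimesh.creation.icosphere(subdivisions=3)
    demo_areas = vertex_voronoi(demo_mesh)
    print(demo_areas.sum(), demo_mesh.area)  # the two values should agree closely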
|
the-stack_0_746 | # Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Skylark rule to generate a Junit4 TestSuite
# Assumes srcs are all .java Test files
# Assumes junit4 is already added to deps by the user.
# See https://github.com/bazelbuild/bazel/issues/1017 for background.
_OUTPUT = """import org.junit.runners.Suite;
import org.junit.runner.RunWith;
@RunWith(Suite.class)
@Suite.SuiteClasses({%s})
public class %s {}
"""
_PREFIXES = ("org", "com", "edu")
def _SafeIndex(j, val):
for i, v in enumerate(j):
if val == v:
return i
return -1
def _AsClassName(fname):
fname = [x.path for x in fname.files.to_list()][0]
toks = fname[:-5].split("/")
findex = -1
for s in _PREFIXES:
findex = _SafeIndex(toks, s)
if findex != -1:
break
if findex == -1:
fail("%s does not contain any of %s" % (fname, _PREFIXES))
return ".".join(toks[findex:]) + ".class"
def _impl(ctx):
classes = ",".join(
[_AsClassName(x) for x in ctx.attr.srcs],
)
ctx.actions.write(output = ctx.outputs.out, content = _OUTPUT % (
classes,
ctx.attr.outname,
))
_GenSuite = rule(
attrs = {
"srcs": attr.label_list(allow_files = True),
"outname": attr.string(),
},
outputs = {"out": "%{name}.java"},
implementation = _impl,
)
POST_JDK8_OPTS = [
# Enforce JDK 8 compatibility on Java 9, see
# https://docs.oracle.com/javase/9/intl/internationalization-enhancements-jdk-9.htm#JSINT-GUID-AF5AECA7-07C1-4E7D-BC10-BC7E73DC6C7F
"-Djava.locale.providers=COMPAT,CLDR,SPI",
"--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED",
]
def junit_tests(name, srcs, **kwargs):
s_name = name.replace("-", "_") + "TestSuite"
_GenSuite(
name = s_name,
srcs = srcs,
outname = s_name,
)
jvm_flags = kwargs.get("jvm_flags", [])
jvm_flags = jvm_flags + select({
"//:java9": POST_JDK8_OPTS,
"//:java_next": POST_JDK8_OPTS,
"//conditions:default": [],
})
native.java_test(
name = name,
test_class = s_name,
srcs = srcs + [":" + s_name],
**dict(kwargs, jvm_flags = jvm_flags)
)
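# Usage sketch (not part of the original file): a BUILD file could load this macro and
# generate one suite per test tree, e.g.
#
#   load("//tools/bzl:junit.bzl", "junit_tests")
#   junit_tests(
#       name = "server_tests",
#       srcs = glob(["src/test/java/**/*Test.java"]),
#       deps = [":testlib"],
#   )
#
# The load label, srcs glob and deps above are placeholders; as noted in the header comment,
# junit4 must already be present in deps.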
|
the-stack_0_747 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as q_constants
from neutron.db import agentschedulers_db as agent_db
from neutron.db import api as qdbapi
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import vpnaas
from neutron.extensions.vpnaas import VPNPluginBase
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(64), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNPluginDb(VPNPluginBase, base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def __init__(self):
"""Do the initialization for the vpn service plugin here."""
qdbapi.register_models()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
if issubclass(model, IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
else:
raise
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
if status != constants.ACTIVE:
raise vpnaas.VPNStateInvalid(id=getattr(obj, 'id', None), state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']]
}
return self._fields(res, fields)
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
dpd = ipsec_sitecon['dpd']
ipsec_sitecon['dpd_action'] = dpd.get('action', 'hold')
ipsec_sitecon['dpd_interval'] = dpd.get('interval', 30)
ipsec_sitecon['dpd_timeout'] = dpd.get('timeout', 120)
tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
if ipsec_sitecon['dpd_timeout'] < ipsec_sitecon['dpd_interval']:
raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(
attribute_a='dpd_timeout')
with context.session.begin(subtransactions=True):
#Check permissions
self._get_resource(context,
VPNService,
ipsec_sitecon['vpnservice_id'])
self._get_resource(context,
IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context,
IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
ipsec_site_conn_db = IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=constants.PENDING_CREATE,
vpnservice_id=ipsec_sitecon['vpnservice_id'],
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
dpd = ipsec_sitecon.get('dpd', {})
if dpd.get('action'):
ipsec_sitecon['dpd_action'] = dpd.get('action')
if dpd.get('interval'):
ipsec_sitecon['dpd_interval'] = dpd.get('interval')
if dpd.get('timeout'):
ipsec_sitecon['dpd_timeout'] = dpd.get('timeout')
changed_peer_cidrs = False
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context,
IPsecSiteConnection,
ipsec_site_conn_id)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in ipsec_sitecon:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(ipsec_sitecon["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
del ipsec_sitecon["peer_cidrs"]
if ipsec_sitecon:
ipsec_site_conn_db.update(ipsec_sitecon)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
context.session.delete(ipsec_site_conn_db)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
tenant_id = self._get_tenant_id_for_create(context, ike)
lifetime_info = ike.get('lifetime', [])
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
tenant_id = self._get_tenant_id_for_create(context, ipsecp)
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_'
'protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_'
'algorithm'],
encapsulation_mode=ipsecp['encapsulation_'
'mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(context,
IPsecPolicy,
ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'status': vpnservice['status']}
return self._fields(res, fields)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
tenant_id = self._get_tenant_id_for_create(context, vpns)
with context.session.begin(subtransactions=True):
vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpnservice = context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id).first()
if vpnservice:
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, VPNService, vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
class VPNPluginRpcDbMixin():
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, q_constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(VPNService)
query = query.join(IPsecSiteConnection)
query = query.join(IKEPolicy)
query = query.join(IPsecPolicy)
query = query.join(IPsecPeerCidr)
query = query.join(agent_db.RouterL3AgentBinding,
agent_db.RouterL3AgentBinding.router_id ==
VPNService.router_id)
query = query.filter(
agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def update_status_on_host(self, context, host, active_services):
with context.session.begin(subtransactions=True):
vpnservices = self._get_agent_hosting_vpn_services(
context, host)
for vpnservice in vpnservices:
if vpnservice.id in active_services:
if vpnservice.status != constants.ACTIVE:
vpnservice.status = constants.ACTIVE
else:
if vpnservice.status != constants.ERROR:
vpnservice.status = constants.ERROR
|
the-stack_0_749 | #!/usr/bin/env python
import ads1256
import time
import rospy
from std_msgs.msg import Float32
def ReadValues():
rate = 25 # Frequency in Hz
ads1256.start("1",str(rate))
pub = rospy.Publisher('/sen_4/ResVal', Float32, tcp_nodelay=False, queue_size=1)
rospy.init_node('Rheostat',anonymous=True)
loop_rate = rospy.Rate(10)  # ROS loop rate; distinct from the ADC sampling rate above
while not rospy.is_shutdown():
absoluteValue = ads1256.read_channel(0)
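# the conversion below scales the raw ADC count to volts; the constants are presumably
# specific to this board's gain and reference voltage, so adjust them for other hardware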
voltage = ((absoluteValue*100)/167.0)/1000000.0
rospy.loginfo(voltage)
pub.publish(voltage)
loop_rate.sleep()
ads1256.stop()
if __name__== '__main__':
try:
ReadValues()
except rospy.ROSInterruptException:
pass
|
the-stack_0_752 | """
Utility functions used in the logistic regression classifier.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import numpy as np
def sigmoid(v):
return 1 / (1 + np.exp(-v))
"""Computes a prediction (in the form of probabilities) for the given data vector
"""
def predict(x, theta):
p = sigmoid(np.dot(x, theta))
return np.array([p])
"""Return a function that gives a prediction from a design matrix row
"""
def gen_predictor(params_filename="./models/test/lreg-params"):
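# each line of the params file is expected to hold a "<name> <value>" pair separated by a
# single space; only the numeric second field is read into theta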
with open(params_filename, "rb") as pfile:
lines = pfile.readlines()
N = len(lines)
theta = np.ones(N)
i = 0
for line in lines:
theta[i] = float(line.strip().split(' ')[1])
i = i + 1
def predictor(X):
scores = []
for i in range(0, len(X)):
scores.extend(predict(X[i,:], theta))
return scores
return predictor
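if __name__ == "__main__":
    # Minimal sketch (not part of the original module): exercise `predict` directly with a
    # hypothetical two-feature parameter vector instead of loading a params file.
    demo_theta = np.array([0.4, -1.2])
    demo_x = np.array([1.0, 0.5])
    print(predict(demo_x, demo_theta))  # roughly sigmoid(0.4*1.0 - 1.2*0.5) = sigmoid(-0.2)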
|
the-stack_0_755 | import numpy as np
import matplotlib.pyplot as plt
import sectionproperties.pre.pre as pre
import sectionproperties.post.post as post
class Geometry:
"""Parent class for a cross-section geometry input.
Provides an interface for the user to specify the geometry defining a cross-section. A method
is provided for generating a triangular mesh, for translating the cross-section by *(x, y)* and
for plotting the geometry.
:cvar points: List of points *(x, y)* defining the vertices of the cross-section
:vartype points: list[list[float, float]]
:cvar facets: List of point index pairs *(p1, p2)* defining the edges of the cross-section
:vartype facets: list[list[int, int]]
:cvar holes: List of points *(x, y)* defining the locations of holes within the cross-section.
If there are no holes, provide an empty list [].
:vartype holes: list[list[float, float]]
:cvar control_points: A list of points *(x, y)* that define different regions of the
cross-section. A control point is an arbitrary point within a region enclosed by facets.
:vartype control_points: list[list[float, float]]
:cvar shift: Vector that shifts the cross-section by *(x, y)*
:vartype shift: list[float, float]
:cvar perimeter: List of facet indices defining the perimeter of the cross-section
:vartype perimeter: list[int]
"""
def __init__(self, control_points, shift):
"""Inits the Geometry class."""
self.control_points = control_points
self.shift = shift
self.points = []
self.facets = []
self.holes = []
self.perimeter = []
def create_mesh(self, mesh_sizes):
"""Creates a quadratic triangular mesh from the Geometry object.
:param mesh_sizes: A list of maximum element areas corresponding to each region within the
cross-section geometry.
:type mesh_sizes: list[float]
:return: Object containing generated mesh data
:rtype: :class:`meshpy.triangle.MeshInfo`
:raises AssertionError: If the number of mesh sizes does not match the number of regions
The following example creates a circular cross-section with a diameter of 50 with 64
points, and generates a mesh with a maximum triangular area of 2.5::
import sectionproperties.pre.sections as sections
geometry = sections.CircularSection(d=50, n=64)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/circle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
str = "Number of mesh_sizes ({0}), should match the number of regions ({1})".format(
len(mesh_sizes), len(self.control_points)
)
assert(len(mesh_sizes) == len(self.control_points)), str
return pre.create_mesh(
self.points, self.facets, self.holes, self.control_points, mesh_sizes)
def shift_section(self):
"""Shifts the cross-section parameters by the class variable vector *shift*."""
for point in self.points:
point[0] += self.shift[0]
point[1] += self.shift[1]
for hole in self.holes:
hole[0] += self.shift[0]
hole[1] += self.shift[1]
for cp in self.control_points:
cp[0] += self.shift[0]
cp[1] += self.shift[1]
def rotate_section(self, angle, rot_point=None):
"""Rotates the geometry and specified angle about a point. If the rotation point is not
provided, rotates the section about the first control point in the list of control points
of the :class:`~sectionproperties.pre.sections.Geometry` object.
:param float angle: Angle (degrees) by which to rotate the section. A positive angle leads
to a counter-clockwise rotation.
:param rot_point: Point *(x, y)* about which to rotate the section
:type rot_point: list[float, float]
The following example rotates a 200UB25 section clockwise by 30 degrees::
import sectionproperties.pre.sections as sections
geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
geometry.rotate_section(angle=-30)
"""
# convert angle to radians
rot_phi = angle * np.pi / 180
def get_r(pt1, pt2):
"""Returns the distance between two points."""
return ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5
def get_phi(pt1, pt2):
"""Returns the angle between two points."""
return np.arctan2(pt1[1] - pt2[1], pt1[0] - pt2[0])
def rotate_point(pt, rot_point, rot_phi):
"""Rotates a point given a rotation point and rotation angle."""
r = get_r(pt, rot_point)
phi = get_phi(pt, rot_point)
pt[0] = r * np.cos(phi + rot_phi) + rot_point[0]
pt[1] = r * np.sin(phi + rot_phi) + rot_point[1]
# use the first control point if no rotation point is specified
if rot_point is None:
rot_point = self.control_points[0]
# rotate all the points
for point in self.points:
rotate_point(point, rot_point, rot_phi)
# rotate all the holes
for hole in self.holes:
rotate_point(hole, rot_point, rot_phi)
# rotate all the control points
for cp in self.control_points:
rotate_point(cp, rot_point, rot_phi)
def mirror_section(self, axis='x', mirror_point=None):
"""Mirrors the geometry about a point on either the x or y-axis. If no point is provided,
mirrors the geometry about the first control point in the list of control points of the
:class:`~sectionproperties.pre.sections.Geometry` object.
:param string axis: Axis about which to mirror the geometry, *'x'* or *'y'*
:param mirror_point: Point about which to mirror the geometry *(x, y)*
:type mirror_point: list[float, float]
The following example mirrors a 200PFC section about the y-axis and the point (0, 0)::
import sectionproperties.pre.sections as sections
geometry = sections.PfcSection(d=200, b=75, t_f=12, t_w=6, r=12, n_r=8)
geometry.mirror_section(axis='y', mirror_point=[0, 0])
"""
# use the first control point if no mirror point is specified
if mirror_point is None:
mirror_point = self.control_points[0]
# select the axis to mirror
if axis == 'x':
i = 1
elif axis == 'y':
i = 0
else:
raise RuntimeError("Enter a valid axis: 'x' or 'y'")
# mirror all points
for point in self.points:
point[i] = 2 * mirror_point[i] - point[i]
# mirror all holes
for hole in self.holes:
hole[i] = 2 * mirror_point[i] - hole[i]
# mirror all control points
for cp in self.control_points:
cp[i] = 2 * mirror_point[i] - cp[i]
def add_point(self, point):
"""Adds a point to the geometry and returns the added point id.
:param point: Location of the point
:type point: list[float, float]
:return: Point id
:rtype: int
"""
self.points.append(point)
return len(self.points) - 1
def add_facet(self, facet):
"""Adds a facet to the geometry and returns the added facet id.
:param facet: Point indices of the facet
:type facet: list[float, float]
:return: Facet id
:rtype: int
"""
self.facets.append(facet)
return len(self.facets) - 1
def add_hole(self, hole):
"""Adds a hole location to the geometry and returns the added hole id.
:param hole: Location of the hole
:type hole: list[float, float]
:return: Hole id
:rtype: int
"""
self.holes.append(hole)
return len(self.holes) - 1
def add_control_point(self, control_point):
"""Adds a control point to the geometry and returns the added control
point id.
:param control_point: Location of the control point
:type control_point: list[float, float]
:return: Control point id
:rtype: int
"""
self.control_points.append(control_point)
return len(self.control_points) - 1
def clean_geometry(self, verbose=False):
"""Peforms a full clean on the geometry.
:param bool verbose: If set to true, information related to the geometry cleaning process
is printed to the terminal.
.. note:: Cleaning the geometry is always recommended when creating a merged section,
which may result in overlapping or intersecting facets, or duplicate nodes.
"""
self = pre.GeometryCleaner(self, verbose).clean_geometry()
def plot_geometry(self, ax=None, pause=True, labels=False, perimeter=False):
"""Plots the geometry defined by the input section. If no axes object is supplied a new
figure and axis is created.
:param ax: Axes object on which the mesh is plotted
:type ax: :class:`matplotlib.axes.Axes`
:param bool pause: If set to true, the figure pauses the script until the window is closed.
If set to false, the script continues immediately after the window is rendered.
:param bool labels: If set to true, node and facet labels are displayed
:param bool perimeter: If set to true, boldens the perimeter of the cross-section
:return: Matplotlib figure and axes objects (fig, ax)
:rtype: (:class:`matplotlib.figure.Figure`, :class:`matplotlib.axes`)
The following example creates a CHS discretised with 64 points, with a diameter of 48 and
thickness of 3.2, and plots the geometry::
import sectionproperties.pre.sections as sections
geometry = sections.Chs(d=48, t=3.2, n=64)
geometry.plot_geometry()
.. figure:: ../images/sections/chs_geometry.png
:align: center
:scale: 75 %
Geometry generated by the above example.
"""
# if no axes object is supplied, create and setup the plot
if ax is None:
ax_supplied = False
(fig, ax) = plt.subplots()
post.setup_plot(ax, pause)
else:
ax_supplied = True
for (i, f) in enumerate(self.facets):
if perimeter:
if i in self.perimeter:
linewidth = 3
else:
linewidth = 1.5
else:
linewidth = 1.5
# plot the points and facets
if i == 0:
ax.plot([self.points[f[0]][0], self.points[f[1]][0]],
[self.points[f[0]][1], self.points[f[1]][1]],
'ko-', markersize=2, linewidth=linewidth, label='Points & Facets')
else:
ax.plot([self.points[f[0]][0], self.points[f[1]][0]],
[self.points[f[0]][1], self.points[f[1]][1]],
'ko-', markersize=2, linewidth=linewidth)
for (i, h) in enumerate(self.holes):
# plot the holes
if i == 0:
ax.plot(h[0], h[1], 'rx', markersize=5, label='Holes')
else:
ax.plot(h[0], h[1], 'rx', markersize=5)
for (i, cp) in enumerate(self.control_points):
# plot the control points
if i == 0:
ax.plot(cp[0], cp[1], 'bo', markersize=5,
label='Control Points')
else:
ax.plot(cp[0], cp[1], 'bo', markersize=5)
# display the legend
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# display the labels
if labels:
# plot node labels
for (i, pt) in enumerate(self.points):
ax.annotate(str(i), xy=pt, color='r')
# plot facet labels
for (i, fct) in enumerate(self.facets):
pt1 = self.points[fct[0]]
pt2 = self.points[fct[1]]
xy = [(pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2]
ax.annotate(str(i), xy=xy, color='b')
# if no axes object is supplied, finish the plot
if not ax_supplied:
post.finish_plot(ax, pause, title='Cross-Section Geometry')
return (fig, ax)
def calculate_extents(self):
"""Calculates the minimum and maximum x and y-values amongst the list of points.
:return: Minimum and maximum x and y-values *(x_min, x_max, y_min, y_max)*
:rtype: tuple(float, float, float, float)
"""
# loop through all points
for (i, pt) in enumerate(self.points):
x = pt[0]
y = pt[1]
# initialise min, max variables
if i == 0:
x_min = x
x_max = x
y_min = y
y_max = y
# update the mins and maxs where necessary
x_min = min(x_min, x)
x_max = max(x_max, x)
y_min = min(y_min, y)
y_max = max(y_max, y)
return (x_min, x_max, y_min, y_max)
def draw_radius(self, pt, r, theta, n, anti=True):
"""Adds a quarter radius of points to the points list - centered at point *pt*, with radius
*r*, starting at angle *theta*, with *n* points. If r = 0, adds pt only.
:param pt: Centre of radius *(x,y)*
:type pt: list[float, float]
:param float r: Radius
:param float theta: Initial angle
:param int n: Number of points
:param bool anti: Anticlockwise rotation?
"""
if r == 0:
self.points.append(pt)
return
if anti:
mult = 1
else:
mult = -1
# calculate radius of points
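# the quarter circle from theta to theta +/- pi/2 is sampled with n equally spaced points;
# `anti` selects the direction of travel (anticlockwise when True)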
for i in range(n):
# determine angle
t = theta + mult * i * 1.0 / max(1, n - 1) * np.pi * 0.5
x = pt[0] + r * np.cos(t)
y = pt[1] + r * np.sin(t)
self.points.append([x, y])
def calculate_facet_length(self, facet):
"""Calculates the length of the facet.
:param facet: Point index pair *(p1, p2)* defining a facet
:type facet: list[int, int]
:return: Facet length
:rtype: float
"""
# get facet points
p1 = self.points[facet[0]]
p2 = self.points[facet[1]]
# calculate distance between two points
return np.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
def calculate_perimeter(self):
"""Calculates the perimeter of the cross-section by summing the length of all facets in the
``perimeter`` class variable.
:return: Cross-section perimeter, returns 0 if there is no perimeter defined
:rtype: float
"""
# check to see if there are any facets in the perimeter variable
if len(self.perimeter) == 0:
return 0
# initialise perimeter variable
perimeter = 0
# loop through all the facets along the perimeter
for facet_idx in self.perimeter:
perimeter += self.calculate_facet_length(self.facets[facet_idx])
return perimeter
class CustomSection(Geometry):
"""Constructs a cross-section from a list of points, facets, holes and a user specified control
point.
:param points: List of points *(x, y)* defining the vertices of the cross-section
:type points: list[list[float, float]]
:param facets: List of point index pairs *(p1, p2)* defining the edges of the cross-section
:type facets: list[list[int, int]]
:param holes: List of points *(x, y)* defining the locations of holes within the cross-section.
If there are no holes, provide an empty list [].
:type holes: list[list[float, float]]
:param control_points: A list of points *(x, y)* that define different regions of the
cross-section. A control point is an arbitrary point within a region enclosed by facets.
:type control_points: list[list[float, float]]
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
:param perimeter: List of facet indices defining the perimeter of the cross-section
:type perimeter: list[int]
The following example creates a hollow trapezium with a base width of 100, top width of 50,
height of 50 and a wall thickness of 10. A mesh is generated with a maximum triangular area of
2.0::
import sectionproperties.pre.sections as sections
points = [[0, 0], [100, 0], [75, 50], [25, 50], [15, 10], [85, 10], [70, 40], [30, 40]]
facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]
holes = [[50, 25]]
control_points = [[5, 5]]
perimeter = [0, 1, 2, 3]
geometry = sections.CustomSection(
points, facets, holes, control_points, perimeter=perimeter
)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/custom_geometry.png
:align: center
:scale: 75 %
Custom section geometry.
.. figure:: ../images/sections/custom_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, points, facets, holes, control_points, shift=[0, 0], perimeter=[]):
"""Inits the CustomSection class."""
super().__init__(control_points, shift)
self.points = points
self.facets = facets
self.holes = holes
self.perimeter = perimeter
self.shift_section()
class RectangularSection(Geometry):
"""Constructs a rectangular section with the bottom left corner at the origin *(0, 0)*, with
depth *d* and width *b*.
:param float d: Depth (y) of the rectangle
:param float b: Width (x) of the rectangle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a rectangular cross-section with a depth of 100 and width of 50,
and generates a mesh with a maximum triangular area of 5::
import sectionproperties.pre.sections as sections
geometry = sections.RectangularSection(d=100, b=50)
mesh = geometry.create_mesh(mesh_sizes=[5])
.. figure:: ../images/sections/rectangle_geometry.png
:align: center
:scale: 75 %
Rectangular section geometry.
.. figure:: ../images/sections/rectangle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, shift=[0, 0]):
"""Inits the RectangularSection class."""
# assign control point
control_points = [[0.5 * b, 0.5 * d]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [[0, 0], [b, 0], [b, d], [0, d]]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0]]
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class CircularSection(Geometry):
"""Constructs a solid circle centered at the origin *(0, 0)* with diameter *d* and using *n*
points to construct the circle.
:param float d: Diameter of the circle
:param int n: Number of points discretising the circle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a circular cross-section with a diameter of 50 with 64 points,
and generates a mesh with a maximum triangular area of 2.5::
import sectionproperties.pre.sections as sections
geometry = sections.CircularSection(d=50, n=64)
mesh = geometry.create_mesh(mesh_sizes=[2.5])
.. figure:: ../images/sections/circle_geometry.png
:align: center
:scale: 75 %
Circular section geometry.
.. figure:: ../images/sections/circle_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, n, shift=[0, 0]):
"""Inits the CircularSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the circle
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d * np.cos(theta)
y = 0.5 * d * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the circle
else:
self.facets.append([i, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class Chs(Geometry):
"""Constructs a circular hollow section centered at the origin *(0, 0)*, with diameter *d* and
thickness *t*, using *n* points to construct the inner and outer circles.
:param float d: Outer diameter of the CHS
:param float t: Thickness of the CHS
:param int n: Number of points discretising the inner and outer circles
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a CHS discretised with 64 points, with a diameter of 48 and
thickness of 3.2, and generates a mesh with a maximum triangular area of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.Chs(d=48, t=3.2, n=64)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/chs_geometry.png
:align: center
:scale: 75 %
CHS geometry.
.. figure:: ../images/sections/chs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, t, n, shift=[0, 0]):
"""Inits the Chs class."""
# assign control point
control_points = [[d * 0.5 - t * 0.5, 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the CHS
self.holes = [[0, 0]]
# loop through each point of the CHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d * np.cos(theta)
y_outer = 0.5 * d * np.sin(theta)
x_inner = (0.5 * d - t) * np.cos(theta)
y_inner = (0.5 * d - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.perimeter = list(range(0, len(self.facets), 2))
self.shift_section()
class EllipticalSection(Geometry):
"""Constructs a solid ellipse centered at the origin *(0, 0)* with vertical diameter *d_y* and
horizontal diameter *d_x*, using *n* points to construct the ellipse.
:param float d_y: Diameter of the ellipse in the y-dimension
:param float d_x: Diameter of the ellipse in the x-dimension
:param int n: Number of points discretising the ellipse
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an elliptical cross-section with a vertical diameter of 25 and
horizontal diameter of 50, with 40 points, and generates a mesh with a maximum triangular area
of 1.0::
import sectionproperties.pre.sections as sections
geometry = sections.EllipticalSection(d_y=25, d_x=50, n=40)
mesh = geometry.create_mesh(mesh_sizes=[1.0])
.. figure:: ../images/sections/ellipse_geometry.png
:align: center
:scale: 75 %
Elliptical section geometry.
.. figure:: ../images/sections/ellipse_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d_y, d_x, n, shift=[0, 0]):
"""Inits the EllipticalSection class."""
# assign control point centered at zero
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the ellipse
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d_x * np.cos(theta)
y = 0.5 * d_y * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the ellipse
else:
self.facets.append([i, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class Ehs(Geometry):
"""Constructs an elliptical hollow section centered at the origin *(0, 0)*, with outer vertical
diameter *d_y*, outer horizontal diameter *d_x*, and thickness *t*, using *n* points to
construct the inner and outer ellipses.
:param float d_y: Diameter of the ellipse in the y-dimension
:param float d_x: Diameter of the ellipse in the x-dimension
:param float t: Thickness of the EHS
:param int n: Number of points discretising the inner and outer ellipses
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a EHS discretised with 30 points, with a outer vertical diameter
of 25, outer horizontal diameter of 50, and thickness of 2.0, and generates a mesh with a
maximum triangular area of 0.5::
import sectionproperties.pre.sections as sections
geometry = sections.Ehs(d_y=25, d_x=50, t=2.0, n=64)
mesh = geometry.create_mesh(mesh_sizes=[0.5])
.. figure:: ../images/sections/ehs_geometry.png
:align: center
:scale: 75 %
EHS geometry.
.. figure:: ../images/sections/ehs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d_y, d_x, t, n, shift=[0, 0]):
"""Inits the Ehs class."""
# assign control point
control_points = [[(d_x * 0.5) - (t * 0.5), 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the EHS
self.holes = [[0, 0]]
# loop through each point of the EHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d_x * np.cos(theta)
y_outer = 0.5 * d_y * np.sin(theta)
x_inner = ((0.5 * d_x) - t) * np.cos(theta)
y_inner = ((0.5 * d_y) - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.perimeter = list(range(0, len(self.facets), 2))
self.shift_section()
class Rhs(Geometry):
"""Constructs a rectangular hollow section centered at *(b/2, d/2)*, with depth *d*, width *b*,
thickness *t* and outer radius *r_out*, using *n_r* points to construct the inner and outer
radii. If the outer radius is less than the thickness of the RHS, the inner radius is set to
zero.
:param float d: Depth of the RHS
:param float b: Width of the RHS
:param float t: Thickness of the RHS
:param float r_out: Outer radius of the RHS
:param int n_r: Number of points discretising the inner and outer radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an RHS with a depth of 100, a width of 50, a thickness of 6 and
an outer radius of 9, using 8 points to discretise the inner and outer radii. A mesh is
generated with a maximum triangular area of 2.0::
import sectionproperties.pre.sections as sections
geometry = sections.Rhs(d=100, b=50, t=6, r_out=9, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/rhs_geometry.png
:align: center
:scale: 75 %
RHS geometry.
.. figure:: ../images/sections/rhs_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t, r_out, n_r, shift=[0, 0]):
"""Inits the Rhs class."""
# assign control point
control_points = [[b - t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# specify a hole in the centre of the RHS
self.holes = [[b * 0.5, d * 0.5]]
# calculate internal radius
r_in = max(r_out - t, 0)
# construct the outer radius points
self.draw_radius([r_out, r_out], r_out, np.pi, n_r)
self.draw_radius([b - r_out, r_out], r_out, 1.5 * np.pi, n_r)
self.draw_radius([b - r_out, d - r_out], r_out, 0, n_r)
self.draw_radius([r_out, d - r_out], r_out, 0.5 * np.pi, n_r)
# construct the outer radius facet list
n_outer = len(self.points)
for i in range(n_outer):
# if we are not at the last point
if i != n_outer - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([i, 0])
# construct the inner radius points
self.draw_radius([t + r_in, t + r_in], r_in, np.pi, n_r)
self.draw_radius([b - t - r_in, t + r_in], r_in, 1.5 * np.pi, n_r)
self.draw_radius([b - t - r_in, d - t - r_in], r_in, 0, n_r)
self.draw_radius([t + r_in, d - t - r_in], r_in, 0.5 * np.pi, n_r)
# construct the inner radius facet list
n_inner = len(self.points) - n_outer
for i in range(n_inner):
# if we are not at the last point
if i != n_inner - 1:
self.facets.append([i + n_outer, i + n_outer + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([i + n_outer, n_outer])
self.perimeter = list(range(int(len(self.facets) / 2)))
self.shift_section()
class ISection(Geometry):
"""Constructs an I-section centered at *(b/2, d/2)*, with depth *d*, width *b*, flange
thickness *t_f*, web thickness *t_w*, and root radius *r*, using *n_r* points to construct the
root radius.
:param float d: Depth of the I-section
:param float b: Width of the I-section
:param float t_f: Flange thickness of the I-section
:param float t_w: Web thickness of the I-section
:param float r: Root radius of the I-section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an I-section with a depth of 203, a width of 133, a flange
thickness of 7.8, a web thickness of 5.8 and a root radius of 8.9, using 16 points to
discretise the root radius. A mesh is generated with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/isection_geometry.png
:align: center
:scale: 75 %
I-section geometry.
.. figure:: ../images/sections/isection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[b * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# add first three points
self.points.append([0, 0])
self.points.append([b, 0])
self.points.append([b, t_f])
# construct the bottom right radius
pt = [b * 0.5 + t_w * 0.5 + r, t_f + r]
self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)
# construct the top right radius
pt = [b * 0.5 + t_w * 0.5 + r, d - t_f - r]
self.draw_radius(pt, r, np.pi, n_r, False)
# add the next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
pt = [b * 0.5 - t_w * 0.5 - r, d - t_f - r]
self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)
# construct the bottom left radius
pt = [b * 0.5 - t_w * 0.5 - r, t_f + r]
self.draw_radius(pt, r, 0, n_r, False)
# add the last point
self.points.append([0, t_f])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class MonoISection(Geometry):
"""Constructs a monosymmetric I-section centered at *(max(b_t, b_b)/2, d/2)*, with depth *d*,
top flange width *b_t*, bottom flange width *b_b*, top flange thickness *t_ft*, bottom flange
thickness *t_fb*, web thickness *t_w*, and root radius *r*, using *n_r* points to construct the
root radius.
:param float d: Depth of the I-section
:param float b_t: Top flange width
:param float b_b: Bottom flange width
:param float t_ft: Top flange thickness of the I-section
:param float t_fb: Bottom flange thickness of the I-section
:param float t_w: Web thickness of the I-section
:param float r: Root radius of the I-section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a monosymmetric I-section with a depth of 200, a top flange width
of 50, a top flange thickness of 12, a bottom flange width of 130, a bottom flange thickness of
8, a web thickness of 6 and a root radius of 8, using 16 points to discretise the root radius.
A mesh is generated with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.MonoISection(
d=200, b_t=50, b_b=130, t_ft=12, t_fb=8, t_w=6, r=8, n_r=16
)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/monoisection_geometry.png
:align: center
:scale: 75 %
I-section geometry.
.. figure:: ../images/sections/monoisection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b_t, b_b, t_fb, t_ft, t_w, r, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[max(b_t, b_b) * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate central axis
x_central = max(b_t, b_b) * 0.5
# add first three points
self.points.append([x_central - b_b * 0.5, 0])
self.points.append([x_central + b_b * 0.5, 0])
self.points.append([x_central + b_b * 0.5, t_fb])
# construct the bottom right radius
pt = [x_central + t_w * 0.5 + r, t_fb + r]
self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)
# construct the top right radius
pt = [x_central + t_w * 0.5 + r, d - t_ft - r]
self.draw_radius(pt, r, np.pi, n_r, False)
# add the next four points
self.points.append([x_central + b_t * 0.5, d - t_ft])
self.points.append([x_central + b_t * 0.5, d])
self.points.append([x_central - b_t * 0.5, d])
self.points.append([x_central - b_t * 0.5, d - t_ft])
# construct the top left radius
pt = [x_central - t_w * 0.5 - r, d - t_ft - r]
self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)
# construct the bottom left radius
pt = [x_central - t_w * 0.5 - r, t_fb + r]
self.draw_radius(pt, r, 0, n_r, False)
# add the last point
self.points.append([x_central - b_b * 0.5, t_fb])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class TaperedFlangeISection(Geometry):
"""Constructs a Tapered Flange I-section centered at *(b/2, d/2)*, with depth *d*, width *b*,
mid-flange thickness *t_f*, web thickness *t_w*, root radius *r_r*, flange radius *r_f* and
flange angle *alpha*, using *n_r* points to construct the radii.
:param float d: Depth of the Tapered Flange I-section
:param float b: Width of the Tapered Flange I-section
:param float t_f: Mid-flange thickness of the Tapered Flange I-section (measured at the point
equidistant from the face of the web to the edge of the flange)
:param float t_w: Web thickness of the Tapered Flange I-section
:param float r_r: Root radius of the Tapered Flange I-section
:param float r_f: Flange radius of the Tapered Flange I-section
:param float alpha: Flange angle of the Tapered Flange I-section (degrees)
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tapered Flange I-section with a depth of 588, a width of 191, a
mid-flange thickness of 27.2, a web thickness of 15.2, a root radius of 17.8, a flange radius
of 8.9 and a flange angle of 8°, using 16 points to discretise the radii. A mesh is generated
with a maximum triangular area of 20.0::
import sectionproperties.pre.sections as sections
geometry = sections.TaperedFlangeISection(
d=588, b=191, t_f=27.2, t_w=15.2, r_r=17.8, r_f=8.9, alpha=8, n_r=16
)
mesh = geometry.create_mesh(mesh_sizes=[20.0])
.. figure:: ../images/sections/taperedisection_geometry.png
:align: center
:scale: 75 %
        Tapered flange I-section geometry.
.. figure:: ../images/sections/taperedisection_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[b * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate alpha in radians
alpha_rad = np.pi * alpha / 180
# calculate the height of the flange toe and dimensions of the straight
x1 = b * 0.25 - t_w * 0.25 - r_f * (1 - np.sin(alpha_rad))
y1 = x1 * np.tan(alpha_rad)
x2 = b * 0.25 - t_w * 0.25 - r_r * (1 - np.sin(alpha_rad))
y2 = x2 * np.tan(alpha_rad)
y_t = t_f - y1 - r_f * np.cos(alpha_rad)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom right flange toe radius
if r_f == 0:
self.points.append([b, y_t])
else:
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom right root radius
if r_r == 0:
self.points.append([b * 0.5 + t_w * 0.5, t_f + y2])
else:
for i in range(n_r):
# determine polar angle
theta = (
3.0 / 2 * np.pi - alpha_rad) - (i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right root radius
if r_r == 0:
self.points.append([b * 0.5 + t_w * 0.5, d - t_f - y2])
else:
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b * 0.5 + t_w * 0.5 + r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right flange toe radius
if r_f == 0:
self.points.append([b, d - y_t])
else:
for i in range(n_r):
# determine polar angle
theta = (
3.0 * np.pi / 2 + alpha_rad) + i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad
)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the next two points
self.points.append([b, d])
self.points.append([0, d])
# construct the top left flange toe radius
if r_f == 0:
self.points.append([0, d - y_t])
else:
for i in range(n_r):
# determine polar angle
theta = np.pi + (i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad))
# calculate the locations of the radius points
x = r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top left root radius
if r_r == 0:
self.points.append([b * 0.5 - t_w * 0.5, d - t_f - y2])
else:
for i in range(n_r):
# determine polar angle
theta = (
np.pi * 0.5 - alpha_rad) - (i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
)
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left root radius
if r_r == 0:
self.points.append([b * 0.5 - t_w * 0.5, t_f + y2])
else:
for i in range(n_r):
# determine polar angle
theta = -i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b * 0.5 - t_w * 0.5 - r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom left flange toe radius
if r_f == 0:
self.points.append([0, y_t])
else:
for i in range(n_r):
# determine polar angle
theta = (
np.pi * 0.5 + alpha_rad) + (i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
)
# calculate the locations of the radius points
x = r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class PfcSection(Geometry):
"""Constructs a PFC section with the bottom left corner at the origin *(0, 0)*, with depth *d*,
width *b*, flange thickness *t_f*, web thickness *t_w* and root radius *r*, using *n_r* points
to construct the root radius.
:param float d: Depth of the PFC section
:param float b: Width of the PFC section
:param float t_f: Flange thickness of the PFC section
:param float t_w: Web thickness of the PFC section
:param float r: Root radius of the PFC section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a PFC section with a depth of 250, a width of 90, a flange
thickness of 15, a web thickness of 8 and a root radius of 12, using 8 points to discretise the
root radius. A mesh is generated with a maximum triangular area of 5.0::
import sectionproperties.pre.sections as sections
geometry = sections.PfcSection(d=250, b=90, t_f=15, t_w=8, r=12, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[5.0])
.. figure:: ../images/sections/pfc_geometry.png
:align: center
:scale: 75 %
PFC geometry.
.. figure:: ../images/sections/pfc_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the PfcSection class."""
# assign control point
control_points = [[t_w * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# add first three points
self.points.append([0, 0])
self.points.append([b, 0])
self.points.append([b, t_f])
# construct the bottom right radius
pt = [t_w + r, t_f + r]
self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)
# construct the top right radius
pt = [t_w + r, d - t_f - r]
self.draw_radius(pt, r, np.pi, n_r, False)
# add last three points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class TaperedFlangeChannel(Geometry):
"""Constructs a Tapered Flange Channel section with the bottom left corner at the origin
*(0, 0)*, with depth *d*, width *b*, mid-flange thickness *t_f*, web thickness *t_w*, root
radius *r_r*, flange radius *r_f* and flange angle *alpha*, using *n_r* points to construct the
radii.
:param float d: Depth of the Tapered Flange Channel section
:param float b: Width of the Tapered Flange Channel section
:param float t_f: Mid-flange thickness of the Tapered Flange Channel section (measured at the
point equidistant from the face of the web to the edge of the flange)
:param float t_w: Web thickness of the Tapered Flange Channel section
:param float r_r: Root radius of the Tapered Flange Channel section
:param float r_f: Flange radius of the Tapered Flange Channel section
:param float alpha: Flange angle of the Tapered Flange Channel section (degrees)
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tapered Flange Channel section with a depth of 10, a width of
3.5, a mid-flange thickness of 0.575, a web thickness of 0.475, a root radius of 0.575, a
flange radius of 0.4 and a flange angle of 8°, using 16 points to discretise the radii. A mesh
is generated with a maximum triangular area of 0.02::
import sectionproperties.pre.sections as sections
geometry = sections.TaperedFlangeChannel(
d=10, b=3.5, t_f=0.575, t_w=0.475, r_r=0.575, r_f=0.4, alpha=8, n_r=16
)
mesh = geometry.create_mesh(mesh_sizes=[0.02])
.. figure:: ../images/sections/taperedchannel_geometry.png
:align: center
:scale: 75 %
        Tapered flange channel geometry.
.. figure:: ../images/sections/taperedchannel_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r_r, r_f, alpha, n_r, shift=[0, 0]):
"""Inits the ISection class."""
# assign control point
control_points = [[t_w * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate alpha in radians
alpha_rad = np.pi * alpha / 180
# calculate the height of the flange toe and dimensions of the straight
x1 = b * 0.5 - t_w * 0.5 - r_f * (1 - np.sin(alpha_rad))
y1 = x1 * np.tan(alpha_rad)
x2 = b * 0.5 - t_w * 0.5 - r_r * (1 - np.sin(alpha_rad))
y2 = x2 * np.tan(alpha_rad)
y_t = t_f - y1 - r_f * np.cos(alpha_rad)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom right flange toe radius
if r_f == 0:
self.points.append([b, y_t])
else:
for i in range(n_r):
# determine polar angle
theta = i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the bottom right root radius
if r_r == 0:
self.points.append([t_w, t_f + y2])
else:
for i in range(n_r):
# determine polar angle
theta = (
3.0 / 2 * np.pi - alpha_rad) - (i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
)
# calculate the locations of the radius points
x = t_w + r_r + r_r * np.cos(theta)
y = t_f + y2 + r_r * np.cos(alpha_rad) + r_r * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right root radius
if r_r == 0:
self.points.append([t_w, d - t_f - y2])
else:
for i in range(n_r):
# determine polar angle
theta = np.pi - i * 1.0 / max(1, n_r - 1) * (np.pi * 0.5 - alpha_rad)
# calculate the locations of the radius points
x = t_w + r_r + r_r * np.cos(theta)
y = d - t_f - y2 - r_r * np.cos(alpha_rad) + r_r * np.sin(
theta)
# append the current points to the points list
self.points.append([x, y])
# construct the top right flange toe radius
if r_f == 0:
self.points.append([b, d - y_t])
else:
for i in range(n_r):
# determine polar angle
theta = (
3.0 * np.pi / 2 + alpha_rad) + (i * 1.0 / max(1, n_r - 1) * (
np.pi * 0.5 - alpha_rad)
)
# calculate the locations of the radius points
x = b - r_f + r_f * np.cos(theta)
y = d - y_t + r_f * np.sin(theta)
# append the current points to the points list
self.points.append([x, y])
# add the final two points
self.points.append([b, d])
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class TeeSection(Geometry):
"""Constructs a Tee section with the top left corner at *(0, d)*, with depth *d*, width *b*,
flange thickness *t_f*, web thickness *t_w* and root radius *r*, using *n_r* points to
construct the root radius.
:param float d: Depth of the Tee section
:param float b: Width of the Tee section
:param float t_f: Flange thickness of the Tee section
:param float t_w: Web thickness of the Tee section
:param float r: Root radius of the Tee section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Tee section with a depth of 200, a width of 100, a flange
thickness of 12, a web thickness of 6 and a root radius of 8, using 8 points to discretise the
root radius. A mesh is generated with a maximum triangular area of 3.0::
import sectionproperties.pre.sections as sections
geometry = sections.TeeSection(d=200, b=100, t_f=12, t_w=6, r=8, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[3.0])
.. figure:: ../images/sections/tee_geometry.png
:align: center
:scale: 75 %
Tee section geometry.
.. figure:: ../images/sections/tee_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b, t_f, t_w, r, n_r, shift=[0, 0]):
"""Inits the TeeSection class."""
# assign control point
control_points = [[b * 0.5, d - t_f * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([b * 0.5 - t_w * 0.5, 0])
self.points.append([b * 0.5 + t_w * 0.5, 0])
# construct the top right radius
pt = [b * 0.5 + t_w * 0.5 + r, d - t_f - r]
self.draw_radius(pt, r, np.pi, n_r, False)
# add next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
pt = [b * 0.5 - t_w * 0.5 - r, d - t_f - r]
self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class AngleSection(Geometry):
"""Constructs an angle section with the bottom left corner at the origin *(0, 0)*, with depth
*d*, width *b*, thickness *t*, root radius *r_r* and toe radius *r_t*, using *n_r* points to
construct the radii.
:param float d: Depth of the angle section
:param float b: Width of the angle section
:param float t: Thickness of the angle section
:param float r_r: Root radius of the angle section
:param float r_t: Toe radius of the angle section
:param int n_r: Number of points discretising the radii
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an angle section with a depth of 150, a width of 100, a thickness
of 8, a root radius of 12 and a toe radius of 5, using 16 points to discretise the radii. A
mesh is generated with a maximum triangular area of 2.0::
import sectionproperties.pre.sections as sections
geometry = sections.AngleSection(d=150, b=100, t=8, r_r=12, r_t=5, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[2.0])
.. figure:: ../images/sections/angle_geometry.png
:align: center
:scale: 75 %
Angle section geometry.
.. figure:: ../images/sections/angle_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b, t, r_r, r_t, n_r, shift=[0, 0]):
"""Inits the AngleSection class."""
# assign control point
control_points = [[t * 0.5, t * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([0, 0])
self.points.append([b, 0])
# construct the bottom toe radius
pt = [b - r_t, t - r_t]
self.draw_radius(pt, r_t, 0, n_r)
# construct the root radius
pt = [t + r_r, t + r_r]
self.draw_radius(pt, r_r, 1.5 * np.pi, n_r, False)
# construct the top toe radius
pt = [t - r_t, d - r_t]
self.draw_radius(pt, r_t, 0, n_r)
# add the next point
self.points.append([0, d])
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class CeeSection(Geometry):
"""Constructs a Cee section with the bottom left corner at the origin *(0, 0)*, with depth *d*,
width *b*, lip *l*, thickness *t* and outer radius *r_out*, using *n_r* points to construct the
radius. If the outer radius is less than the thickness of the Cee Section, the inner radius is
set to zero.
:param float d: Depth of the Cee section
:param float b: Width of the Cee section
:param float l: Lip of the Cee section
:param float t: Thickness of the Cee section
:param float r_out: Outer radius of the Cee section
:param int n_r: Number of points discretising the outer radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
:raises Exception: Lip length must be greater than the outer radius
The following example creates a Cee section with a depth of 125, a width of 50, a lip of 30, a
thickness of 1.5 and an outer radius of 6, using 8 points to discretise the radius. A mesh is
generated with a maximum triangular area of 0.25::
import sectionproperties.pre.sections as sections
geometry = sections.CeeSection(d=125, b=50, l=30, t=1.5, r_out=6, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[0.25])
.. figure:: ../images/sections/cee_geometry.png
:align: center
:scale: 75 %
Cee section geometry.
.. figure:: ../images/sections/cee_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b, l, t, r_out, n_r, shift=[0, 0]):
"""Inits the CeeSection class."""
# ensure the lip length is greater than the outer radius
if l < r_out:
raise Exception('Lip length must be greater than the outer radius')
# assign control point
control_points = [[t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate internal radius
r_in = max(r_out - t, 0)
# construct the outer bottom left radius
self.draw_radius([r_out, r_out], r_out, np.pi, n_r)
# construct the outer bottom right radius
self.draw_radius([b - r_out, r_out], r_out, 1.5 * np.pi, n_r)
if r_out != l:
# add next two points
self.points.append([b, l])
self.points.append([b - t, l])
# construct the inner bottom right radius
self.draw_radius([b - t - r_in, t + r_in], r_in, 0, n_r, False)
# construct the inner bottom left radius
self.draw_radius([t + r_in, t + r_in], r_in, 1.5 * np.pi, n_r, False)
# construct the inner top left radius
self.draw_radius([t + r_in, d - t - r_in], r_in, np.pi, n_r, False)
# construct the inner top right radius
self.draw_radius(
[b - t - r_in, d - t - r_in], r_in, 0.5 * np.pi, n_r, False)
if r_out != l:
# add next two points
self.points.append([b - t, d - l])
self.points.append([b, d - l])
# construct the outer top right radius
self.draw_radius([b - r_out, d - r_out], r_out, 0, n_r)
# construct the outer top left radius
self.draw_radius([r_out, d - r_out], r_out, 0.5 * np.pi, n_r)
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class ZedSection(Geometry):
"""Constructs a Zed section with the bottom left corner at the origin *(0, 0)*, with depth *d*,
left flange width *b_l*, right flange width *b_r*, lip *l*, thickness *t* and outer radius
*r_out*, using *n_r* points to construct the radius. If the outer radius is less than the
thickness of the Zed Section, the inner radius is set to zero.
:param float d: Depth of the Zed section
:param float b_l: Left flange width of the Zed section
:param float b_r: Right flange width of the Zed section
:param float l: Lip of the Zed section
:param float t: Thickness of the Zed section
:param float r_out: Outer radius of the Zed section
:param int n_r: Number of points discretising the outer radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
:raises Exception: Lip length must be greater than the outer radius
The following example creates a Zed section with a depth of 100, a left flange width of 40, a
right flange width of 50, a lip of 20, a thickness of 1.2 and an outer radius of 5, using 8
points to discretise the radius. A mesh is generated with a maximum triangular area of 0.15::
import sectionproperties.pre.sections as sections
geometry = sections.ZedSection(d=100, b_l=40, b_r=50, l=20, t=1.2, r_out=5, n_r=8)
mesh = geometry.create_mesh(mesh_sizes=[0.15])
.. figure:: ../images/sections/zed_geometry.png
:align: center
:scale: 75 %
Zed section geometry.
.. figure:: ../images/sections/zed_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b_l, b_r, l, t, r_out, n_r, shift=[0, 0]):
"""Inits the ZedSection class."""
# ensure the lip length is greater than the outer radius
if l < r_out:
raise Exception('Lip length must be greater than the outer radius')
# assign control point
control_points = [[t * 0.5, d * 0.5]]
super().__init__(control_points, shift)
# calculate internal radius
r_in = max(r_out - t, 0)
# construct the outer bottom left radius
self.draw_radius([r_out, r_out], r_out, np.pi, n_r)
# construct the outer bottom right radius
self.draw_radius([b_r - r_out, r_out], r_out, 1.5 * np.pi, n_r)
if r_out != l:
# add next two points
self.points.append([b_r, l])
self.points.append([b_r - t, l])
# construct the inner bottom right radius
self.draw_radius([b_r - t - r_in, t + r_in], r_in, 0, n_r, False)
# construct the inner bottom left radius
self.draw_radius([t + r_in, t + r_in], r_in, 1.5 * np.pi, n_r, False)
# construct the outer top right radius
self.draw_radius([t - r_out, d - r_out], r_out, 0, n_r)
# construct the outer top left radius
self.draw_radius([t - b_l + r_out, d - r_out], r_out, 0.5 * np.pi, n_r)
if r_out != l:
# add the next two points
self.points.append([t - b_l, d - l])
self.points.append([t - b_l + t, d - l])
# construct the inner top left radius
self.draw_radius([2 * t - b_l + r_in, d - t - r_in], r_in, np.pi, n_r, False)
# construct the inner top right radius
self.draw_radius([-r_in, d - t - r_in], r_in, 0.5 * np.pi, n_r, False)
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class CruciformSection(Geometry):
"""Constructs a cruciform section centered at the origin *(0, 0)*, with depth *d*, width *b*,
thickness *t* and root radius *r*, using *n_r* points to construct the root radius.
:param float d: Depth of the cruciform section
:param float b: Width of the cruciform section
:param float t: Thickness of the cruciform section
:param float r: Root radius of the cruciform section
:param int n_r: Number of points discretising the root radius
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a cruciform section with a depth of 250, a width of 175, a
thickness of 12 and a root radius of 16, using 16 points to discretise the radius. A mesh is
generated with a maximum triangular area of 5.0::
import sectionproperties.pre.sections as sections
geometry = sections.CruciformSection(d=250, b=175, t=12, r=16, n_r=16)
mesh = geometry.create_mesh(mesh_sizes=[5.0])
.. figure:: ../images/sections/cruciform_geometry.png
:align: center
:scale: 75 %
Cruciform section geometry.
.. figure:: ../images/sections/cruciform_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, d, b, t, r, n_r, shift=[0, 0]):
"""Inits the CruciformSection class."""
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# add first two points
self.points.append([-t * 0.5, -d * 0.5])
self.points.append([t * 0.5, -d * 0.5])
# construct the bottom right radius
pt = [0.5 * t + r, -0.5 * t - r]
self.draw_radius(pt, r, np.pi, n_r, False)
# add the next two points
self.points.append([0.5 * b, -t * 0.5])
self.points.append([0.5 * b, t * 0.5])
# construct the top right radius
pt = [0.5 * t + r, 0.5 * t + r]
self.draw_radius(pt, r, 1.5 * np.pi, n_r, False)
# add the next two points
self.points.append([t * 0.5, 0.5 * d])
self.points.append([-t * 0.5, 0.5 * d])
# construct the top left radius
pt = [-0.5 * t - r, 0.5 * t + r]
self.draw_radius(pt, r, 0, n_r, False)
# add the next two points
self.points.append([-0.5 * b, t * 0.5])
self.points.append([-0.5 * b, -t * 0.5])
# construct the bottom left radius
pt = [-0.5 * t - r, -0.5 * t - r]
self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.perimeter = list(range(len(self.facets)))
self.shift_section()
class PolygonSection(Geometry):
"""Constructs a regular hollow polygon section centered at *(0, 0)*, with a pitch circle
diameter of bounding polygon *d*, thickness *t*, number of sides *n_sides* and an optional
    inner radius *r_in*, using *n_r* points to construct the inner and outer radii (if a radius is
    specified).
:param float d: Pitch circle diameter of the outer bounding polygon (i.e. diameter of circle
that passes through all vertices of the outer polygon)
:param float t: Thickness of the polygon section wall
:param float r_in: Inner radius of the polygon corners. By default, if not specified, a polygon
with no corner radii is generated.
    :param int n_r: Number of points discretising the inner and outer radii, ignored if no inner
        radius is specified
    :param rot: Initial counterclockwise rotation in degrees. By default, the bottom face is
        aligned with the x-axis.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
:raises Exception: Number of sides in polygon must be greater than or equal to 3
The following example creates an Octagonal section (8 sides) with a diameter of 200, a
thickness of 6 and an inner radius of 20, using 12 points to discretise the inner and outer
radii. A mesh is generated with a maximum triangular area of 5::
import sectionproperties.pre.sections as sections
geometry = sections.PolygonSection(d=200, t=6, n_sides=8, r_in=20, n_r=12)
mesh = geometry.create_mesh(mesh_sizes=[5])
.. figure:: ../images/sections/polygon_geometry.png
:align: center
:scale: 75 %
Octagonal section geometry.
.. figure:: ../images/sections/polygon_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, t, n_sides, r_in=0, n_r=1, rot=0, shift=[0, 0]):
"""Inits the PolygonSection class."""
if n_sides < 3:
            msg = 'n_sides is required to be greater than or equal to 3 for the PolygonSection class'
raise Exception(msg)
# initial rotation
rot = rot * np.pi / 180 # radians
# determine triangular segment angle
alpha = 2 * np.pi / n_sides # radians
# determine distance from origin to point perpendicular on face of side
a_out = d / 2 * np.cos(alpha / 2)
a_in = a_out - t
# determine side length for outer & inner faces neglecting radii
side_length_out = d * np.sin(alpha / 2)
side_length_in = a_in / a_out * side_length_out
# check limit on internal radii, if exceeded then radii merge to circle
if r_in > a_in:
r_in = a_in
circle = True
else:
circle = False
# calculate external radius, if r_in is zero, r_out also is zero
if r_in == 0:
r_out = 0
n_r = 1
else:
r_out = r_in + t
# equivalent side length of half the corner radii triangular segment
c_out = r_out * (side_length_out / 2) / a_out
c_in = r_in * (side_length_in / 2) / a_in
# determine straight side length between corner radii (if present)
side_length_straight_out = side_length_out - (2 * c_out)
side_length_straight_in = side_length_in - (2 * c_in)
        # assign a control point at the centre of the bottom side, rotated by the specified initial rotation
control_points = [self.rotate([0, -a_out + t / 2], rot)]
super().__init__(control_points, shift)
# temp list for repeating geometry
base_points = []
# specify a hole in the centre of the Polygon section
self.holes = [[0, 0]]
        # start at the bottom face, construct one corner radius, then rotate by the initial rotation +
        # alpha and repeat n_sides times to form the full section perimeter
# construct the first radius (bottom right)
for i in range(n_r):
# determine polar angle
theta = 1 / 2 * np.pi + i * 1.0 / max(1, n_r - 1) * alpha
# calculate location of inner and outer points
x_outer = side_length_straight_out / 2 - r_out * np.cos(theta)
y_outer = -a_out + r_out - r_out * np.sin(theta)
x_inner = side_length_straight_in / 2 - r_in * np.cos(theta)
y_inner = -a_in + r_in - r_in * np.sin(theta)
# append the current temporary points to the temporary points list
base_points.append([x_outer, y_outer])
base_points.append([x_inner, y_inner])
        # if the radii have merged into a circle, skip the last point pair, as it would create
        # overlapping end points and cause meshing issues if the geometry is not cleaned by the user
if circle:
base_points = base_points[0:-2]
# iterate and add subsequent corner radii one point at a time for each side
for i in range(n_sides):
for point in base_points:
point_new = self.rotate(point, alpha * i + rot)
self.points.append(point_new)
# build the facet list
num_points = int(len(self.points) / 2)
for i in range(num_points):
# if we are not at the last point
if i != num_points - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the loop
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.perimeter = list(range(0, len(self.facets), 2))
self.shift_section()
def rotate(self, point, angle):
"""
Rotate a point counterclockwise by a given angle around origin [0, 0]
:param list point: Point coordinates to be rotated
:param float angle: Angle to rotate point coordinates
:return: Coordinates of rotated point
:rtype: list[float, float]
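        For example, rotating *[1, 0]* by *pi/2* gives approximately *[0, 1]*.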
"""
pt_x, pt_y = point
c = np.cos(angle)
s = np.sin(angle)
new_x = c * pt_x - s * pt_y
new_y = s * pt_x + c * pt_y
return [new_x, new_y]
class BoxGirderSection(Geometry):
"""Constructs a Box Girder section centered at at *(max(b_t, b_b)/2, d/2)*, with depth *d*, top
width *b_t*, bottom width *b_b*, top flange thickness *t_ft*, bottom flange thickness *t_fb*
and web thickness *t_w*.
:param float d: Depth of the Box Girder section
:param float b_t: Top width of the Box Girder section
:param float b_b: Bottom width of the Box Girder section
    :param float t_ft: Top flange thickness of the Box Girder section
:param float t_fb: Bottom flange thickness of the Box Girder section
:param float t_w: Web thickness of the Box Girder section
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
    The following example creates a Box Girder section with a depth of 1200, a top width of 1200, a
    bottom width of 400, a top flange thickness of 100, a bottom flange thickness of 80 and a web
    thickness of 50. A mesh is generated with a maximum triangular area of 200.0::
import sectionproperties.pre.sections as sections
geometry = sections.BoxGirderSection(d=1200, b_t=1200, b_b=400, t_ft=100, t_fb=80, t_w=50)
mesh = geometry.create_mesh(mesh_sizes=[200.0])
.. figure:: ../images/sections/box_girder_geometry.png
:align: center
:scale: 75 %
Box Girder geometry.
.. figure:: ../images/sections/box_girder_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, d, b_t, b_b, t_ft, t_fb, t_w, shift=[0, 0]):
"""Inits the BoxGirderSection class."""
# assign control point
control_points = [[max(b_t, b_b) * 0.5, t_fb * 0.5]]
super().__init__(control_points, shift)
# calculate central axis
x_c = max(b_t, b_b) * 0.5
# specify a hole in the centre of the Box Girder
self.holes = [[x_c, d * 0.5]]
# determine side wall angle
if b_t < b_b:
phi_b = np.arctan2(d, 0.5 * (b_b - b_t))
phi_t = np.pi - phi_b
else:
phi_t = np.arctan2(d, 0.5 * (b_t - b_b))
phi_b = np.pi - phi_t
# determine inner wall x-offsets
x_bot = t_fb / np.tan(np.pi - phi_b)
x_top = t_ft / np.tan(np.pi - phi_t)
web_x = abs(t_w / np.sin(np.pi - phi_b))
# add outer points
self.points.append([x_c - 0.5 * b_b, 0])
self.points.append([x_c + 0.5 * b_b, 0])
self.points.append([x_c + 0.5 * b_t, d])
self.points.append([x_c - 0.5 * b_t, d])
# add inner points
self.points.append([x_c - 0.5 * b_b - x_bot + web_x, t_fb])
self.points.append([x_c + 0.5 * b_b + x_bot - web_x, t_fb])
self.points.append([x_c + 0.5 * b_t + x_top - web_x, d - t_ft])
self.points.append([x_c - 0.5 * b_t - x_top + web_x, d - t_ft])
# build facet list
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]
self.perimeter = [0, 1, 2, 3]
self.shift_section()
class MergedSection(Geometry):
"""Merges a number of section geometries into one geometry. Note that for the meshing algorithm
to work, there needs to be connectivity between all regions of the provided geometries.
Overlapping of geometries is permitted.
:param sections: A list of geometry objects to merge into one
:class:`~sectionproperties.pre.sections.Geometry` object
:type sections: list[:class:`~sectionproperties.pre.sections.Geometry`]
The following example creates a combined cross-section with a 150x100x6 RHS placed on its side
on top of a 200UB25.4. A mesh is generated with a maximum triangle size of 5.0 for the
I-section and 2.5 for the RHS::
import sectionproperties.pre.sections as sections
isection = sections.ISection(d=203, b=133, t_f=7.8, t_w=5.8, r=8.9, n_r=8)
box = sections.Rhs(d=100, b=150, t=6, r_out=15, n_r=8, shift=[-8.5, 203])
geometry = sections.MergedSection([isection, box])
geometry.clean_geometry()
mesh = geometry.create_mesh(mesh_sizes=[5.0, 2.5])
.. figure:: ../images/sections/merged_geometry.png
:align: center
:scale: 75 %
Merged section geometry.
.. figure:: ../images/sections/merged_mesh.png
:align: center
:scale: 75 %
"""
def __init__(self, sections):
"""Inits the MergedSection class."""
super().__init__([], [0, 0])
point_count = 0
# loop through all sections
for section in sections:
# add facets
for facet in section.facets:
self.facets.append([facet[0] + point_count, facet[1] + point_count])
# add points and count points
for point in section.points:
self.points.append([point[0], point[1]])
point_count += 1
# add holes
for hole in section.holes:
self.holes.append([hole[0], hole[1]])
# add control points
for control_point in section.control_points:
self.control_points.append([control_point[0], control_point[1]])
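# Possible refactor (a sketch only, not part of the library): every Geometry subclass
# above builds its facet list with the same loop that joins consecutive points and
# closes back to point 0. The hypothetical helper below expresses that pattern once.
def build_closed_facet_list(points):
    """Return facets [i, i + 1] for consecutive points, closing the loop back to point 0."""
    n = len(points)
    return [[i, (i + 1) % n] for i in range(n)]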
|
the-stack_0_756 | #!/usr/bin/env python
"""
Fetch descriptions from NCBI given a file of gene names.
Intended for use on genes from Gene2Products.need-curating.txt
produced by funannotate annotate, formatted as a single-column,
newline-separated text file.
Outputs a 2-column TSV ready for update-gene2products.py.
Usage: python grab_gene_descriptions.py <genes.txt> <outfile.txt>
Cam Gilchrist
2018-05-29
"""
import sys
from Bio import Entrez
from collections import Counter
# *Always* tell NCBI who you are
Entrez.email = "[email protected]"
def read_genes(gene_file):
"""Read in list of gene names from \n separated text file and
return list."""
genes = []
    with open(gene_file, 'r') as genefile:
for gene in genefile:
gene = gene.strip()
genes.append(gene)
return(genes)
def retrieve_descriptions(gene, descriptions, empties):
"""Given single gene name, grab possible descriptions from NCBI
and prompt user to select one"""
# Perform ESearch and grab list of IDs
query = gene + '[Gene Name]'
handle = Entrez.esearch(db='gene', term=query,
retmax=100,
retmode='xml')
record = Entrez.read(handle)
handle.close()
idlist = ','.join(record["IdList"])
# Ensure you have results, exit if not
if idlist == '':
print('No records for {}, skipping...\n'.format(gene))
empties.append(gene)
return
# Generate summary from UID list
handle = Entrez.esummary(db='gene', id=idlist)
record = Entrez.read(handle)
handle.close()
# Grab description, counter for unique values
desc_cnt = Counter()
doc_sums = record[u'DocumentSummarySet'][u'DocumentSummary']
for i in range(len(doc_sums)):
if doc_sums[i][u'NomenclatureName'] != '':
desc = doc_sums[i][u'NomenclatureName']
else:
desc = doc_sums[i][u'OtherDesignations'].split('|')[0]
desc_cnt[desc] += 1
# Create list from counter keys for indexing purposes
    desc_list = list(filter(None, desc_cnt))  # list() so it can be indexed and counted
if len(desc_cnt) > 1:
print('{} has {} unique descriptions from {} results. These are:'.format(
gene, len(desc_list), len(doc_sums)))
ans_range = range(len(desc_list))
for i in ans_range:
            print('{}: {} [{}/{}]'.format(i+1, desc_list[i], desc_cnt[desc_list[i]], len(doc_sums)))
# Take user input to accept/reject a description
while True:
            ans = input('Which do you accept? [{}-{}/N]: '.format(
                min(ans_range)+1, max(ans_range)+1))
# Check if int or str entered
try:
ans = int(ans)-1
if ans in ans_range:
print('Accepting #{}.\n'.format(ans+1))
descriptions[gene] = desc_list[ans]
break
else:
print('{} is outside acceptable range. Try again.'.format(
ans))
except:
if ans in ['N', 'n', 'no', 'No']:
print('Skipping this gene.\n')
break
else:
print('Invalid input, try again.')
# If there's only one unique description, accept/reject
elif len(desc_cnt) == 1:
desc_list2 = list(desc_cnt)
desc = desc_list2[0]
if desc == '':
print('{} has empty description.'.format(gene))
empties.append(gene)
return
print('{} only has one unique description from {} results.'.format(
gene, len(doc_sums)))
print('This is:\n{}'.format(desc))
while True:
            ans = input('Accept? Y/N: ')
if ans in ['Y', 'y', 'yes', 'Yes']:
print('Description accepted.\n')
descriptions[gene] = desc
break
elif ans in ['N', 'n', 'no', 'No']:
print('Skipping this gene.\n')
empties.append(gene)
break
else:
print('Invalid input, try again.')
return(descriptions)
def print_descriptions(descriptions, outfile):
"""Print descriptions as 2 column TSV for update-gene2products.py"""
with open(outfile, 'w') as out:
out.write('Empty descriptions:\n')
for gene in empties:
out.write('{}\n'.format(gene))
out.write('\nNon-empty descriptions:\n')
for gene in descriptions:
out.write('{}\t{}\n'.format(gene, descriptions[gene]))
# Read in genes from file and summarize
genes = read_genes(sys.argv[1])
print('There are {} genes in {}. These are:\n{}\n'.format(
len(genes), sys.argv[1], ', '.join(genes))
)
# Fetch descriptions
empties = []
descriptions = {}
for gene in genes:
retrieve_descriptions(gene, descriptions, empties)
# Write to output file given in second argument
print_descriptions(descriptions, sys.argv[2])
print('All done. Remember to check {} to correct errors or make adjustments!'.format(sys.argv[2]))
|
the-stack_0_757 | from pyspark import SparkContext, SparkConf
if __name__ == "__main__":
conf = SparkConf().setAppName("word count").setMaster("local[3]")
# Spark Context
sc = SparkContext(conf=conf)
sc.setLogLevel("ERROR")
# Load input
lines = sc.textFile("inputs/word_count.text")
# Split the sentences into words
words = lines.flatMap(lambda line: line.split(" "))
# Count occurrence of each word
wordCounts = words.countByValue()
# Print the count
for word, count in wordCounts.items():
print("{} : {}".format(word, count))
|
the-stack_0_758 | import requests
from pymongo import MongoClient
from datetime import datetime
from airflow.providers.mongo.hooks.mongo import MongoHook
def get_raw_joke():
"""Retrieve a joke from 'jokeapi' and return it in dict format."""
base_url = "https://v2.jokeapi.dev"
response = requests.get(f"{base_url}/joke/any")
return response.json()
def preprocess_joke(raw_joke: dict):
"""Perform preprocessing to clean raw jokes."""
dictObject = {}
dictObject["type"] = raw_joke.get("type")
dictObject["category"] = raw_joke.get("category")
if raw_joke.get("type") == "single":
dictObject["joke"] = raw_joke.get("joke")
return dictObject
elif raw_joke.get("type") == "twopart":
dictObject["joke"] = {}
dictObject["joke"]["setup"] = raw_joke.get("setup")
dictObject["joke"]["delivery"] = raw_joke.get("delivery")
return dictObject
else:
print("Joke is of neither 'single' nor 'twopart' type.")
def serialize_joke(joke: dict):
"""Save jokes into local MongoDB instance."""
if joke:
joke["datetime"] = f"{datetime.now():%Y-%m-%d %H:%M:%S%z}"
# Using PyMongo
# uri = "mongodb://root:example@mongo:27017" # this works
uri = "mongodb://airflow:airflow@mongo:27017" # this works too
# uri = "mongodb://airflow:airflow@localhost:3456" # but this does not work
client = MongoClient(uri)
db = client.the_database
collection = db.jokes
result = collection.insert_one(joke)
print(f"{result.inserted_id} is inserted!")
# Using MongoHook wrapper
# mongo_hook = MongoHook(conn_id="MONGO")
# client = mongo_hook.get_conn()
# db = client.the_database
# collection = db.jokes
# result = collection.insert_one(joke)
# print(f"{result.inserted_id} is inserted!")
def scrap_joke():
raw_joke = get_raw_joke()
joke = preprocess_joke(raw_joke)
serialize_joke(joke)
if __name__ == "__main__":
scrap_joke()
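# Usage sketch for reading stored jokes back out (assumes the same local MongoDB
# instance and credentials used in serialize_joke above; illustrative only):
# client = MongoClient("mongodb://airflow:airflow@mongo:27017")
# for doc in client.the_database.jokes.find().sort("datetime", -1).limit(5):
#     print(doc["category"], doc["joke"])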
|
the-stack_0_759 | from model.group import Group
class GroupHelper:
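    """Page-object helper for the application's group page: navigation, creation,
    selection by index or id, modification, deletion and a cached group list."""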
def __init__(self, app):
self.app = app
def open_groups_page(self):
wd = self.app.wd
if not(wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0):
wd.find_element_by_link_text("groups").click()
def create(self, group):
wd = self.app.wd
self.open_groups_page()
# init group creation
wd.find_element_by_xpath("//div[@id='content']/form/input[4]").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.group_cache = None
def delete_first_group(self):
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_group_by_id(self, id):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_id(id)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_all_groups(self):
wd = self.app.wd
self.open_groups_page()
nmb_groups = self.count()
if nmb_groups != 0:
for ndx in range(0, nmb_groups):
self.select_group_by_index(ndx)
# submit deletion
wd.find_element_by_name("delete").click()
self.group_cache = None
self.return_to_groups_page()
def select_first_group(self):
self.select_group_by_index(0)
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def modify_first_group(self, new_group_data):
self.modify_group_by_index(0, new_group_data)
def modify_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_index(index)
# open modification form
wd.find_element_by_name("edit").click()
# fill group form
self.fill_group_form(new_group_data)
# submit modification
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def modify_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.open_groups_page()
self.select_group_by_id(id)
# open modification form
wd.find_element_by_name("edit").click()
# fill group form
self.fill_group_form(new_group_data)
# submit modification
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def fill_group_form(self, group):
self.change_field_value("group_name", group.name)
self.change_field_value("group_header", group.header)
self.change_field_value("group_footer", group.footer)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def return_to_groups_page(self):
self.open_groups_page()
def count(self):
wd = self.app.wd
self.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.open_groups_page()
self.group_cache = []
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = element.find_element_by_name("selected[]").get_attribute("value")
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
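# Hypothetical usage sketch (assumes an application fixture `app` exposing this helper
# as `app.group` and a Selenium WebDriver as `app.wd`, as the constructor implies):
# app.group.create(Group(name="new group", header="header", footer="footer"))
# groups = app.group.get_group_list()
# app.group.modify_group_by_id(groups[0].id, Group(name="renamed group"))
# app.group.delete_group_by_id(groups[0].id)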
|
the-stack_0_762 | import os
from collections import OrderedDict
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from .building import Building
from .datastore.datastore import join_key
from .utils import get_datastore
from .timeframe import TimeFrame
class DataSet(object):
"""
Attributes
----------
buildings : OrderedDict
Each key is an integer, starting from 1.
Each value is a nilmtk.Building object.
store : nilmtk.DataStore
metadata : dict
Metadata describing the dataset name, authors etc.
(Metadata about specific buildings, meters, appliances etc.
is stored elsewhere.)
See nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#dataset
"""
    def __init__(self, filename=None, format='HDF', mode='a'):
"""
Parameters
----------
filename : str
path to data set
format : str
format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'
"""
self.store = None
self.buildings = OrderedDict()
self.metadata = {}
if filename is not None:
            self.import_metadata(get_datastore(filename, format, mode))
def import_metadata(self, store):
"""
Parameters
----------
store : nilmtk.DataStore
"""
self.store = store
self.metadata = store.load_metadata()
self._init_buildings(store)
return self
def save(self, destination):
for b_id, building in self.buildings.items():
building.save(destination, '/building' + str(b_id))
def _init_buildings(self, store):
buildings = store.elements_below_key('/')
buildings.sort()
for b_key in buildings:
building = Building()
building.import_metadata(
store, '/'+b_key, self.metadata.get('name'))
self.buildings[building.identifier.instance] = building
def set_window(self, start=None, end=None):
"""Set the timeframe window on self.store. Used for setting the
'region of interest' non-destructively for all processing.
Parameters
----------
start, end : str or pd.Timestamp or datetime or None
"""
if self.store is None:
raise RuntimeError("You need to set self.store first!")
tz = self.metadata.get('timezone')
if tz is None:
raise RuntimeError("'timezone' is not set in dataset metadata.")
self.store.window = TimeFrame(start, end, tz)
def describe(self, **kwargs):
"""Returns a DataFrame describing this dataset.
Each column is a building. Each row is a feature."""
keys = list(self.buildings.keys())
keys.sort()
results = pd.DataFrame(columns=keys)
for i, building in self.buildings.items():
results[i] = building.describe(**kwargs)
return results
def plot_good_sections(self, axes=None, label_func=None, gap=0, **kwargs):
"""Plots all good sections for all buildings.
Parameters
----------
axes : list of axes or None.
If None then they will be generated.
Returns
-------
axes : list of axes
"""
n = len(self.buildings)
if axes is None:
n_meters_per_building = [len(elec.all_meters())
for elec in self.elecs()]
gridspec_kw = dict(height_ratios=n_meters_per_building)
fig, axes = plt.subplots(
n, 1, sharex=True, gridspec_kw=gridspec_kw)
assert n == len(axes)
for i, (ax, elec) in enumerate(zip(axes, self.elecs())):
elec.plot_good_sections(ax=ax, label_func=label_func, gap=gap,
**kwargs)
ax.set_title('House {}'.format(elec.building()), y=0.4, va='top')
ax.grid(False)
for spine in ax.spines.values():
spine.set_linewidth(0.5)
if i == n // 2:
ax.set_ylabel('Meter', rotation=0,
ha='center', va='center', y=.4)
ax.set_xlabel('Date')
plt.tight_layout()
plt.subplots_adjust(hspace=0.05)
plt.draw()
return axes
def elecs(self):
return [building.elec for building in self.buildings.values()]
def clear_cache(self):
for elec in self.elecs():
elec.clear_cache()
def plot_mains_power_histograms(self, axes=None, **kwargs):
n = len(self.buildings)
if axes is None:
fig, axes = plt.subplots(n, 1, sharex=True)
assert n == len(axes)
for ax, elec in zip(axes, self.elecs()):
ax = elec.mains().plot_power_histogram(ax=ax, **kwargs)
ax.set_title('House {}'.format(elec.building()))
return axes
def get_activity_script(self, filename):
"""Extracts an activity script from this dataset.
Saves the activity script to an HDF5 file.
Keys in the HDF5 file take the form:
'/building<building_i>/<appliance type>__<appliance instance>'
e.g. '/building1/electric_oven__1'
Spaces in the appliance type are replaced by underscores.
Each table is of fixed format and stores a pd.Series.
The index is the datetime of the start time or end time of
each appliance activation. The values are booleans. True means
the start time of an appliance activation; false means the
end time of an appliance activation.
Parameters
----------
filename : str
The full filename, including path and suffix, for the HDF5 file
for storing the activity script.
"""
store = pd.HDFStore(
filename, mode='w', complevel=9, complib='blosc')
for building in self.buildings.values():
submeters = building.elec.submeters().meters
for meter in submeters:
appliance = meter.dominant_appliance()
key = '/building{:d}/{:s}__{:d}'.format(
building.identifier.instance,
appliance.identifier.type.replace(' ', '_'),
appliance.identifier.instance)
print("Computing activations for", key)
activations = meter.get_activations()
starts = []
ends = []
for activation in activations:
starts.append(activation.index[0])
ends.append(activation.index[-1])
del activations
starts = pd.Series(True, index=starts)
ends = pd.Series(False, index=ends)
script = pd.concat([starts, ends])
script = script.sort_index()
store[key] = script
del starts, ends
store.close()
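# Usage sketch (file paths and the appliance key below are illustrative; the key
# format follows the get_activity_script docstring above):
# dataset = DataSet('/data/redd.h5')
# dataset.set_window(start='2011-04-19', end='2011-05-01')
# dataset.get_activity_script('/tmp/activity_script.h5')
# script = pd.HDFStore('/tmp/activity_script.h5')['/building1/fridge__1']
# # True marks an activation start time, False marks an activation end time.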
|
the-stack_0_766 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EnvironmentDeploymentPropertiesFragment(Model):
"""Properties of an environment deployment.
:param arm_template_id: The Azure Resource Manager template's identifier.
:type arm_template_id: str
:param parameters: The parameters of the Azure Resource Manager template.
:type parameters:
list[~azure.mgmt.devtestlabs.models.ArmTemplateParameterPropertiesFragment]
"""
_attribute_map = {
'arm_template_id': {'key': 'armTemplateId', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[ArmTemplateParameterPropertiesFragment]'},
}
def __init__(self, *, arm_template_id: str=None, parameters=None, **kwargs) -> None:
super(EnvironmentDeploymentPropertiesFragment, self).__init__(**kwargs)
self.arm_template_id = arm_template_id
self.parameters = parameters
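# Minimal usage sketch (illustrative only; the template identifier format and the
# parameter objects depend on the consuming lab and environment):
# fragment = EnvironmentDeploymentPropertiesFragment(
#     arm_template_id='<ARM template resource id>',
#     parameters=[])  # optionally a list of ArmTemplateParameterPropertiesFragment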
|
the-stack_0_767 | #!/usr/bin/env python
#-----------------------------*-python-*----------------------------------------#
# file src/cdi_ipcress/python/ipcress_reader.py
# author Alex Long <[email protected]>
# date Monday, December 15, 2014, 5:44 pm
# brief This script has functions that parse an IPCRESS file and return a
# dictionary that contains data for each property and each material
# present in the file. This script also contains interpolation functions
# for opacity data.
# note Copyright (C) 2016, Triad National Security, LLC.
# All rights reserved.
#--------------------------------------------------------------------------------------------------#
# import block
################################################################################
import re
from numpy import arange, sin, pi, min, max
import sys
import struct
import numpy as np
from struct import *
from math import *
################################################################################
# These are the functions that are used to read data from the
# binary IPCRESS file. It also contains a function for interpolating in
# density and temperature. The data locations are specified in
# cdi_ipcress/doc/IPCRESS_File_Format.pdf
################################################################################
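# Note on the interpolation scheme used below: opacities are interpolated bilinearly
# in log space. For bracketing densities (rho_L, rho_G) at a fixed temperature point T_i,
#   log(op) = log(op_L) + log(rho/rho_L) / log(rho_G/rho_L) * log(op_G/op_L),
# and the two temperature results are then combined the same way in log(T).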
def get_data_for_id(filename, data_start_index, num_entries):
temp_grid = []
# "rb" is read binary mode
with open(filename, "rb") as f:
f.seek(data_start_index*8)
for i in range(num_entries):
word = f.read(8)
temp_grid.append(unpack('>d', word)[0])
return temp_grid
################################################################################
################################################################################
def write_data_for_id(filename, data_start_index, num_entries, new_values):
# "wb" is write binary mode
with open(filename, "r+b") as f:
f.seek(data_start_index*8)
for i in range(num_entries):
s = struct.pack('>d', new_values[i])
f.write(s)
################################################################################
################################################################################
def interpolate_mg_opacity_data(T_grid, rho_grid, hnu_grid, op_data, \
target_rho, target_T, print_str=""):
n_rho = len(rho_grid)
n_T = len(T_grid)
n_hnu = len(hnu_grid)
# don't allow extrapolation
if (target_rho < np.min(rho_grid)): target_rho = np.min(rho_grid)
if (target_rho > np.max(rho_grid)): target_rho = np.max(rho_grid)
if (target_T < np.min(T_grid)): target_T = np.min(T_grid)
if (target_T > np.max(T_grid)): target_T = np.max(T_grid)
if (print_str is not None):
print( \
"Interpolating {0}--Target rho: {1} , target T: {2}".format( \
print_str, target_rho, target_T))
# get correct index of adjacent density points
  rho_L = 1000; rho_G = 0
for rho_i, rho in enumerate(rho_grid[:-1]):
if ( target_rho >= rho and target_rho<=rho_grid[rho_i+1]):
rho_L = rho_i
rho_G = rho_i+1
break
# get correct index of adjacent temperature points
T_L = 1000; T_G = 0
for T_i, T in enumerate(T_grid[:-1]):
if ( target_T >= T and target_T<=T_grid[T_i+1]):
T_L = T_i
T_G = T_i+1
break
#print("Temperature interpolation bounds: {0} {1}".format(T_grid[T_L], T_grid[T_G]))
#print("Density interpolation bounds: {0} {1}".format(rho_grid[rho_L], rho_grid[rho_G]))
#get the adjacent rows of the opacity index
#get the points of the opacity index
rho_L_T_L = op_data[n_rho*T_L*(n_hnu-1) + rho_L*(n_hnu-1) : n_rho*T_L*(n_hnu-1) + rho_L*(n_hnu-1) + (n_hnu-1) ]
rho_L_T_G = op_data[n_rho*T_G*(n_hnu-1) + rho_L*(n_hnu-1) : n_rho*T_G*(n_hnu-1) + rho_L*(n_hnu-1) + (n_hnu-1) ]
rho_G_T_L = op_data[n_rho*T_L*(n_hnu-1) + rho_G*(n_hnu-1) : n_rho*T_L*(n_hnu-1) + rho_G*(n_hnu-1) + (n_hnu-1) ]
rho_G_T_G = op_data[n_rho*T_G*(n_hnu-1) + rho_G*(n_hnu-1) : n_rho*T_G*(n_hnu-1) + rho_G*(n_hnu-1) + (n_hnu-1) ]
interp_op = []
#interpolate for each frequency point
for i in range(n_hnu-1):
#if (rho_L_T_L[i] < 1.0e-10) or (rho_L_T_G[i] < 1.0e-10) or (rho_G_T_L[i] < 1.0e-10) or (rho_G_T_G[i] < 1.0e10):
# interp_op.append(1.0e-10)
#print("{0} {1} {2} {3}" .format(rho_L_T_L[i], rho_L_T_G[i], rho_G_T_L[i], rho_G_T_G[i]))
log_op_T_L = log(rho_L_T_L[i]) + log(target_rho/rho_grid[rho_L]) / log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_L[i]/rho_L_T_L[i])
log_op_T_G = log(rho_L_T_G[i]) + log(target_rho/rho_grid[rho_L]) / log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_G[i]/rho_L_T_G[i])
log_op = log_op_T_L + log(target_T/T_grid[T_L]) / log(T_grid[T_G]/T_grid[T_L]) * (log_op_T_G - log_op_T_L)
interp_op.append(exp(log_op))
print("hnu(keV) opacity(sq_cm/g) opacity(1/cm)")
for i, hnu in enumerate(hnu_grid[:-1]):
print("{0} {1} {2}".format( 0.5*(hnu + hnu_grid[i+1]), interp_op[i], interp_op[i]*target_rho))
return interp_op
###############################################################################
################################################################################
def interpolate_gray_opacity_data(T_grid, rho_grid, op_data, target_rho, \
target_T, print_str = ""):
n_rho = len(rho_grid)
n_T = len(T_grid)
# don't allow extrapolation
if (target_rho < np.min(rho_grid)): target_rho = np.min(rho_grid)
if (target_rho > np.max(rho_grid)): target_rho = np.max(rho_grid)
if (target_T < np.min(T_grid)): target_T = np.min(T_grid)
if (target_T > np.max(T_grid)): target_T = np.max(T_grid)
if (print_str is not None):
print( \
"Interpolating {0}--Target rho: {1} , target T: {2}".format( \
print_str, target_rho, target_T))
  rho_L = 1000; rho_G = 0
for rho_i, rho in enumerate(rho_grid[:-1]):
if ( target_rho >= rho and target_rho<=rho_grid[rho_i+1]):
rho_L = rho_i
rho_G = rho_i+1
break
  T_L = 1000; T_G = 0
  for T_i, T in enumerate(T_grid[:-1]):
if ( target_T >= T and target_T<=T_grid[T_i+1]):
T_L = T_i
T_G = T_i+1
break
#get the adjacent rows of the opacity index
rho_L_T_L = op_data[n_rho*T_L + rho_L]
rho_L_T_G = op_data[n_rho*T_G + rho_L]
rho_G_T_L = op_data[n_rho*T_L + rho_G]
rho_G_T_G = op_data[n_rho*T_G + rho_G]
#interpolate in log space
#print("{0} {1} {2} {3}" .format(rho_L_T_L, rho_L_T_G, rho_G_T_L, rho_G_T_G))
log_op_T_L = log(rho_L_T_L) + log(target_rho/rho_grid[rho_L]) / log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_L/rho_L_T_L)
log_op_T_G = log(rho_L_T_G) + log(target_rho/rho_grid[rho_L]) / \
log(rho_grid[rho_G]/rho_grid[rho_L]) * log(rho_G_T_G/rho_L_T_G)
log_op = log_op_T_L + log(target_T/T_grid[T_L]) / \
log(T_grid[T_G]/T_grid[T_L]) * (log_op_T_G - log_op_T_L)
interp_op = exp(log_op)
#print("opacity(sq_cm/g) opacity(1/cm)")
#print("{0} {1}".format(interp_op, interp_op*target_rho))
return interp_op
###############################################################################
###############################################################################
def read_information_from_file(ipcress_file):
word_array = []
with open(ipcress_file, "rb") as f:
for i in range(26):
word = f.read(8)
if not word:
break
word_array.append(word)
#print(int(unpack('>d', word)[0]))
title = word_array[0]
toc_int= []
offset = 2
for i in range(offset,offset+24):
toc_int.append( int(unpack('>d', word_array[i])[0]))
n_data_records = toc_int[14]
mxrec = toc_int[1] - toc_int[0]
mxkey = toc_int[16]
#print("Number of data records: {0}".format(n_data_records))
#print("Beginnging of data: {0}".format(toc_int[0]))
#print("Max records: {0} , max search keys: {1}".format(mxrec, mxkey))
mat_property = []
ds = []
dfo = []
tdf = []
num_mats = 0
mat_ids= []
with open(ipcress_file, "rb") as f:
# Read in array that lists the data sizes in this file
f.seek(toc_int[0]*8)
#print("Table of data sizes")
for i in range(n_data_records):
word = f.read(8)
ds.append(int(unpack('>d', word)[0]))
# Read in array gives the offsets between data
f.seek(toc_int[1]*8)
#print("Table of data file offesets")
for i in range(n_data_records):
word = f.read(8)
dfo.append(int(unpack('>d', word)[0]))
# Read in material IDs present in this file
f.seek(dfo[0]*8)
#print("Table of material identifiers")
word = f.read(8)
num_mats = int(unpack('>d', word)[0])
for i in range(num_mats):
word = f.read(8)
mat_ids.append( int(unpack('>d', word)[0]))
# Read in list of properties in this file available for each material
# entries in this table are 24 bytes each
f.seek(toc_int[10]*8)
#print("Table of data fields for each material")
word = f.read(72) #ignore the first 72 bytes, they don't contain useful information
for i in range(1,toc_int[14]):
      #ordering is "matID" "data type" "fill"
temp_property = []
for j in range(mxkey):
three_string = []
three_string.append( f.read(8).decode("utf-8"))
three_string.append( f.read(8).decode("utf-8"))
three_string.append( f.read(8).decode("utf-8"))
if (j==0): temp_property.append(three_string[2].strip() )
elif (j==1): temp_property.append(three_string[0].strip())
else: temp_property.append(i) #index of data table containing values
try:
temp_property = [temp_property[0].decode('ascii'), \
temp_property[1].decode('ascii'), temp_property[2]]
mat_property.append(temp_property)
except:
mat_property.append(temp_property)
materials = []
for m in range(num_mats):
materials.append([ m, mat_ids[m]])
#print("{0} materials in file".format(num_mats))
#for i in range(num_mats):
  # print(" Material ID: {0}".format(mat_ids[i]))
#print("List of available properties")
  #for i in mat_property:
# print(i)
#return the list of available properties, data file offsets and data sizes
return materials, mat_property, dfo, ds
################################################################################
###############################################################################
def write_information_to_file(ipcress_file, material_ID, mat_property, new_values):
materials, property_list, dfo, ds = read_information_from_file(ipcress_file)
# check to make sure material is in file
material_IDs = []
for imat in materials:
material_IDs.append(str(imat[1]))
if (not (material_ID in material_IDs)):
print("ERROR: Material ID not found in file, not changing anything!")
return
# try to find property in file
property_found = False
  property_index = 0
for prop_i, prop in enumerate(property_list):
if (material_ID == prop[0] and mat_property == prop[1]):
property_found = True
property_index = prop_i
break
# make sure sizes match of property you're about to write
if (property_found and ds[property_index+1] != len(new_values)):
print("ERROR: Number of new values does not match size of old values, not changing anything!")
return
# if the combination of property and material was found, write the new data to
# the ipcress file
if property_found:
write_data_for_id( ipcress_file, dfo[property_index+1], \
ds[property_index+1], new_values)
else:
print("ERROR: Combination of material ID and property not found, not changing anything!")
return
################################################################################
################################################################################
# Checks to see if there are any zeros in the opacity data--zero data is
# difficult to handle and for now we are going to ignore data sets that contain
# zeros and print an error message
def check_valid_data(opacity_grid):
for item in opacity_grid:
if (item != 0.0):
return True
return False
################################################################################
################################################################################
# return a dictionary where the keys are "<material ID>_<property_ID>" and the
# values are the data
def get_property_map_from_ipcress_file(ipcress_file):
#load data from IPCRESS file
# dfo is the array of data file offsets, ds is the array of data sizes
materials, property_list, dfo, ds = read_information_from_file(ipcress_file)
#build dictionary of data, keys are "property_matID"
table_key_dict = {}
for prop_i, prop in enumerate(property_list):
table_key_dict["{0}_{1}".format(prop[1], prop[0])] = get_data_for_id( ipcress_file, dfo[prop_i+1], ds[prop_i+1])
material_list = []
for material in materials:
material_list.append(material[1])
return table_key_dict, material_list
################################################################################
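################################################################################
# Minimal self-contained sketch of the helpers above: the grids and the
# power-law "opacity" are synthetic stand-ins (log-log bilinear interpolation is
# exact for a pure power law), and the commented-out call at the end assumes a
# real IPCRESS file is available on disk.
if __name__ == "__main__":
  T_grid = [1.0, 2.0, 4.0, 8.0]    # temperature grid (arbitrary units)
  rho_grid = [0.1, 1.0, 10.0]      # density grid (arbitrary units)
  # gray opacity table laid out as op_data[n_rho*T_index + rho_index]
  op_data = [2.0*rho**0.5*T**(-1.5) for T in T_grid for rho in rho_grid]
  kappa = interpolate_gray_opacity_data(T_grid, rho_grid, op_data, 3.0, 3.0, \
    "synthetic gray opacity")
  print("interpolated opacity: {0} (exact: {1})".format(kappa, 2.0*3.0**0.5*3.0**(-1.5)))
  #table_key_dict, material_list = get_property_map_from_ipcress_file("opacities.ipcress")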
|
the-stack_0_769 | import torch
import torch.nn as nn
import torch.nn.functional as f
from torch.nn import init
from .submodules import ConvLayer, UpsampleConvLayer, TransposedConvLayer, RecurrentConvLayer, ResidualBlock, ConvLSTM, ConvGRU, RecurrentResidualLayer
def skip_concat(x1, x2):
return torch.cat([x1, x2], dim=1)
def skip_sum(x1, x2):
return x1 + x2
class BaseUNet(nn.Module):
def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum', activation='sigmoid',
num_encoders=4, base_num_channels=32, num_residual_blocks=2, norm=None, use_upsample_conv=True):
super(BaseUNet, self).__init__()
self.num_input_channels = num_input_channels
self.num_output_channels = num_output_channels
self.skip_type = skip_type
self.apply_skip_connection = skip_sum if self.skip_type == 'sum' else skip_concat
self.activation = activation
self.norm = norm
if use_upsample_conv:
print('Using UpsampleConvLayer (slow, but no checkerboard artefacts)')
self.UpsampleLayer = UpsampleConvLayer
else:
print('Using TransposedConvLayer (fast, with checkerboard artefacts)')
self.UpsampleLayer = TransposedConvLayer
self.num_encoders = num_encoders
self.base_num_channels = base_num_channels
self.num_residual_blocks = num_residual_blocks
self.max_num_channels = self.base_num_channels * pow(2, self.num_encoders)
assert(self.num_input_channels > 0)
assert(self.num_output_channels > 0)
self.encoder_input_sizes = []
for i in range(self.num_encoders):
self.encoder_input_sizes.append(self.base_num_channels * pow(2, i))
self.encoder_output_sizes = [self.base_num_channels * pow(2, i + 1) for i in range(self.num_encoders)]
        self.activation = getattr(torch, self.activation, torch.sigmoid)  # fall back to a callable, not a string name
def build_resblocks(self):
self.resblocks = nn.ModuleList()
for i in range(self.num_residual_blocks):
self.resblocks.append(ResidualBlock(self.max_num_channels, self.max_num_channels, norm=self.norm))
def build_decoders(self):
decoder_input_sizes = list(reversed([self.base_num_channels * pow(2, i + 1) for i in range(self.num_encoders)]))
self.decoders = nn.ModuleList()
for input_size in decoder_input_sizes:
self.decoders.append(self.UpsampleLayer(input_size if self.skip_type == 'sum' else 2 * input_size,
input_size // 2,
kernel_size=5, padding=2, norm=self.norm))
def build_prediction_layer(self):
self.pred = ConvLayer(self.base_num_channels if self.skip_type == 'sum' else 2 * self.base_num_channels,
self.num_output_channels, 1, activation=None, norm=self.norm)
class UNet(BaseUNet):
def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum', activation='sigmoid',
num_encoders=4, base_num_channels=32, num_residual_blocks=2, norm=None, use_upsample_conv=True):
super(UNet, self).__init__(num_input_channels, num_output_channels, skip_type, activation,
num_encoders, base_num_channels, num_residual_blocks, norm, use_upsample_conv)
self.head = ConvLayer(self.num_input_channels, self.base_num_channels,
kernel_size=5, stride=1, padding=2) # N x C x H x W -> N x 32 x H x W
self.encoders = nn.ModuleList()
for input_size, output_size in zip(self.encoder_input_sizes, self.encoder_output_sizes):
self.encoders.append(ConvLayer(input_size, output_size, kernel_size=5,
stride=2, padding=2, norm=self.norm))
self.build_resblocks()
self.build_decoders()
self.build_prediction_layer()
def forward(self, x):
"""
:param x: N x num_input_channels x H x W
:return: N x num_output_channels x H x W
"""
# head
x = self.head(x)
head = x
# encoder
blocks = []
for i, encoder in enumerate(self.encoders):
x = encoder(x)
blocks.append(x)
# residual blocks
for resblock in self.resblocks:
x = resblock(x)
# decoder
for i, decoder in enumerate(self.decoders):
x = decoder(self.apply_skip_connection(x, blocks[self.num_encoders - i - 1]))
img = self.activation(self.pred(self.apply_skip_connection(x, head)))
return img
class UNetRecurrent(BaseUNet):
"""
Recurrent UNet architecture where every encoder is followed by a recurrent convolutional block,
such as a ConvLSTM or a ConvGRU.
Symmetric, skip connections on every encoding layer.
"""
def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum',
recurrent_block_type='convlstm', activation='sigmoid', num_encoders=4, base_num_channels=32,
num_residual_blocks=2, norm=None, use_upsample_conv=True):
super(UNetRecurrent, self).__init__(num_input_channels, num_output_channels, skip_type, activation,
num_encoders, base_num_channels, num_residual_blocks, norm,
use_upsample_conv)
self.head = ConvLayer(self.num_input_channels, self.base_num_channels,
kernel_size=5, stride=1, padding=2) # N x C x H x W -> N x 32 x H x W
self.encoders = nn.ModuleList()
for input_size, output_size in zip(self.encoder_input_sizes, self.encoder_output_sizes):
self.encoders.append(RecurrentConvLayer(input_size, output_size,
kernel_size=5, stride=2, padding=2,
recurrent_block_type=recurrent_block_type,
norm=self.norm))
self.build_resblocks()
self.build_decoders()
self.build_prediction_layer()
def forward(self, x, prev_states):
"""
:param x: N x num_input_channels x H x W
:param prev_states: previous LSTM states for every encoder layer
:return: N x num_output_channels x H x W
"""
# head
x = self.head(x)
head = x
if prev_states is None:
prev_states = [None] * self.num_encoders
# encoder
blocks = []
states = []
for i, encoder in enumerate(self.encoders):
x, state = encoder(x, prev_states[i])
blocks.append(x)
states.append(state)
# residual blocks
for resblock in self.resblocks:
x = resblock(x)
# decoder
for i, decoder in enumerate(self.decoders):
x = decoder(self.apply_skip_connection(x, blocks[self.num_encoders - i - 1]))
# tail
img = self.activation(self.pred(self.apply_skip_connection(x, head)))
return img, states
class UNetFire(BaseUNet):
"""
"""
def __init__(self, num_input_channels, num_output_channels=1, skip_type='sum',
recurrent_block_type='convgru', base_num_channels=16,
num_residual_blocks=2, norm=None, kernel_size=3,
recurrent_blocks={'resblock': [0]}):
super(UNetFire, self).__init__(num_input_channels=num_input_channels,
num_output_channels=num_output_channels,
skip_type=skip_type,
base_num_channels=base_num_channels,
num_residual_blocks=num_residual_blocks,
norm=norm)
self.kernel_size = kernel_size
self.recurrent_blocks = recurrent_blocks
print(self.num_input_channels)
self.head = RecurrentConvLayer(self.num_input_channels,
self.base_num_channels,
kernel_size=self.kernel_size,
padding=self.kernel_size // 2,
recurrent_block_type=recurrent_block_type,
norm=self.norm)
self.num_recurrent_units = 1
self.resblocks = nn.ModuleList()
recurrent_indices = self.recurrent_blocks.get('resblock', [])
for i in range(self.num_residual_blocks):
if i in recurrent_indices or -1 in recurrent_indices:
self.resblocks.append(RecurrentResidualLayer(
in_channels=self.base_num_channels,
out_channels=self.base_num_channels,
recurrent_block_type=recurrent_block_type,
norm=self.norm))
self.num_recurrent_units += 1
else:
self.resblocks.append(ResidualBlock(self.base_num_channels,
self.base_num_channels,
norm=self.norm))
self.pred = ConvLayer(2 * self.base_num_channels if self.skip_type == 'concat' else self.base_num_channels,
self.num_output_channels, kernel_size=1, padding=0, activation=None, norm=None)
def forward(self, x, prev_states):
"""
:param x: N x num_input_channels x H x W
:param prev_states: previous LSTM states for every encoder layer
:return: N x num_output_channels x H x W
"""
if prev_states is None:
prev_states = [None] * (self.num_recurrent_units)
states = []
state_idx = 0
# head
x, state = self.head(x, prev_states[state_idx])
state_idx += 1
states.append(state)
# residual blocks
recurrent_indices = self.recurrent_blocks.get('resblock', [])
for i, resblock in enumerate(self.resblocks):
if i in recurrent_indices or -1 in recurrent_indices:
x, state = resblock(x, prev_states[state_idx])
state_idx += 1
states.append(state)
else:
x = resblock(x)
# tail
img = self.pred(x)
return img, states
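# Minimal usage sketch (run as a module, e.g. `python -m <package>.unet`, so the
# relative import above resolves); the 5-channel input and 128x128 size are
# illustrative assumptions, not requirements of the models defined here.
if __name__ == "__main__":
    net = UNetRecurrent(num_input_channels=5, num_output_channels=1,
                        skip_type='sum', num_encoders=3, base_num_channels=32)
    x = torch.rand(1, 5, 128, 128)
    states = None
    for _ in range(3):  # feed a short sequence, carrying the recurrent states
        img, states = net(x, states)
    print(img.shape)  # expected: torch.Size([1, 1, 128, 128])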
|
the-stack_0_774 | #!/usr/bin/python3
import os
import sys
import math
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import data_utils
load_fn = data_utils.load_cls_train_val
balance_fn = None
map_fn = None
keep_remainder = True
save_ply_fn = None
num_class = 40
batch_size = 32
sample_num = 512
num_epochs = 4096
step_val = 500
learning_rate_base = 0.01
decay_steps = 8000
decay_rate = 0.5
learning_rate_min = 1e-6
weight_decay = 1e-5
jitter = 0.0
jitter_val = 0.0
jitter_test = 0.0
rotation_range = [0, 0, 0, 'u']
rotation_range_val = [0, 0, 0, 'u']
rotation_range_test = [0, 0, 0, 'u']
rotation_order = 'rxyz'
scaling_range = [0, 0, 0, 'g']
scaling_range_val = [0, 0, 0, 'u']
scaling_range_test = [0, 0, 0, 'u']
sample_num_variance = 1 // 8
sample_num_clip = 1 // 4
x = 3
xconv_param_name = ('K', 'D', 'P', 'C', 'links')
xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
[(8, 1, -1, 16 * x, []),
(12, 2, 384, 32 * x, []),
(16, 2, 128, 64 * x, []),
(16, 3, 128, 128 * x, [])]]
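# Each entry above is (K, D, P, C, links): K nearest neighbours, dilation rate D,
# number of representative points P (-1 keeps every point), output channels C,
# and dense links to earlier layers; the naming follows the PointCNN convention
# implied by xconv_param_name.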
with_global = True
fc_param_name = ('C', 'dropout_rate')
fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
[(128 * x, 0.0),
(64 * x, 0.8)]]
sampling = 'random'
optimizer = 'adam'
epsilon = 1e-2
data_dim = 6
use_extra_features = False
with_X_transformation = True
sorting_method = None
|
the-stack_0_775 | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
#import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.DelaunayMeshingApplication as KratosDelaunay
import KratosMultiphysics.PfemFluidDynamicsApplication as KratosPfemFluid
from importlib import import_module
def CreateMeshingDomain(main_model_part, custom_settings):
return FluidMeshingDomain(main_model_part, custom_settings)
class FluidMeshingDomain(object):
##constructor. the constructor shall only take care of storing the settings
##and the pointer to the main_model part.
##
##real construction shall be delayed to the function "Initialize" which
##will be called once the mesher is already filled
def __init__(self, main_model_part, custom_settings):
self.echo_level = 1
self.main_model_part = main_model_part
##settings string in json format
default_settings = KratosMultiphysics.Parameters("""
{
"python_module": "meshing_domain",
"model_part_name": "model_part_name",
"alpha_shape": 2.4,
"offset_factor": 0.0,
"meshing_strategy":{
"python_module": "meshing_strategy",
"meshing_frequency": 0.0,
"remesh": false,
"refine": false,
"reconnect": false,
"transfer": false,
"constrained": false,
"mesh_smoothing": false,
"variables_smoothing": false,
"elemental_variables_to_smooth":[],
"reference_element_type": "Element2D3N",
"reference_condition_type": "CompositeCondition2D2N"
},
"spatial_bounding_box":{
"use_bounding_box" : true,
"initial_time" : 0.0,
"final_time" : 1000.0,
"upper_point" : [10,10,10],
"lower_point" : [-10,-10,-10]
},
"spatial_refining_box" : {
"use_refining_box" : false,
"mesh_size" : 0.1,
"initial_time" : 0.0,
"final_time" : 1,
"upper_point" : [10,10,10],
"lower_point" : [-10,-10,-10]
},
"refining_parameters":{
"critical_size": 0.0,
"threshold_variable": "PLASTIC_STRAIN",
"reference_threshold" : 0.0,
"error_variable": "NORM_ISOCHORIC_STRESS",
"reference_error" : 0.0,
"add_nodes": true,
"insert_nodes": false,
"remove_nodes": {
"apply_removal": false,
"on_distance": false,
"on_threshold": false,
"on_error": false
},
"remove_boundary": {
"apply_removal": false,
"on_distance": false,
"on_threshold": false,
"on_error": false
},
"refine_elements": {
"apply_refinement": false,
"on_distance": false,
"on_threshold": false,
"on_error": false
},
"refine_boundary": {
"apply_refinement": false,
"on_distance": false,
"on_threshold": false,
"on_error": false
}
},
"elemental_variables_to_transfer":[]
}
""")
##overwrite the default settings with user-provided parameters
self.settings = custom_settings
self.settings.ValidateAndAssignDefaults(default_settings)
#construct the meshing strategy
python_module_name = "KratosMultiphysics.PfemFluidDynamicsApplication"
full_module_name = python_module_name + "." + self.settings["meshing_strategy"]["python_module"].GetString()
meshing_module = import_module(full_module_name)
#meshing_module = __import__(self.settings["meshing_strategy"]["python_module"].GetString())
self.MeshingStrategy = meshing_module.CreateMeshingStrategy(self.main_model_part, self.settings["meshing_strategy"])
self.active_remeshing = False
if( self.settings["meshing_strategy"]["remesh"].GetBool() or self.settings["meshing_strategy"]["transfer"].GetBool() ):
self.active_remeshing = True
print("::[Meshing_Domain]:: (",self.settings["model_part_name"].GetString()," ) -BUILT-")
####
def Initialize(self):
print("::[Meshing Domain]:: -START-")
self.dimension = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION]
# Set MeshingParameters
self.SetMeshingParameters()
        # Meshing Strategy
self.MeshingStrategy.SetEchoLevel(self.echo_level)
self.MeshingStrategy.Initialize(self.MeshingParameters, self.dimension)
print("::[Meshing Domain]:: -END- ")
####
#
def SetInfoParameters(self):
# Create InfoParameters
self.InfoParameters = KratosDelaunay.MeshingInfoParameters()
self.InfoParameters.Initialize()
#
def SetTransferParameters(self):
# Create TransferParameters
self.TransferParameters = KratosDelaunay.TransferParameters()
transfer_variables = self.settings["elemental_variables_to_transfer"]
#for variable in transfer_variables:
# self.TransferParameters.SetVariable( KratosMultiphysics.KratosGlobals.GetVariable( variable.GetString() ) )
for i in range(0, transfer_variables.size() ):
self.TransferParameters.SetVariable(KratosMultiphysics.KratosGlobals.GetVariable(transfer_variables[i].GetString()))
#
def SetRefiningParameters(self):
# Create RefiningParameters
self.RefiningParameters = KratosDelaunay.RefiningParameters()
self.RefiningParameters.Initialize()
# parameters
self.RefiningParameters.SetAlphaParameter(self.settings["alpha_shape"].GetDouble())
# set mesh refinement in box
size = self.dimension
refining_box = self.settings["spatial_refining_box"]
if(refining_box["use_refining_box"].GetBool()):
self.MeshingParameters.SetUseRefiningBox(True)
self.MeshingParameters.SetRefiningBoxMinimumPoint(refining_box["lower_point"][0].GetDouble(),refining_box["lower_point"][1].GetDouble(),refining_box["lower_point"][2].GetDouble())
self.MeshingParameters.SetRefiningBoxMaximumPoint(refining_box["upper_point"][0].GetDouble(),refining_box["upper_point"][1].GetDouble(),refining_box["upper_point"][2].GetDouble())
self.MeshingParameters.SetRefiningBoxTimeInterval(refining_box["initial_time"].GetDouble(),refining_box["final_time"].GetDouble())
self.MeshingParameters.SetRefiningBoxMeshSize(refining_box["mesh_size"].GetDouble())
removing_options = KratosMultiphysics.Flags()
#remove nodes
remove_nodes = self.settings["refining_parameters"]["remove_nodes"]
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES, remove_nodes["apply_removal"].GetBool())
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_DISTANCE, remove_nodes["on_distance"].GetBool())
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_ERROR, remove_nodes["on_error"].GetBool())
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_NODES_ON_THRESHOLD, remove_nodes["on_threshold"].GetBool())
#remove boundary
remove_boundary = self.settings["refining_parameters"]["remove_boundary"]
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES, remove_boundary["apply_removal"].GetBool())
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_DISTANCE, remove_boundary["on_distance"].GetBool())
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_ERROR, remove_boundary["on_error"].GetBool())
removing_options.Set(KratosDelaunay.MesherUtilities.REMOVE_BOUNDARY_NODES_ON_THRESHOLD, remove_boundary["on_threshold"].GetBool())
refining_options = KratosMultiphysics.Flags()
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE, self.settings["meshing_strategy"]["refine"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ADD_NODES, self.settings["refining_parameters"]["add_nodes"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_INSERT_NODES, self.settings["refining_parameters"]["insert_nodes"].GetBool())
#refine elements
refine_elements = self.settings["refining_parameters"]["refine_elements"]
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS, refine_elements["apply_refinement"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_DISTANCE, refine_elements["on_distance"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_ERROR, refine_elements["on_error"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_ELEMENTS_ON_THRESHOLD, refine_elements["on_threshold"].GetBool())
#refine boundary
refine_boundary = self.settings["refining_parameters"]["refine_boundary"]
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY, refine_boundary["apply_refinement"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_DISTANCE, refine_boundary["on_distance"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_ERROR, refine_boundary["on_error"].GetBool())
refining_options.Set(KratosDelaunay.MesherUtilities.REFINE_BOUNDARY_ON_THRESHOLD, refine_boundary["on_threshold"].GetBool())
self.RefiningParameters.SetRefiningOptions(refining_options)
self.RefiningParameters.SetRemovingOptions(removing_options)
#
def SetMeshingParameters(self):
# Create MeshingParameters
self.MeshingParameters = KratosDelaunay.MeshingParameters()
self.MeshingParameters.Initialize()
self.MeshingParameters.SetSubModelPartName(self.settings["model_part_name"].GetString())
if(self.active_remeshing):
self.MeshingParameters.SetAlphaParameter(self.settings["alpha_shape"].GetDouble())
self.MeshingParameters.SetOffsetFactor(self.settings["offset_factor"].GetDouble())
self.SetInfoParameters()
self.SetTransferParameters()
self.SetRefiningParameters()
self.MeshingParameters.SetInfoParameters(self.InfoParameters)
self.MeshingParameters.SetTransferParameters(self.TransferParameters)
self.MeshingParameters.SetRefiningParameters(self.RefiningParameters)
bounding_box = self.settings["spatial_bounding_box"]
if(bounding_box["use_bounding_box"].GetBool()):
self.MeshingParameters.SetUseBoundingBox(True)
self.MeshingParameters.SetBoundingBoxLowerPoint(bounding_box["lower_point"][0].GetDouble(),bounding_box["lower_point"][1].GetDouble(),bounding_box["lower_point"][2].GetDouble())
self.MeshingParameters.SetBoundingBoxUpperPoint(bounding_box["upper_point"][0].GetDouble(),bounding_box["upper_point"][1].GetDouble(),bounding_box["upper_point"][2].GetDouble())
self.MeshingParameters.SetBoundingBoxTimeInterval(bounding_box["initial_time"].GetDouble(),bounding_box["final_time"].GetDouble())
#
def ExecuteMeshing(self):
if( self.active_remeshing ):
self.MeshingStrategy.GenerateMesh()
#
def Check(self):
# set mesher utilities
self.mesher_utils = KratosDelaunay.MesherUtilities()
# set the domain labels to mesh mesher
critical_mesh_size = self.settings["refining_parameters"]["critical_size"].GetDouble()
critical_radius = self.mesher_utils.CheckCriticalRadius(self.main_model_part,critical_mesh_size)
print(" CriticalRadius ", critical_radius)
#
def Active(self):
return self.active_remeshing
#
def SetEchoLevel(self, echo_level):
self.echo_level = echo_level
#
def GetVariables(self):
nodal_variables = []
transfer_variables = self.settings["elemental_variables_to_transfer"]
for i in range(0, transfer_variables.size() ):
nodal_variables.append(transfer_variables[i].GetString())
return nodal_variables
#
def ComputeAverageMeshParameters(self):
        MesherUtils = KratosDelaunay.MesherUtilities()
self.domain_volume = MesherUtils.ComputeModelPartVolume(self.main_model_part)
self.element_mean_volume = 0
number_of_elements = self.main_model_part.NumberOfElements()
nodes_for_element = self.main_model_part.ProcessInfo[KratosMultiphysics.SPACE_DIMENSION] + 1
if(number_of_elements != 0):
self.element_mean_volume = self.domain_volume/float(number_of_elements*nodes_for_element)
self.RefiningParameters.SetMeanVolume(self.element_mean_volume)
#
def GetMeanVolume(self):
return self.element_mean_volume
#
def GetTotalVolume(self):
return self.domain_volume
#
def ComputeInitialAverageMeshParameters(self):
self.mesh_parameters = KratosPfemFluid.ComputeAveragePfemMeshParameters(self.main_model_part, self.MeshingParameters,self.echo_level)
self.mesh_parameters.Execute()
# numFluid=0
# mean_nodal_h=0
# for node in self.main_model_part.Nodes:
# if (node.Is(KratosMultiphysics.FLUID)):
# numFluid+=1
# nodal_h=node.GetSolutionStepValue(KratosMultiphysics.NODAL_H)
# mean_nodal_h+=nodal_h
# mean_nodal_h*=1.0/numFluid;
# self.RefiningParameters.SetCriticalRadius(mean_nodal_h)
# self.RefiningParameters.SetInitialRadius(mean_nodal_h)
# delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]
# self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.INITIAL_DELTA_TIME,delta_time)
# self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.CURRENT_DELTA_TIME,delta_time)
# self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.PREVIOUS_DELTA_TIME,delta_time)
# self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.TIME_INTERVAL_CHANGED,False)
def SetTimeDataOnProcessInfo(self):
delta_time = self.main_model_part.ProcessInfo[KratosMultiphysics.DELTA_TIME]
self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.INITIAL_DELTA_TIME,delta_time)
self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.CURRENT_DELTA_TIME,delta_time)
self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.PREVIOUS_DELTA_TIME,delta_time)
self.main_model_part.ProcessInfo.SetValue(KratosPfemFluid.TIME_INTERVAL_CHANGED,False)
#
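#
# Minimal construction sketch (kept as comments because it needs a full Kratos
# installation and a populated model part); the model part name below is an
# illustrative value, and in practice the PFEM solver drives Initialize() and
# ExecuteMeshing() rather than user code.
#
# model = KratosMultiphysics.Model()
# fluid_model_part = model.CreateModelPart("PfemFluidModelPart")
# custom_settings = KratosMultiphysics.Parameters("""{
#     "model_part_name" : "PfemFluidModelPart"
# }""")
# meshing_domain = CreateMeshingDomain(fluid_model_part, custom_settings)
# meshing_domain.SetEchoLevel(0)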
|
the-stack_0_776 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import pandas as pd
from apache_beam.dataframe import doctests
from apache_beam.dataframe.frames import PD_VERSION
from apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function
@unittest.skipIf(sys.platform == 'win32', '[BEAM-10626]')
class DoctestTest(unittest.TestCase):
def test_ndframe_tests(self):
# IO methods are tested in io_test.py
skip_writes = {
f'pandas.core.generic.NDFrame.{name}': ['*']
for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')
}
result = doctests.testmod(
pd.core.generic,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.generic.NDFrame.head': ['*'],
'pandas.core.generic.NDFrame.shift': [
'df.shift(periods=3)',
'df.shift(periods=3, fill_value=0)',
],
'pandas.core.generic.NDFrame.tail': ['*'],
'pandas.core.generic.NDFrame.take': ['*'],
'pandas.core.generic.NDFrame.values': ['*'],
'pandas.core.generic.NDFrame.tz_localize': [
"s.tz_localize('CET', ambiguous='infer')",
# np.array is not a deferred object. This use-case is possible
# with a deferred Series though, which is tested in
# frames_test.py
"s.tz_localize('CET', ambiguous=np.array([True, True, False]))",
],
'pandas.core.generic.NDFrame.truncate': [
# These inputs rely on tail (wont implement, order
# sensitive) for verification
"df.tail()",
"df.truncate(before=pd.Timestamp('2016-01-05'),\n"
" after=pd.Timestamp('2016-01-10')).tail()",
"df.truncate('2016-01-05', '2016-01-10').tail()",
"df.loc['2016-01-05':'2016-01-10', :].tail()"
],
'pandas.core.generic.NDFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a')",
# Relies on method='pad'
# value=None is not valid for pandas < 1.4
"s.replace('a', None)",
# Implicitly uses method='pad', but output doesn't rely on that
                # behavior. Verified independently in
# frames_test.py::DeferredFrameTest::test_replace
"df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})"
],
'pandas.core.generic.NDFrame.fillna': [
'df.fillna(method=\'ffill\')',
'df.fillna(method="ffill")',
'df.fillna(value=values, limit=1)',
],
'pandas.core.generic.NDFrame.sort_values': ['*'],
'pandas.core.generic.NDFrame.mask': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.where': [
'df.where(m, -df) == np.where(m, df, -df)'
],
'pandas.core.generic.NDFrame.interpolate': ['*'],
'pandas.core.generic.NDFrame.resample': ['*'],
'pandas.core.generic.NDFrame.rolling': ['*'],
# argsort wont implement
'pandas.core.generic.NDFrame.abs': [
'df.loc[(df.c - 43).abs().argsort()]',
],
'pandas.core.generic.NDFrame.reindex': ['*'],
'pandas.core.generic.NDFrame.pct_change': ['*'],
'pandas.core.generic.NDFrame.asof': ['*'],
'pandas.core.generic.NDFrame.infer_objects': ['*'],
'pandas.core.generic.NDFrame.ewm': ['*'],
'pandas.core.generic.NDFrame.expanding': ['*'],
'pandas.core.generic.NDFrame.get': ['*'],
},
not_implemented_ok={
'pandas.core.generic.NDFrame.asof': ['*'],
'pandas.core.generic.NDFrame.at_time': ['*'],
'pandas.core.generic.NDFrame.between_time': ['*'],
'pandas.core.generic.NDFrame.ewm': ['*'],
'pandas.core.generic.NDFrame.expanding': ['*'],
'pandas.core.generic.NDFrame.flags': ['*'],
'pandas.core.generic.NDFrame.rank': ['*'],
'pandas.core.generic.NDFrame.reindex_like': ['*'],
'pandas.core.generic.NDFrame.replace': ['*'],
'pandas.core.generic.NDFrame.sample': ['*'],
'pandas.core.generic.NDFrame.set_flags': ['*'],
'pandas.core.generic.NDFrame.squeeze': ['*'],
'pandas.core.generic.NDFrame.truncate': ['*'],
},
skip={
# Internal test
'pandas.core.generic.NDFrame._set_axis_name': ['*'],
# Fails to construct test series. asfreq is not implemented anyway.
'pandas.core.generic.NDFrame.asfreq': ['*'],
'pandas.core.generic.NDFrame.astype': ['*'],
'pandas.core.generic.NDFrame.convert_dtypes': ['*'],
'pandas.core.generic.NDFrame.copy': ['*'],
'pandas.core.generic.NDFrame.droplevel': ['*'],
'pandas.core.generic.NDFrame.get': ['*'],
'pandas.core.generic.NDFrame.rank': [
# Modified dataframe
'df'
],
'pandas.core.generic.NDFrame.rename': [
# Seems to be an upstream bug. The actual error has a different
# message:
# TypeError: Index(...) must be called with a collection of
# some kind, 2 was passed
# pandas doctests only verify the type of exception
'df.rename(2)'
],
# For pandas >= 1.4, rename is changed to _rename
'pandas.core.generic.NDFrame._rename': [
# Seems to be an upstream bug. The actual error has a different
# message:
# TypeError: Index(...) must be called with a collection of
# some kind, 2 was passed
# pandas doctests only verify the type of exception
'df.rename(2)'
],
# Tests rely on setting index
'pandas.core.generic.NDFrame.rename_axis': ['*'],
# Raises right exception, but testing framework has matching issues.
'pandas.core.generic.NDFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.generic.NDFrame.squeeze': ['*'],
# NameError
'pandas.core.generic.NDFrame.resample': ['df'],
# Skipped so we don't need to install natsort
'pandas.core.generic.NDFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
**skip_writes
})
self.assertEqual(result.failed, 0)
def test_dataframe_tests(self):
result = doctests.testmod(
pd.core.frame,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.frame.DataFrame.T': ['*'],
'pandas.core.frame.DataFrame.cummax': ['*'],
'pandas.core.frame.DataFrame.cummin': ['*'],
'pandas.core.frame.DataFrame.cumsum': ['*'],
'pandas.core.frame.DataFrame.cumprod': ['*'],
'pandas.core.frame.DataFrame.diff': ['*'],
'pandas.core.frame.DataFrame.fillna': [
'df.fillna(method=\'ffill\')',
'df.fillna(method="ffill")',
'df.fillna(value=values, limit=1)',
],
'pandas.core.frame.DataFrame.items': ['*'],
'pandas.core.frame.DataFrame.itertuples': ['*'],
'pandas.core.frame.DataFrame.iterrows': ['*'],
'pandas.core.frame.DataFrame.iteritems': ['*'],
# default keep is 'first'
'pandas.core.frame.DataFrame.nlargest': [
"df.nlargest(3, 'population')",
"df.nlargest(3, ['population', 'GDP'])",
"df.nlargest(3, 'population', keep='last')"
],
'pandas.core.frame.DataFrame.nsmallest': [
"df.nsmallest(3, 'population')",
"df.nsmallest(3, ['population', 'GDP'])",
"df.nsmallest(3, 'population', keep='last')",
],
'pandas.core.frame.DataFrame.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a')",
# Relies on method='pad'
# value=None is not valid for pandas < 1.4
"s.replace('a', None)",
# Implicitly uses method='pad', but output doesn't rely on that
                # behavior. Verified independently in
# frames_test.py::DeferredFrameTest::test_replace
"df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})"
],
'pandas.core.frame.DataFrame.to_records': ['*'],
'pandas.core.frame.DataFrame.to_dict': ['*'],
'pandas.core.frame.DataFrame.to_numpy': ['*'],
'pandas.core.frame.DataFrame.to_string': ['*'],
'pandas.core.frame.DataFrame.transpose': ['*'],
'pandas.core.frame.DataFrame.shape': ['*'],
'pandas.core.frame.DataFrame.shift': [
'df.shift(periods=3)',
'df.shift(periods=3, fill_value=0)',
],
'pandas.core.frame.DataFrame.unstack': ['*'],
'pandas.core.frame.DataFrame.memory_usage': ['*'],
'pandas.core.frame.DataFrame.info': ['*'],
# Not equal to df.agg('mode', axis='columns', numeric_only=True)
# because there can be multiple columns if a row has more than one
# mode
'pandas.core.frame.DataFrame.mode': [
"df.mode(axis='columns', numeric_only=True)"
],
'pandas.core.frame.DataFrame.append': [
'df.append(df2, ignore_index=True)',
"for i in range(5):\n" +
" df = df.append({'A': i}, ignore_index=True)",
],
'pandas.core.frame.DataFrame.sort_index': ['*'],
'pandas.core.frame.DataFrame.sort_values': ['*'],
'pandas.core.frame.DataFrame.melt': [
"df.melt(id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=['A'], value_vars=['B', 'C'])",
"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])",
"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"df.melt(id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
# Most keep= options are order-sensitive
'pandas.core.frame.DataFrame.drop_duplicates': ['*'],
'pandas.core.frame.DataFrame.duplicated': [
'df.duplicated()',
"df.duplicated(keep='last')",
"df.duplicated(subset=['brand'])",
],
'pandas.core.frame.DataFrame.reindex': ['*'],
'pandas.core.frame.DataFrame.dot': [
# reindex not supported
's2 = s.reindex([1, 0, 2, 3])',
],
'pandas.core.frame.DataFrame.resample': ['*'],
'pandas.core.frame.DataFrame.values': ['*'],
},
not_implemented_ok={
'pandas.core.frame.DataFrame.transform': [
# str arg not supported. Tested with np.sum in
# frames_test.py::DeferredFrameTest::test_groupby_transform_sum
"df.groupby('Date')['Data'].transform('sum')",
],
'pandas.core.frame.DataFrame.swaplevel': ['*'],
'pandas.core.frame.DataFrame.melt': ['*'],
'pandas.core.frame.DataFrame.reindex_axis': ['*'],
'pandas.core.frame.DataFrame.round': [
'df.round(decimals)',
],
# We should be able to support pivot and pivot_table for categorical
# columns
'pandas.core.frame.DataFrame.pivot': ['*'],
# Trivially elementwise for axis=columns. Relies on global indexing
# for axis=rows.
# Difficult to determine proxy, need to inspect function
'pandas.core.frame.DataFrame.apply': ['*'],
# Cross-join not implemented
'pandas.core.frame.DataFrame.merge': [
"df1.merge(df2, how='cross')"
],
# TODO(BEAM-11711)
'pandas.core.frame.DataFrame.set_index': [
"df.set_index([s, s**2])",
],
'pandas.core.frame.DataFrame.set_axis': [
"df.set_axis(range(0,2), axis='index')",
],
# TODO(BEAM-12495)
'pandas.core.frame.DataFrame.value_counts': [
'df.value_counts(dropna=False)'
],
},
skip={
# DataFrame construction from a dictionary and
# Series requires using the len() function, which
# is a non-deferred operation that we do not allow
'pandas.core.frame.DataFrame': [
'pd.DataFrame(data=d, index=[0, 1, 2, 3])',
],
# s2 created with reindex
'pandas.core.frame.DataFrame.dot': [
'df.dot(s2)',
],
'pandas.core.frame.DataFrame.resample': ['df'],
'pandas.core.frame.DataFrame.asfreq': ['*'],
# Throws NotImplementedError when modifying df
'pandas.core.frame.DataFrame.axes': [
# Returns deferred index.
'df.axes',
],
# Skipped because the relies on loc to set cells in df2
'pandas.core.frame.DataFrame.compare': ['*'],
'pandas.core.frame.DataFrame.cov': [
# Relies on setting entries ahead of time.
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
'df.cov(min_periods=12)',
],
'pandas.core.frame.DataFrame.rename': [
# Returns deferred index.
'df.index',
'df.rename(index=str).index',
],
'pandas.core.frame.DataFrame.set_index': [
# TODO(BEAM-11711): This could pass in the index as
# a DeferredIndex, and we should fail it as order-sensitive.
"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])",
],
'pandas.core.frame.DataFrame.set_axis': [
# This should pass as set_axis(axis='columns')
# and fail with set_axis(axis='index')
"df.set_axis(['a', 'b', 'c'], axis='index')"
],
'pandas.core.frame.DataFrame.to_markdown': ['*'],
'pandas.core.frame.DataFrame.to_parquet': ['*'],
# Raises right exception, but testing framework has matching issues.
# Tested in `frames_test.py`.
'pandas.core.frame.DataFrame.insert': [
'df',
'df.insert(1, "newcol", [99, 99])',
'df.insert(0, "col1", [100, 100], allow_duplicates=True)'
],
'pandas.core.frame.DataFrame.to_records': [
'df.index = df.index.rename("I")',
'index_dtypes = f"<S{df.index.str.len().max()}"', # 1.x
'index_dtypes = "<S{}".format(df.index.str.len().max())', #0.x
'df.to_records(index_dtypes=index_dtypes)',
],
# These tests use the static method pd.pivot_table, which doesn't
# actually raise NotImplementedError
'pandas.core.frame.DataFrame.pivot_table': ['*'],
# Expected to raise a ValueError, but we raise NotImplementedError
'pandas.core.frame.DataFrame.pivot': [
"df.pivot(index='foo', columns='bar', values='baz')"
],
'pandas.core.frame.DataFrame.append': [
'df',
# pylint: disable=line-too-long
"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n"
" ignore_index=True)"
],
'pandas.core.frame.DataFrame.eval': ['df'],
'pandas.core.frame.DataFrame.melt': [
"df.columns = [list('ABC'), list('DEF')]", "df"
],
'pandas.core.frame.DataFrame.merge': [
# Order-sensitive index, checked in frames_test.py.
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Raises right exception, but testing framework has matching issues.
'pandas.core.frame.DataFrame.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],
# Skipped because "seen_wont_implement" is reset before getting to
# these calls, so the NameError they raise is not ignored.
'pandas.core.frame.DataFrame.T': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
'pandas.core.frame.DataFrame.transpose': [
'df1_transposed.dtypes', 'df2_transposed.dtypes'
],
# Skipped because the relies on iloc to set a cell to NA. Test is
# replicated in frames_test::DeferredFrameTest::test_applymap.
'pandas.core.frame.DataFrame.applymap': [
'df_copy.iloc[0, 0] = pd.NA',
"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')",
],
# Skipped so we don't need to install natsort
'pandas.core.frame.DataFrame.sort_values': [
'from natsort import index_natsorted',
'df.sort_values(\n'
' by="time",\n'
' key=lambda x: np.argsort(index_natsorted(df["time"]))\n'
')'
],
# Mode that we don't yet support, documentation added in pandas
# 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)
'pandas.core.frame.DataFrame.aggregate': [
"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))"
],
})
self.assertEqual(result.failed, 0)
def test_series_tests(self):
result = doctests.testmod(
pd.core.series,
use_beam=False,
report=True,
wont_implement_ok={
'pandas.core.series.Series.__array__': ['*'],
'pandas.core.series.Series.array': ['*'],
'pandas.core.series.Series.cummax': ['*'],
'pandas.core.series.Series.cummin': ['*'],
'pandas.core.series.Series.cumsum': ['*'],
'pandas.core.series.Series.cumprod': ['*'],
'pandas.core.series.Series.diff': ['*'],
'pandas.core.series.Series.dot': [
's.dot(arr)', # non-deferred result
],
'pandas.core.series.Series.fillna': [
'df.fillna(method=\'ffill\')',
'df.fillna(method="ffill")',
'df.fillna(value=values, limit=1)',
],
'pandas.core.series.Series.info': ['*'],
'pandas.core.series.Series.items': ['*'],
'pandas.core.series.Series.iteritems': ['*'],
# default keep is 'first'
'pandas.core.series.Series.nlargest': [
"s.nlargest()",
"s.nlargest(3)",
"s.nlargest(3, keep='last')",
],
'pandas.core.series.Series.memory_usage': ['*'],
'pandas.core.series.Series.nsmallest': [
"s.nsmallest()",
"s.nsmallest(3)",
"s.nsmallest(3, keep='last')",
],
'pandas.core.series.Series.pop': ['*'],
'pandas.core.series.Series.searchsorted': ['*'],
'pandas.core.series.Series.shift': [
'df.shift(periods=3)',
'df.shift(periods=3, fill_value=0)',
],
'pandas.core.series.Series.take': ['*'],
'pandas.core.series.Series.to_dict': ['*'],
'pandas.core.series.Series.unique': ['*'],
'pandas.core.series.Series.unstack': ['*'],
'pandas.core.series.Series.values': ['*'],
'pandas.core.series.Series.view': ['*'],
'pandas.core.series.Series.append': [
's1.append(s2, ignore_index=True)',
],
'pandas.core.series.Series.replace': [
"s.replace([1, 2], method='bfill')",
# Relies on method='pad'
"s.replace('a')",
# Relies on method='pad'
# value=None is not valid for pandas < 1.4
"s.replace('a', None)",
# Implicitly uses method='pad', but output doesn't rely on that
                # behavior. Verified independently in
# frames_test.py::DeferredFrameTest::test_replace
"df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})"
],
'pandas.core.series.Series.sort_index': ['*'],
'pandas.core.series.Series.sort_values': ['*'],
'pandas.core.series.Series.argmax': ['*'],
'pandas.core.series.Series.argmin': ['*'],
'pandas.core.series.Series.drop_duplicates': [
's.drop_duplicates()',
"s.drop_duplicates(keep='last')",
],
'pandas.core.series.Series.reindex': ['*'],
'pandas.core.series.Series.autocorr': ['*'],
'pandas.core.series.Series.repeat': ['s.repeat([1, 2, 3])'],
'pandas.core.series.Series.resample': ['*'],
'pandas.core.series.Series': ['ser.iloc[0] = 999'],
},
not_implemented_ok={
'pandas.core.series.Series.transform': [
# str arg not supported. Tested with np.sum in
# frames_test.py::DeferredFrameTest::test_groupby_transform_sum
"df.groupby('Date')['Data'].transform('sum')",
],
'pandas.core.series.Series.groupby': [
'ser.groupby(["a", "b", "a", "b"]).mean()',
'ser.groupby(["a", "b", "a", np.nan]).mean()',
'ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()',
],
'pandas.core.series.Series.swaplevel' :['*']
},
skip={
# Relies on setting values with iloc
'pandas.core.series.Series': ['ser', 'r'],
'pandas.core.series.Series.groupby': [
# TODO(BEAM-11393): This example requires aligning two series
# with non-unique indexes. It only works in pandas because
# pandas can recognize the indexes are identical and elide the
# alignment.
'ser.groupby(ser > 100).mean()',
],
'pandas.core.series.Series.asfreq': ['*'],
# error formatting
'pandas.core.series.Series.append': [
's1.append(s2, verify_integrity=True)',
],
'pandas.core.series.Series.cov': [
# Differs in LSB on jenkins.
"s1.cov(s2)",
],
# Skipped idxmax/idxmin due an issue with the test framework
'pandas.core.series.Series.idxmin': ['s.idxmin()'],
'pandas.core.series.Series.idxmax': ['s.idxmax()'],
'pandas.core.series.Series.duplicated': ['*'],
'pandas.core.series.Series.set_axis': ['*'],
'pandas.core.series.Series.nonzero': ['*'],
'pandas.core.series.Series.pop': ['ser'], # testing side effect
# Raises right exception, but testing framework has matching issues.
'pandas.core.series.Series.replace': [
"df.replace({'a string': 'new value', True: False}) # raises"
],
'pandas.core.series.Series.searchsorted': [
# This doctest seems to be incorrectly parsed.
"x = pd.Categorical(['apple', 'bread', 'bread',"
],
'pandas.core.series.Series.to_csv': ['*'],
'pandas.core.series.Series.to_markdown': ['*'],
'pandas.core.series.Series.update': ['*'],
'pandas.core.series.Series.view': [
# Inspection after modification.
's'
],
'pandas.core.series.Series.resample': ['df'],
})
self.assertEqual(result.failed, 0)
def test_string_tests(self):
if PD_VERSION < (1, 2):
module = pd.core.strings
else:
# Definitions were moved to accessor in pandas 1.2.0
module = pd.core.strings.accessor
module_name = module.__name__
result = doctests.testmod(
module,
use_beam=False,
wont_implement_ok={
# These methods can accept deferred series objects, but not lists
f'{module_name}.StringMethods.cat': [
"s.str.cat(['A', 'B', 'C', 'D'], sep=',')",
"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')",
"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')"
],
f'{module_name}.StringMethods.repeat': [
's.str.repeat(repeats=[1, 2, 3])'
],
f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],
# get_dummies pandas examples are not casted to CategoricalDtype
# Must be CategoricalDtype to work in Beam
f'{module_name}.StringMethods.get_dummies': ['*'],
f'{module_name}.str_get_dummies': ['*'],
f'{module_name}.StringMethods': ['s.str.split("_")'],
f'{module_name}.StringMethods.rsplit': ['*'],
f'{module_name}.StringMethods.split': ['*'],
},
skip={
# count() on Series with a NaN produces mismatched type if we
# have a NaN-only partition.
f'{module_name}.StringMethods.count': ["s.str.count('a')"],
f'{module_name}.str_count': ["s.str.count('a')"],
# Bad test strings in pandas 1.1.x
f'{module_name}.str_replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
f'{module_name}.StringMethods.replace': [
"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)"
],
# output has incorrect formatting in 1.2.x
f'{module_name}.StringMethods.extractall': ['*']
})
self.assertEqual(result.failed, 0)
def test_datetime_tests(self):
# TODO(BEAM-10721)
indexes_accessors_result = doctests.testmod(
pd.core.indexes.accessors,
use_beam=False,
skip={
'pandas.core.indexes.accessors.TimedeltaProperties': [
# Seems like an upstream bug. The property is 'second'
'seconds_series.dt.seconds'
],
# TODO(BEAM-12530): Test data creation fails for these
# s = pd.Series(pd.to_timedelta(np.arange(5), unit="d"))
# pylint: disable=line-too-long
'pandas.core.indexes.accessors.DatetimeProperties.to_pydatetime': [
'*'
],
'pandas.core.indexes.accessors.TimedeltaProperties.components': [
'*'
],
'pandas.core.indexes.accessors.TimedeltaProperties.to_pytimedelta': [
'*'
],
# pylint: enable=line-too-long
})
datetimelike_result = doctests.testmod(
pd.core.arrays.datetimelike, use_beam=False)
datetime_result = doctests.testmod(
pd.core.arrays.datetimes,
use_beam=False,
wont_implement_ok={
'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],
# All tz_localize tests use unsupported values for ambiguous=
          # Verified separately in
# frames_test.py::DeferredFrameTest::test_dt_tz_localize_*
'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],
},
not_implemented_ok={
# Verifies index version of this method
'pandas.core.arrays.datetimes.DatetimeArray.to_period': [
'df.index.to_period("M")'
],
})
self.assertEqual(indexes_accessors_result.failed, 0)
self.assertEqual(datetimelike_result.failed, 0)
self.assertEqual(datetime_result.failed, 0)
def test_indexing_tests(self):
result = doctests.testmod(
pd.core.indexing,
use_beam=False,
skip={
'pandas.core.indexing._IndexSlice': ['*'],
'pandas.core.indexing.IndexingMixin.at': ['*'],
'pandas.core.indexing.IndexingMixin.iat': ['*'],
'pandas.core.indexing.IndexingMixin.iloc': ['*'],
'pandas.core.indexing.IndexingMixin.loc': ['*'],
'pandas.core.indexing._AtIndexer': ['*'],
'pandas.core.indexing._LocIndexer': ['*'],
'pandas.core.indexing._iAtIndexer': ['*'],
'pandas.core.indexing._iLocIndexer': ['*'],
})
self.assertEqual(result.failed, 0)
def test_groupby_tests(self):
result = doctests.testmod(
pd.core.groupby.groupby,
use_beam=False,
wont_implement_ok={
'pandas.core.groupby.groupby.GroupBy.head': ['*'],
'pandas.core.groupby.groupby.GroupBy.tail': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': ['*'],
'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],
'pandas.core.groupby.groupby.GroupBy.resample': ['*'],
},
not_implemented_ok={
'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],
'pandas.core.groupby.groupby.GroupBy.sample': ['*'],
'pandas.core.groupby.groupby.GroupBy.rank': ['*'],
'pandas.core.groupby.groupby.GroupBy.nth': [
"df.groupby('A', as_index=False).nth(1)",
],
},
skip={
# Uses iloc to mutate a DataFrame
'pandas.core.groupby.groupby.GroupBy.resample': [
'df.iloc[2, 0] = 5',
'df',
],
# df is reassigned
'pandas.core.groupby.groupby.GroupBy.rank': ['df'],
# TODO: Raise wont implement for list passed as a grouping column
# Currently raises unhashable type: list
'pandas.core.groupby.groupby.GroupBy.ngroup': [
'df.groupby(["A", [1,1,2,3,2,1]]).ngroup()'
],
})
self.assertEqual(result.failed, 0)
result = doctests.testmod(
pd.core.groupby.generic,
use_beam=False,
wont_implement_ok={
# Returns an array by default, not a Series. WontImplement
# (non-deferred)
'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],
# TODO: Is take actually deprecated?
'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [
"s.nsmallest(3, keep='last')",
"s.nsmallest(3)",
"s.nsmallest()",
],
'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [
"s.nlargest(3, keep='last')",
"s.nlargest(3)",
"s.nlargest()",
],
'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [
'df.fillna(method=\'ffill\')',
'df.fillna(method="ffill")',
'df.fillna(value=values, limit=1)',
],
'pandas.core.groupby.generic.SeriesGroupBy.fillna': [
'df.fillna(method=\'ffill\')',
'df.fillna(method="ffill")',
'df.fillna(value=values, limit=1)',
],
},
not_implemented_ok={
'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.apply': ['*'],
},
skip={
'pandas.core.groupby.generic.SeriesGroupBy.cov': [
# Floating point comparison fails
's1.cov(s2)',
],
'pandas.core.groupby.generic.DataFrameGroupBy.cov': [
# Mutates input DataFrame with loc
# TODO: Replicate in frames_test.py
"df.loc[df.index[:5], 'a'] = np.nan",
"df.loc[df.index[5:10], 'b'] = np.nan",
"df.cov(min_periods=12)",
],
# These examples rely on grouping by a list
'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],
'pandas.core.groupby.generic.SeriesGroupBy.transform': [
# Dropping invalid columns during a transform is unsupported.
'grouped.transform(lambda x: (x - x.mean()) / x.std())'
],
'pandas.core.groupby.generic.DataFrameGroupBy.transform': [
# Dropping invalid columns during a transform is unsupported.
'grouped.transform(lambda x: (x - x.mean()) / x.std())'
],
# Skipped idxmax/idxmin due to an issue with the test framework
'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['s.idxmin()'],
'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['s.idxmax()'],
# Uses as_index, which is currently not_implemented
'pandas.core.groupby.generic.DataFrameGroupBy.value_counts': [
"df.groupby('gender', as_index=False).value_counts()",
# pylint: disable=line-too-long
"df.groupby('gender', as_index=False).value_counts(normalize=True)",
],
})
self.assertEqual(result.failed, 0)
def test_top_level(self):
tests = {
name: func.__doc__
for (name, func) in pd.__dict__.items()
if _is_top_level_function(func) and getattr(func, '__doc__', None)
}
# IO methods are tested in io_test.py
skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}
result = doctests.teststrings(
tests,
use_beam=False,
report=True,
not_implemented_ok={
'concat': ['pd.concat([s1, s2], ignore_index=True)'],
'crosstab': ['*'],
'cut': ['*'],
'eval': ['*'],
'get_dummies': ['*'],
'infer_freq': ['*'],
'lreshape': ['*'],
'melt': ['*'],
'merge': ["df1.merge(df2, how='cross')"],
'merge_asof': ['*'],
'pivot': ['*'],
'pivot_table': ['*'],
'qcut': ['*'],
'reset_option': ['*'],
'set_eng_float_format': ['*'],
'set_option': ['*'],
'to_numeric': ['*'],
'to_timedelta': ['*'],
'unique': ['*'],
'wide_to_long': ['*'],
},
wont_implement_ok={
'factorize': ['*'],
'to_datetime': ['s.head()'],
'to_pickle': ['*'],
'melt': [
"pd.melt(df, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])",
"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])",
"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])",
"pd.melt(df, id_vars=['A'], value_vars=['B'],\n" +
" var_name='myVarname', value_name='myValname')"
],
},
skip={
# error formatting
'concat': ['pd.concat([df5, df6], verify_integrity=True)'],
# doctest DeprecationWarning
'melt': ['df'],
# Order-sensitive re-indexing.
'merge': [
"df1.merge(df2, left_on='lkey', right_on='rkey')",
"df1.merge(df2, left_on='lkey', right_on='rkey',\n"
" suffixes=('_left', '_right'))",
"df1.merge(df2, how='left', on='a')",
],
# Not an actual test.
'option_context': ['*'],
'factorize': ['codes', 'uniques'],
# Bad top-level use of un-imported function.
'merge_ordered': [
'merge_ordered(df1, df2, fill_method="ffill", left_by="group")'
],
# Expected error.
'pivot': ["df.pivot(index='foo', columns='bar', values='baz')"],
# Never written.
'to_pickle': ['os.remove("./dummy.pkl")'],
**skip_reads
})
self.assertEqual(result.failed, 0)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_777 | # -*- coding: utf-8 -*-
"""Implements a class to be used for unit testing.
"""
import pathlib
from tlsmate.workers.eval_cipher_suites import ScanCipherSuites
from tlsmate.tlssuite import TlsSuiteTester
from tlsmate.tlssuite import TlsLibrary
ssl2_ck = [
"SSL_CK_RC4_128_WITH_MD5",
"SSL_CK_RC2_128_CBC_WITH_MD5",
"SSL_CK_IDEA_128_CBC_WITH_MD5",
"SSL_CK_DES_192_EDE3_CBC_WITH_MD5",
]
class TestCase(TlsSuiteTester):
"""Class used for tests with pytest.
For more information refer to the documentation of the TcRecorder class.
"""
sp_out_yaml = "profile_basic_ssl2"
recorder_yaml = "recorder_eval_cipher_suites_ssl2"
path = pathlib.Path(__file__)
server_cmd = (
"utils/start_openssl --version {library} --port {server_port} "
"--cert1 server-rsa --cert2 server-ecdsa --no-cert-chain "
"-- -www -cipher ALL -ssl2"
)
library = TlsLibrary.openssl1_0_2
server = "localhost"
def check_versions(self, versions):
assert len(versions) == 6
assert versions[0]["version"]["name"] == "SSL20"
assert versions[0]["support"] == "TRUE"
assert versions[1]["version"]["name"] == "SSL30"
assert versions[1]["support"] == "FALSE"
assert versions[2]["version"]["name"] == "TLS10"
assert versions[2]["support"] == "FALSE"
assert versions[3]["version"]["name"] == "TLS11"
assert versions[3]["support"] == "FALSE"
assert versions[4]["version"]["name"] == "TLS12"
assert versions[4]["support"] == "FALSE"
assert versions[5]["version"]["name"] == "TLS13"
assert versions[5]["support"] == "FALSE"
for a, b in zip(ssl2_ck, versions[0]["cipher_kinds"]):
assert a == b["name"]
def check_profile(self, profile):
self.check_versions(profile["versions"])
def run(self, tlsmate, is_replaying):
for vers in ["sslv2", "sslv3", "tls10", "tls11", "tls12", "tls13"]:
tlsmate.config.set(vers, True)
server_profile = tlsmate.server_profile
ScanCipherSuites(tlsmate).run()
self.check_profile(server_profile.make_serializable())
if __name__ == "__main__":
TestCase().entry(is_replaying=False)
|
the-stack_0_782 | """
[Python scripts for 3DTracker-FAB (www.3dtracker.org)]
Example 03: Converting 2D position to 3D
This script demonstrates how to convert 2D positions inside an ROI of an RGB image to 3D.
This type of conversion is useful when using 2D image-based object detection/tracking
algorithms to obtain the corresponding 3D object position/trace.
The example plots the 3D points falling in ROIs that surround a can in the 2D images.
Date last modified: 2018.10.03
"""
import numpy as np
import cv2
import contextlib
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import lib3dtracker as tdt # 3DTracker-FAB python library
fname_metadata = './example data/dual_d435_01/dual_d435_01.metadata.xml' # metadata file path
with contextlib.closing(tdt.DataReader(fname_metadata)) as d: # open data using 'with statement'
i_frame = 10; # video frame number to process
# show camera 1 RGB image and ROI
roi_cam1 = [120, 70, 40, 80] # ROI; left, top, width, height
[frame_rgb, frame_d] = d.get_rgbd_frame(i_frame, 0)
cv2.rectangle(frame_rgb, tuple(roi_cam1[0:2]), (roi_cam1[0]+roi_cam1[2], roi_cam1[1]+roi_cam1[3]), (0, 0, 255), 2)
cv2.imshow('rgb1', frame_rgb)
# show camera 2 RGB image and ROI
roi_cam2 = [170, 80, 50, 100] # ROI; left, top, width, height
[frame_rgb, frame_d] = d.get_rgbd_frame(i_frame, 1)
cv2.rectangle(frame_rgb, tuple(roi_cam2[0:2]), (roi_cam2[0]+roi_cam2[2], roi_cam2[1]+roi_cam2[3]), (0, 0, 255), 2)
cv2.imshow('rgb2', frame_rgb)
# get 3D point cloud in ROI
pc_roi1 = d.get_pc_from_rgbd(i_frame, 0, roi_cam1)
pc_roi2 = d.get_pc_from_rgbd(i_frame, 1, roi_cam2)
# prepare for plotting
app=pg.QtGui.QApplication([])
w = gl.GLViewWidget()
# read and plot merged point cloud
pc = d.get_mrgpc_frame(i_frame)
tdt.plot_pc(pc, w, 4)
# plot point cloud in ROIs
tdt.plot_pc(pc_roi1, w, 5, (1,0,0,1))
tdt.plot_pc(pc_roi2, w, 5, (1,0,0,1))
# plot axis
g=gl.GLAxisItem()
w.addItem(g)
# show the plot
w.setCameraPosition(distance = 0.5)
w.show()
print('Close the window to quit.')
pg.QtGui.QApplication.exec_()
|
the-stack_0_784 | # Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
from web.app import create_app
from dobot.utils import detect_dobot_port, dobot_is_on_port
DEFAULT_BAUDRATE = 115200
parser = argparse.ArgumentParser(description='Run Dobot WebAPI.')
parser.add_argument('--port', type=int, default=18001)
parser.add_argument('--host', type=str, default='0.0.0.0')
parser.add_argument('--dobot-port', type=str, default=None)
parser.add_argument('--tuner-file', type=str, default='/var/tmp/robot_tuner.dat')
parser.add_argument('--instance_path', type=str, default=None)
args = parser.parse_args()
if not args.dobot_port:
dobot_port = detect_dobot_port(DEFAULT_BAUDRATE)
if dobot_port is None:
print('dobot offline')
exit(1)
else:
dobot_port = args.dobot_port
if not dobot_is_on_port(dobot_port, DEFAULT_BAUDRATE):
print('dobot is not detected on port {}'.format(dobot_port))
exit(1)
app = create_app(dobot_port, args.tuner_file, args.instance_path)
if __name__ == '__main__':
app.run(port=args.port, host=args.host)
|
the-stack_0_785 | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library used by example_viz.py to generate visualizations.
This file illustrates the following:
- How to subclass an existing agent to add visualization functionality.
- For DQN we visualize the cumulative rewards and the Q-values for each
action (MyDQNAgent).
- For Rainbow we visualize the cumulative rewards and the Q-value
distributions for each action (MyRainbowAgent).
- How to subclass Runner to run in eval mode, lay out the different subplots,
generate the visualizations, and compile them into a video (MyRunner).
- The function `run()` is the main entrypoint for running everything.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import logging
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import run_experiment
from dopamine.utils import agent_visualizer
from dopamine.utils import atari_plotter
from dopamine.utils import bar_plotter
from dopamine.utils import line_plotter
import gin
import numpy as np
import tensorflow as tf
import tf_slim
import pdb
import matplotlib.pyplot as plt
class MyDQNAgent(dqn_agent.DQNAgent):
"""Sample DQN agent to visualize Q-values and rewards."""
def __init__(self, sess, num_actions, summary_writer=None):
super(MyDQNAgent, self).__init__(sess, num_actions,
summary_writer=summary_writer)
self.q_values = [[] for _ in range(num_actions)]
self.rewards = []
def step(self, reward, observation, step_number):
self.rewards.append(reward)
return super(MyDQNAgent, self).step(reward, observation, step_number)
def _select_action(self, step_number):
action = super(MyDQNAgent, self)._select_action(step_number)
# print("on selectionne ici")
q_vals = self._sess.run(self._net_outputs.q_values,
{self.state_ph: self.state})[0]
for i in range(len(q_vals)):
self.q_values[i].append(q_vals[i])
return action
def reload_checkpoint(self, checkpoint_path, use_legacy_checkpoint=False):
if use_legacy_checkpoint:
variables_to_restore = atari_lib.maybe_transform_variable_names(
tf.compat.v1.global_variables(), legacy_checkpoint_load=True)
else:
global_vars = set([x.name for x in tf.compat.v1.global_variables()])
ckpt_vars = [
'{}:0'.format(name)
for name, _ in tf.train.list_variables(checkpoint_path)
]
include_vars = list(global_vars.intersection(set(ckpt_vars)))
variables_to_restore = tf_slim.get_variables_to_restore(
include=include_vars)
if variables_to_restore:
reloader = tf.compat.v1.train.Saver(var_list=variables_to_restore)
reloader.restore(self._sess, checkpoint_path)
logging.info('Done restoring from %s', checkpoint_path)
else:
logging.info('Nothing to restore!')
def get_q_values(self):
return self.q_values
def get_rewards(self):
return [np.cumsum(self.rewards)]
class MyRainbowAgent(rainbow_agent.RainbowAgent):
"""Sample Rainbow agent to visualize Q-values and rewards."""
def __init__(self, sess, num_actions, summary_writer=None):
super(MyRainbowAgent, self).__init__(sess, num_actions,
summary_writer=summary_writer)
self.rewards = []
def step(self, reward, observation, step_number):
self.rewards.append(reward)
return super(MyRainbowAgent, self).step(reward, observation, step_number)
def reload_checkpoint(self, checkpoint_path, use_legacy_checkpoint=False):
if use_legacy_checkpoint:
variables_to_restore = atari_lib.maybe_transform_variable_names(
tf.compat.v1.global_variables(), legacy_checkpoint_load=True)
else:
global_vars = set([x.name for x in tf.compat.v1.global_variables()])
ckpt_vars = [
'{}:0'.format(name)
for name, _ in tf.train.list_variables(checkpoint_path)
]
include_vars = list(global_vars.intersection(set(ckpt_vars)))
variables_to_restore = tf_slim.get_variables_to_restore(
include=include_vars)
if variables_to_restore:
reloader = tf.compat.v1.train.Saver(var_list=variables_to_restore)
reloader.restore(self._sess, checkpoint_path)
logging.info('Done restoring from %s', checkpoint_path)
else:
logging.info('Nothing to restore!')
def get_probabilities(self):
return self._sess.run(tf.squeeze(self._net_outputs.probabilities),
{self.state_ph: self.state})
def get_rewards(self):
return [np.cumsum(self.rewards)]
class MyRunner(run_experiment.Runner):
"""Sample Runner class to generate visualizations."""
def __init__(self, base_dir, trained_agent_ckpt_path, create_agent_fn,
use_legacy_checkpoint=False):
self._trained_agent_ckpt_path = trained_agent_ckpt_path
self._use_legacy_checkpoint = use_legacy_checkpoint
super(MyRunner, self).__init__(base_dir, create_agent_fn)
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
self._agent.reload_checkpoint(self._trained_agent_ckpt_path,
self._use_legacy_checkpoint)
self._start_iteration = 0
def _run_one_iteration(self, iteration):
statistics = iteration_statistics.IterationStatistics()
logging.info('Starting iteration %d', iteration)
num_episodes_eval, average_reward_eval = self._run_eval_phase(
statistics)
return statistics.data_lists
def _run_eval_phase(self, statistics):
# Perform the evaluation phase -- no learning.
self._agent.eval_mode = True
_, sum_returns, num_episodes = self._run_one_phase(
self._evaluation_steps, statistics, 'eval')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
logging.info('Average undiscounted return per evaluation episode: %.2f',
average_return)
statistics.append({'eval_average_return': average_return})
return num_episodes, average_return
def _run_one_phase(self, min_steps, statistics, run_mode_str):
step_count = 0
num_episodes = 0
sum_returns = 0.
print("min_steps", min_steps)
while step_count < min_steps:
print(">>>>> step_count", step_count)
episode_length, episode_return = self._run_one_episode()
statistics.append({
'{}_episode_lengths'.format(run_mode_str): episode_length,
'{}_episode_returns'.format(run_mode_str): episode_return
})
step_count += episode_length
sum_returns += episode_return
num_episodes += 1
# We use sys.stdout.write instead of logging so as to flush frequently
# without generating a line break.
sys.stdout.write('Steps executed: {} '.format(step_count) +
'Episode length: {} '.format(episode_length) +
'Return: {}\r'.format(episode_return))
sys.stdout.flush()
return step_count, sum_returns, num_episodes
def _run_one_episode(self):
step_number = 0
total_reward = 0.
action = self._initialize_episode()
is_terminal = False
# Keep interacting until we reach a terminal state.
while True:
observation, reward, is_terminal = self._run_one_step(action, step_number)
total_reward += reward
step_number += 1
print("step_number", step_number)
if self._clip_rewards:
# Perform reward clipping.
reward = np.clip(reward, -1, 1)
if (self._environment.game_over or
step_number == self._max_steps_per_episode):
# Stop the run loop once we reach the true end of episode.
break
elif is_terminal:
# If we lose a life but the episode is not over, signal an artificial
# end of episode to the agent.
self._end_episode(reward, is_terminal)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation, step_number)
self._end_episode(reward, is_terminal)
return step_number, total_reward
def _run_one_step(self, action, step_number):
observation, reward, is_terminal, _ = self._environment.step(action)
# Saving the render
if True:
if step_number > 900 and step_number < 1000:
image = self._environment.render('rgb_array')
plt.imshow(image)
plt.savefig("/home/hugo/saliency_maps/Rainbow-Tennis/render/render"+str(step_number)+".png")
return observation, reward, is_terminal
def create_dqn_agent(sess, environment, summary_writer=None):
return MyDQNAgent(sess, num_actions=environment.action_space.n,
summary_writer=summary_writer)
def create_rainbow_agent(sess, environment, summary_writer=None):
return MyRainbowAgent(sess, num_actions=environment.action_space.n,
summary_writer=summary_writer)
def create_runner(base_dir, trained_agent_ckpt_path, agent='dqn',
use_legacy_checkpoint=False):
create_agent = create_dqn_agent if agent == 'dqn' else create_rainbow_agent
return MyRunner(base_dir, trained_agent_ckpt_path, create_agent,
use_legacy_checkpoint)
def run(agent, game, num_steps, root_dir, restore_ckpt,
use_legacy_checkpoint=False):
"""Main entrypoint for running and generating visualizations.
Args:
agent: str, agent type to use.
game: str, Atari 2600 game to run.
num_steps: int, number of steps to play game.
root_dir: str, root directory where files will be stored.
restore_ckpt: str, path to the checkpoint to reload.
use_legacy_checkpoint: bool, whether to restore from a legacy (pre-Keras)
checkpoint.
"""
tf.compat.v1.reset_default_graph()
config = """
atari_lib.create_atari_environment.game_name = '{}'
WrappedReplayBuffer.replay_capacity = 300
""".format(game)
base_dir = os.path.join(root_dir, 'agent_viz', game, agent)
gin.parse_config(config)
runner = create_runner(base_dir, restore_ckpt, agent, use_legacy_checkpoint)
iteration = 0
runner._run_one_iteration(iteration)
|
the-stack_0_790 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import time
from cement.utils.misc import minimal_logger
from ..lib import elasticbeanstalk, iam, utils
from ..lib.aws import InvalidParameterValueError
from ..core import io
from ..objects.exceptions import TimeoutError, AlreadyExistsError, \
NotAuthorizedError, NotSupportedError
from ..resources.strings import strings, responses, prompts
from . import commonops
LOG = minimal_logger(__name__)
DEFAULT_ROLE_NAME = 'aws-elasticbeanstalk-ec2-role'
DEFAULT_SERVICE_ROLE_NAME = 'aws-elasticbeanstalk-service-role'
def make_new_env(env_request, branch_default=False,
nohang=False, interactive=True, timeout=None):
resolve_roles(env_request, interactive)
# deploy code
if not env_request.sample_application and not env_request.version_label:
io.log_info('Creating new application version using project code')
env_request.version_label = \
commonops.create_app_version(env_request.app_name)
if env_request.version_label is None or env_request.sample_application:
env_request.version_label = \
commonops.create_dummy_app_version(env_request.app_name)
# Create env
if env_request.key_name:
commonops.upload_keypair_if_needed(env_request.key_name)
io.log_info('Creating new environment')
result, request_id = create_env(env_request,
interactive=interactive)
env_name = result.name # get the (possibly) updated name
# Edit configurations
## Get default environment
default_env = commonops.get_current_branch_environment()
## Save env as branch default if needed
if not default_env or branch_default:
commonops.set_environment_for_current_branch(env_name)
# Print status of env
commonops.print_env_details(result, health=False)
if nohang:
return
io.echo('Printing Status:')
try:
commonops.wait_for_success_events(request_id,
timeout_in_minutes=timeout)
except TimeoutError:
io.log_error(strings['timeout.error'])
def create_env(env_request, interactive=True):
# If a template is being used, we want to try using just the template
if env_request.template_name:
platform = env_request.platform
env_request.platform = None
else:
platform = None
while True:
try:
return elasticbeanstalk.create_environment(env_request)
except InvalidParameterValueError as e:
if e.message == responses['app.notexists'].replace(
'{app-name}', '\'' + env_request.app_name + '\''):
# App doesn't exist, must be a new region.
## Let's create the app in the region
commonops.create_app(env_request.app_name)
elif e.message == responses['create.noplatform']:
if platform:
env_request.platform = platform
else:
raise
elif interactive:
LOG.debug('creating env returned error: ' + e.message)
if re.match(responses['env.cnamenotavailable'], e.message):
io.echo(prompts['cname.unavailable'])
cname = io.prompt_for_cname()
elif re.match(responses['env.nameexists'], e.message):
io.echo(strings['env.exists'])
current_environments = commonops.get_all_env_names()
unique_name = utils.get_unique_name(env_request.env_name,
current_environments)
env_request.env_name = io.prompt_for_environment_name(
default_name=unique_name)
elif e.message == responses['app.notexists'].replace(
'{app-name}', '\'' + env_request.app_name + '\''):
# App doesn't exist, must be a new region.
## Let's create the app in the region
commonops.create_app(env_request.app_name)
else:
raise
else:
raise
# Try again with new values
def get_default_profile():
""" Get the default elasticbeanstalk IAM profile,
Create it if it doesn't exist """
# get list of profiles
try:
profile = DEFAULT_ROLE_NAME
try:
iam.create_instance_profile(profile)
io.log_info('Created default instance profile.')
role = get_default_role()
iam.add_role_to_profile(profile, role)
except AlreadyExistsError:
pass
except NotAuthorizedError:
# Not a root account. Just assume role exists
io.log_info('No IAM privileges: assuming default '
'instance profile exists.')
return DEFAULT_ROLE_NAME
return profile
def get_default_role():
role = DEFAULT_ROLE_NAME
document = '{"Version": "2008-10-17","Statement": [{"Action":' \
' "sts:AssumeRole","Principal": {"Service": ' \
'"ec2.amazonaws.com"},"Effect": "Allow","Sid": ""}]}'
try:
iam.create_role(role, document)
except AlreadyExistsError:
pass
return role
def get_service_role():
try:
roles = iam.get_role_names()
if DEFAULT_SERVICE_ROLE_NAME not in roles:
return None
except NotAuthorizedError:
# No permissions to list roles
# Assume role exists, we will handle error at a deeper level
pass
return DEFAULT_SERVICE_ROLE_NAME
def create_default_service_role():
"""
Create the default service role
"""
io.log_info('Creating service role {} with default permissions.'
.format(DEFAULT_SERVICE_ROLE_NAME))
trust_document = _get_default_service_trust_document()
json_policy = _get_default_service_role_policy()
role_name = DEFAULT_SERVICE_ROLE_NAME
policy_name = 'awsebcli_aws-elasticbeanstalk-service-role_{}'\
.format(int(time.time()))
try:
iam.create_role_with_policy(role_name, trust_document,
policy_name, json_policy)
except NotAuthorizedError as e:
# NO permissions to create or do something
raise NotAuthorizedError(prompts['create.servicerole.nopermissions']
.format(DEFAULT_SERVICE_ROLE_NAME, e))
return DEFAULT_SERVICE_ROLE_NAME
def resolve_roles(env_request, interactive):
"""
Resolves instance-profile and service-role
:param env_request: environment request
:param interactive: boolean
"""
LOG.debug('Resolving roles')
if env_request.instance_profile is None and \
env_request.template_name is None:
# The service supports running without an instance profile, but it is not recommended
# Get the eb default profile
env_request.instance_profile = get_default_profile()
if (env_request.platform.has_healthd_support() and # HealthD enabled
(env_request.service_role is None) and
(env_request.template_name is None)):
role = get_service_role()
if role is None:
if interactive:
io.echo()
io.echo(prompts['create.servicerole.info'])
input = io.get_input(prompts['create.servicerole.view'],
default='')
if input.strip('"').lower() == 'view':
io.echo(_get_default_service_role_policy())
io.get_input(prompts['general.pressenter'])
role = create_default_service_role()
else:
raise NotSupportedError(prompts['create.servicerole.required'])
env_request.service_role = role
def _get_default_service_trust_document():
"""
Just a string representing the service role policy.
Includes newlines for pretty printing :)
"""
return \
'''{
"Version": "2012-10-17",
"Statement": [{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "elasticbeanstalk.amazonaws.com"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalId": "elasticbeanstalk"
}
}
}]
}'''
def _get_default_service_role_policy():
"""
Just a string representing the service role policy.
Includes newlines for pretty printing :)
"""
return \
'''{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:DescribeInstanceHealth",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"ec2:GetConsoleOutput",
"ec2:AssociateAddress",
"ec2:DescribeAddresses",
"ec2:DescribeSecurityGroups",
"sqs:GetQueueAttributes",
"sqs:GetQueueUrl",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeNotificationConfigurations"
],
"Resource": ["*"]
}]
}''' |
the-stack_0_794 | import itertools
from collections import OrderedDict
from rest_framework import filters, exceptions
from .mixin import ViewSetMixin
def get_sort_order(request, param):
args = request.query_params.getlist(param)
fields = itertools.chain(*(arg.split(',') for arg in args))
order = tuple(field.strip() for field in fields if field)
return order
class OrderingFilter(filters.OrderingFilter):
@staticmethod
def get_translated_sort_order(fields, field_map):
return tuple(field_map.get(field, field) for field in fields)
@staticmethod
def get_reverse_translated_sort_order(fields, field_map):
sort_field_reverse_map = {value: key for (key, value) in field_map.items()}
return tuple(sort_field_reverse_map.get(field, field) for field in fields)
@staticmethod
def get_consistent_sort_order(fields):
return fields + type(fields)(('pk',))
def get_ordering(self, request, queryset, view):
fields = get_sort_order(request, self.ordering_param)
if fields:
field_map = getattr(view, 'sort_field_map', {})
fields = self.get_translated_sort_order(fields, field_map)
ordering = self.remove_invalid_fields(queryset, fields, view, request)
if len(ordering) != len(fields):
ext_fields = self.get_reverse_translated_sort_order(fields, field_map)
ext_ordering = self.get_reverse_translated_sort_order(ordering, field_map)
errors = {}
for ext_field in ext_fields:
if ext_field not in ext_ordering:
errors[ext_field] = 'invalid field'
raise exceptions.ValidationError(errors)
ordering = self.get_consistent_sort_order(ordering)
else:
ordering = self.get_default_ordering(view)
consistent_sort = getattr(view, 'consistent_sort', True)
if consistent_sort:
ordering = self.get_consistent_sort_order(ordering)
return ordering
class SortedModelMixin(ViewSetMixin):
ordering = ()
sort_field_map = {}
consistent_sort = True
def list(self, request, *args, **kwargs):
sort = get_sort_order(request, OrderingFilter.ordering_param) or self.ordering
context = OrderedDict(sort=','.join(sort))
return self.decorated_list(SortedModelMixin, context, request, *args, **kwargs)
|
the-stack_0_796 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Benchmark Memory functions.
"""
import re
import subprocess
import sys
import six
from hardware.benchmark import utils
def get_available_memory():
"""Return the total amount of available memory, in bytes."""
with open('/proc/meminfo', 'r') as meminfo:
for line in meminfo:
if line.startswith('MemFree:'):
return int(line.split()[1]) * 1024
return -1
def check_mem_size(block_size, cpu_count):
"""Check if a test can run with a given block size and cpu count."""
dsplit = re.compile(r'\d+')
ssplit = re.compile(r'[A-Z]+')
unit = ssplit.findall(block_size)
unit_in_bytes = 1
if unit[0] == 'K':
unit_in_bytes = 1024
elif unit[0] == 'M':
unit_in_bytes = 1024 * 1024
elif unit[0] == 'G':
unit_in_bytes = 1024 * 1024 * 1024
size_in_bytes = (unit_in_bytes * int(dsplit.findall(block_size)[0])
* cpu_count)
if size_in_bytes > get_available_memory():
return False
return True
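# For illustration: with the logic above, check_mem_size('128M', 8) returns
# True only if /proc/meminfo reports at least 8 * 128 MiB of free memory
# (the values in this example are hypothetical).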
def run_sysbench_memory_threaded(hw_lst, max_time, block_size, cpu_count,
processor_num=None):
"""Running memtest on a processor."""
check_mem = check_mem_size(block_size, cpu_count)
taskset = ''
if processor_num is not None:
if check_mem is False:
msg = ("Avoid Benchmarking memory @%s "
"from CPU %d, not enough memory\n")
sys.stderr.write(msg % (block_size, processor_num))
return
sys.stderr.write('Benchmarking memory @%s from CPU %d'
' for %d seconds (%d threads)\n' %
(block_size, processor_num, max_time, cpu_count))
taskset = 'taskset %s' % hex(1 << processor_num)
else:
if check_mem is False:
msg = ("Avoid Benchmarking memory @%s "
"from all CPUs, not enough memory\n")
sys.stderr.write(msg % block_size)
return
sys.stderr.write('Benchmarking memory @%s from all CPUs '
'for %d seconds (%d threads)\n'
% (block_size, max_time, cpu_count))
_cmd = ('%s sysbench --max-time=%d --max-requests=100000000 '
'--num-threads=%d --test=memory --memory-block-size=%s run')
sysbench_cmd = subprocess.Popen(_cmd % (taskset, max_time,
cpu_count, block_size),
shell=True, stdout=subprocess.PIPE)
for line in sysbench_cmd.stdout:
if isinstance(line, six.binary_type):
line = line.decode()
if "transferred" in line:
_, right = line.rstrip('\n').replace(' ', '').split('(')
perf, _ = right.split('.')
if processor_num is not None:
hw_lst.append(('cpu',
'logical_%d' % processor_num,
'bandwidth_%s' % block_size,
perf))
else:
hw_lst.append(('cpu', 'logical',
'threaded_bandwidth_%s' % block_size,
perf))
def run_sysbench_memory_forked(hw_lst, max_time, block_size, cpu_count):
"""Running forked memtest on a processor."""
if check_mem_size(block_size, cpu_count) is False:
cmd = ('Avoid benchmarking memory @%s from all'
' CPUs (%d forked processes), not enough memory\n')
sys.stderr.write(cmd % (block_size, cpu_count))
return
sys.stderr.write('Benchmarking memory @%s from all CPUs'
' for %d seconds (%d forked processes)\n'
% (block_size, max_time, cpu_count))
sysbench_cmd = '('
for _ in range(cpu_count):
_cmd = ('sysbench --max-time=%d --max-requests=100000000 '
'--num-threads=1 --test=memory --memory-block-size=%s run &')
sysbench_cmd += _cmd % (max_time, block_size)
sysbench_cmd = sysbench_cmd.rstrip('&')
sysbench_cmd += ')'
global_perf = 0
process = subprocess.Popen(
sysbench_cmd, shell=True, stdout=subprocess.PIPE)
for line in process.stdout:
if isinstance(line, six.binary_type):
line = line.decode()
if "transferred" in line:
_, right = line.rstrip('\n').replace(' ', '').split('(')
perf, _ = right.split('.')
global_perf += int(perf)
hw_lst.append(('cpu', 'logical', 'forked_bandwidth_%s' %
(block_size), str(global_perf)))
def mem_perf(hw_lst, max_time=5):
"""Report the memory performance."""
all_cpu_testing_time = 5
block_size_list = ['1K', '4K', '1M', '16M', '128M', '1G', '2G']
logical = utils.get_value(hw_lst, 'cpu', 'logical', 'number')
physical = utils.get_value(hw_lst, 'cpu', 'physical', 'number')
if physical:
eta = int(physical) * len(block_size_list) * max_time
eta += 2 * (all_cpu_testing_time * len(block_size_list))
sys.stderr.write('Memory Performance: %d logical CPU'
' to test (ETA: %d seconds)\n'
% (int(physical), int(eta)))
for cpu_nb in utils.get_one_cpu_per_socket(hw_lst):
for block_size in block_size_list:
run_sysbench_memory_threaded(hw_lst, max_time,
block_size, 1, cpu_nb)
# There is no need to test fork vs. thread
# if only a single logical CPU is present
if int(logical) > 1:
for block_size in block_size_list:
run_sysbench_memory_threaded(hw_lst, all_cpu_testing_time,
block_size, int(logical))
for block_size in block_size_list:
run_sysbench_memory_forked(hw_lst, all_cpu_testing_time,
block_size, int(logical))
|
the-stack_0_798 | # uncompyle6 version 3.3.1
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.2 (v3.6.2:5fd33b5926, Jul 16 2017, 20:11:06)
# [GCC 4.2.1 (Apple Inc. build 5666) (dot 3)]
# Embedded file name: ../../shared/problems/CR/problem1068_CR.py
# Compiled at: 2019-03-13 18:01:49
# Size of source mod 2**32: 1148 bytes
__author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {'put':2,
'take':2,
'perceive':2,
'charge':2,
'move':2,
'moveToEmergency':2,
'moveCharger':2,
'addressEmergency':2,
'wait':2}
DURATION.COUNTER = {'put':2,
'take':2,
'perceive':2,
'charge':2,
'move':2,
'moveToEmergency':2,
'moveCharger':2,
'addressEmergency':2,
'wait':2}
rv.LOCATIONS = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
rv.EDGES = {1:[2], 2:[1, 3], 3:[2, 4], 4:[5, 3, 6, 7], 5:[4, 9], 6:[4, 10], 7:[4, 8], 8:[7], 9:[5], 10:[6]}
rv.OBJECTS = ['o1']
rv.ROBOTS = [
'r1', 'r2']
def ResetState():
state.loc = {'r1':1,
'r2':1}
state.charge = {'r1':2, 'r2':3}
state.load = {'r1':NIL, 'r2':NIL}
state.pos = {'c1':'r2', 'o1':5}
state.containers = {1:[], 2:[], 3:[], 4:[], 5:['o1'], 6:[], 7:[], 8:[], 9:[], 10:[]}
state.emergencyHandling = {'r1':False, 'r2':False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {5: [['fetch', 'r1', 'o1']]}
eventsEnv = {}
# okay decompiling __pycache__/problem1068_CR.cpython-36.pyc
|
the-stack_0_799 | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import http.client as hc
import base64
import json
import os
import ssl
import sys
import urllib
import warnings
import io
import tempfile as tf
from time import sleep
from saspy.sasexceptions import (SASHTTPauthenticateError, SASHTTPconnectionError, SASHTTPsubmissionError)
import logging
logger = logging.getLogger('saspy')
try:
import pandas as pd
import numpy as np
except ImportError:
pass
class SASconfigHTTP:
'''
This object is not intended to be used directly. Instantiate a SASsession object instead
'''
def __init__(self, session, **kwargs):
self._kernel = kwargs.get('kernel', None)
SAScfg = session._sb.sascfg.SAScfg
self.name = session._sb.sascfg.name
cfg = getattr(SAScfg, self.name)
self._token = cfg.get('authtoken', None)
self.url = cfg.get('url', '')
self.ip = cfg.get('ip', '')
self.port = cfg.get('port', None)
self.ctxname = cfg.get('context', '')
self.ctx = {}
self.options = cfg.get('options', [])
self.ssl = cfg.get('ssl', True)
self.verify = cfg.get('verify', True)
self.timeout = cfg.get('timeout', None)
user = cfg.get('user', '')
pw = cfg.get('pw', '')
client_id = cfg.get('client_id', None)
client_secret = cfg.get('client_secret', '')
authcode = cfg.get('authcode', None)
jwt = cfg.get('jwt', None)
self.encoding = cfg.get('encoding', '')
self.authkey = cfg.get('authkey', '')
self._prompt = session._sb.sascfg._prompt
self.lrecl = cfg.get('lrecl', None)
self.inactive = cfg.get('inactive', 120)
try:
self.outopts = getattr(SAScfg, "SAS_output_options")
self.output = self.outopts.get('output', 'html5')
except:
self.output = 'html5'
if self.output.lower() not in ['html', 'html5']:
logger.warning("Invalid value specified for SAS_output_options. Using the default of HTML5")
self.output = 'html5'
# GET Config options
try:
self.cfgopts = getattr(SAScfg, "SAS_config_options")
except:
self.cfgopts = {}
lock = self.cfgopts.get('lock_down', True)
# in lock down mode, don't allow runtime overrides of option values from the config file.
self.verbose = self.cfgopts.get('verbose', True)
self.verbose = kwargs.get('verbose', self.verbose)
inurl = kwargs.get('url', None)
if inurl is not None:
if lock and len(self.url):
logger.warning("Parameter 'url' passed to SAS_session was ignored due to configuration restriction.")
else:
self.url = inurl
inip = kwargs.get('ip', None)
if inip is not None:
if lock and len(self.ip):
logger.warning("Parameter 'ip' passed to SAS_session was ignored due to configuration restriction.")
else:
self.ip = inip
inport = kwargs.get('port', None)
if inport is not None:
if lock and self.port:
logger.warning("Parameter 'port' passed to SAS_session was ignored due to configuration restriction.")
else:
self.port = inport
inctxname = kwargs.get('context', None)
if inctxname is not None:
if lock and len(self.ctxname):
logger.warning("Parameter 'context' passed to SAS_session was ignored due to configuration restriction.")
else:
self.ctxname = inctxname
inoptions = kwargs.get('options', None)
if inoptions is not None:
if lock and len(self.options):
logger.warning("Parameter 'options' passed to SAS_session was ignored due to configuration restriction.")
else:
self.options = inoptions
inssl = kwargs.get('ssl', None)
if inssl is not None:
if lock and self.ssl:
logger.warning("Parameter 'ssl' passed to SAS_session was ignored due to configuration restriction.")
else:
self.ssl = bool(inssl)
inver = kwargs.get('verify', None)
if inver is not None:
if lock and self.verify:
logger.warning("Parameter 'verify' passed to SAS_session was ignored due to configuration restriction.")
else:
self.verify = bool(inver)
intout = kwargs.get('timeout', None)
if intout is not None:
if lock and self.timeout:
logger.warning("Parameter 'timeout' passed to SAS_session was ignored due to configuration restriction.")
else:
self.timeout = intout
inencoding = kwargs.get('encoding', 'NoOverride')
if inencoding != 'NoOverride':
if lock and len(self.encoding):
logger.warning("Parameter 'encoding' passed to SAS_session was ignored due to configuration restriction.")
else:
self.encoding = inencoding
if not self.encoding or self.encoding != 'utf_8':
self.encoding = 'utf_8'
inautht = kwargs.get('authtoken', None)
if inautht is not None:
self._token = inautht
injwt = kwargs.get('jwt', None)
if injwt is not None:
jwt = injwt
inauthc = kwargs.get('authcode', None)
if inauthc is not None:
authcode = inauthc
incis = kwargs.get('client_secret', None)
if incis is not None:
if lock and client_secret:
logger.warning("Parameter 'client_secret' passed to SAS_session was ignored due to configuration restriction.")
else:
client_secret = incis
incid = kwargs.get('client_id', None)
if incid is not None:
if lock and client_id:
logger.warning("Parameter 'client_id' passed to SAS_session was ignored due to configuration restriction.")
else:
client_id = incid
if client_id is None:
client_id = 'SASPy'
use_authcode = False
else:
use_authcode = True
inlrecl = kwargs.get('lrecl', None)
if inlrecl is not None:
if lock and self.lrecl:
logger.warning("Parameter 'lrecl' passed to SAS_session was ignored due to configuration restriction.")
else:
self.lrecl = inlrecl
if not self.lrecl:
self.lrecl = 1048576
inito = kwargs.get('inactive', None)
if inito is not None:
if lock and self.inactive:
logger.warning("Parameter 'inactive' passed to SAS_session was ignored due to configuration restriction.")
else:
self.inactive = inito
inak = kwargs.get('authkey', '')
if len(inak) > 0:
if lock and len(self.authkey):
logger.warning("Parameter 'authkey' passed to SAS_session was ignored due to configuration restriction.")
else:
self.authkey = inak
if len(self.url) > 0:
http = self.url.split('://')
hp = http[1].split(':')
if http[0].lower() in ['http', 'https']:
self.ip = hp[0]
self.port = hp[1] if len(hp) > 1 else self.port
self.ssl = True if 's' in http[0].lower() else False
else:
logger.warning("Parameter 'url' not in recognized format. Expeting 'http[s]://host[:port]'. Ignoring parameter.")
while len(self.ip) == 0:
if not lock:
self.ip = self._prompt("Please enter the host (ip address) you are trying to connect to: ")
if self.ip is None:
self._token = None
raise RuntimeError("No IP address provided.")
else:
logger.fatal("In lockdown mode and missing ip adress in the config named: "+cfgname )
raise RuntimeError("No IP address provided.")
if not self.port:
if self.ssl:
self.port = 443
else:
self.port = 80
if not self._token and not authcode and not jwt:
found = False
if self.authkey:
if os.name == 'nt':
pwf = os.path.expanduser('~')+os.sep+'_authinfo'
else:
pwf = os.path.expanduser('~')+os.sep+'.authinfo'
try:
fid = open(pwf, mode='r')
for line in fid:
if line.startswith(self.authkey):
user = line.partition('user')[2].lstrip().partition(' ')[0].partition('\n')[0]
pw = line.partition('password')[2].lstrip().partition(' ')[0].partition('\n')[0]
found = True
break
fid.close()
except OSError as e:
logger.warning('Error trying to read authinfo file:'+pwf+'\n'+str(e))
pass
except:
pass
if not found:
logger.warning('Did not find key '+self.authkey+' in authinfo file:'+pwf+'\n')
inuser = kwargs.get('user', '')
if len(inuser) > 0:
if lock and len(user):
logger.warning("Parameter 'user' passed to SAS_session was ignored due to configuration restriction.")
else:
user = inuser
inpw = kwargs.get('pw', '')
if len(inpw) > 0:
if lock and len(pw):
logger.warning("Parameter 'pw' passed to SAS_session was ignored due to configuration restriction.")
else:
pw = inpw
if use_authcode:
code_pw = 'authcode'
else:
code_pw = ''
if len(user) == 0:
msg = "To connect to Viya you need either an authcode or a userid/pw. Neither were provided.\n"
msg += "Please enter which one you want to enter next. Type one of these now: [default=authcode | userid]: "
while code_pw.lower() not in ['userid','authcode']:
code_pw = self._prompt(msg)
if code_pw == '':
code_pw = 'authcode'
if code_pw is None:
self._token = None
raise RuntimeError("Neither authcode nor userid provided.")
if code_pw.lower() == 'authcode':
purl = "/SASLogon/oauth/authorize?client_id={}&response_type=code".format(client_id)
if len(self.url) > 0:
purl = self.url+purl
else:
purl = "http{}://{}:{}{}".format('s' if self.ssl else '', self.ip, self.port, purl)
msg = "The default url to authenticate with would be {}\n".format(purl)
msg += "Please enter authcode: "
authcode = self._prompt(msg)
if authcode is None:
self._token = None
raise RuntimeError("No authcode provided.")
else:
while len(user) == 0:
user = self._prompt("Please enter userid: ")
if user is None:
self._token = None
raise RuntimeError("No userid provided.")
while len(pw) == 0:
pw = self._prompt("Please enter password: ", pw = True)
if pw is None:
self._token = None
raise RuntimeError("No password provided.")
if self.ssl:
if self.verify:
# handle having a self-signed certificate (the default on Viya) without copies on the client; still SSL, just not verifiable
try:
self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout)
if not self._token:
self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)
except ssl.SSLError as e:
logger.warning("SSL certificate verification failed, creating an unverified SSL connection. Error was:"+str(e))
self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout, context=ssl._create_unverified_context())
logger.warning("You can set 'verify=False' to get rid of this message ")
if not self._token:
self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)
else:
self.HTTPConn = hc.HTTPSConnection(self.ip, self.port, timeout=self.timeout, context=ssl._create_unverified_context())
if not self._token:
self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)
else:
self.HTTPConn = hc.HTTPConnection(self.ip, self.port, timeout=self.timeout)
if not self._token:
self._token = self._authenticate(user, pw, authcode, client_id, client_secret, jwt)
if not self._token:
logger.error("Could not acquire an Authentication Token")
return
# GET Contexts
contexts = self._get_contexts()
if contexts == None:
self._token = None
raise SASHTTPconnectionError(msg="No Contexts found on Compute Service at ip="+self.ip)
ctxnames = []
for i in range(len(contexts)):
ctxnames.append(contexts[i].get('name'))
if len(ctxnames) == 0:
self._token = None
raise SASHTTPconnectionError(msg="No Contexts found on Compute Service at ip="+self.ip)
if len(self.ctxname) == 0:
if len(ctxnames) == 1:
self.ctxname = ctxnames[0]
logger.info("Using SAS Context: " + self.ctxname)
else:
try:
ctxname = self._prompt("Please enter the SAS Context you wish to run. Available contexts are: " +
str(ctxnames)+" ")
if ctxname is None:
self._token = None
raise RuntimeError("No SAS Context provided.")
else:
self.ctxname = ctxname
except:
raise SASHTTPconnectionError(msg=
"SAS Context specified '"+self.ctxname+"' was not found. Prompting failed. Available contexts were: " +
str(ctxnames)+" ")
while self.ctxname not in ctxnames:
if not lock:
''' This was the original code from before Compute went into production; users can't create these contexts on the fly.
createctx = self._prompt(
"SAS Context specified was not found. Do you want to create a new context named "+self.ctxname+" [Yes|No]?")
if createctx.upper() in ('YES', 'Y'):
contexts = self._create_context(user)
else:
'''
try:
ctxname = self._prompt(
"SAS Context specified was not found. Please enter the SAS Context you wish to run. Available contexts are: " +
str(ctxnames)+" ")
if ctxname is None:
self._token = None
raise SASHTTPconnectionError(msg=
"SAS Context specified '"+self.ctxname+"' was not found. Prompting failed. Available contexts were: " +
str(ctxnames)+" ")
else:
self.ctxname = ctxname
except:
raise SASHTTPconnectionError(msg=
"SAS Context specified '"+self.ctxname+"' was not found. Prompting failed. Available contexts were: " +
str(ctxnames)+" ")
else:
msg = "SAS Context specified in the SASconfig ("+self.ctxname+") was not found on this server, and because "
msg += "the SASconfig is in lockdown mode, there is no prompting for other contexts. No connection established."
logger.error(msg)
self._token = None
raise RuntimeError("No SAS Context provided.")
for i in range(len(contexts)):
if contexts[i].get('name') == self.ctxname:
self.ctx = contexts[i]
break
if self.ctx == {}:
raise SASHTTPconnectionError(msg="No context information returned for context {}\n{}".format(self.ctxname, contexts))
return
def _authenticate(self, user, pw, authcode, client_id, client_secret, jwt):
#import pdb; pdb.set_trace()
if authcode:
uauthcode = urllib.parse.quote(authcode)
uclient_id = urllib.parse.quote(client_id)
uclient_secret = urllib.parse.quote(client_secret)
d1 = ("grant_type=authorization_code&code="+uauthcode+
"&client_id="+uclient_id+"&client_secret="+uclient_secret).encode(self.encoding)
headers = {"Accept":"application/vnd.sas.compute.session+json","Content-Type":"application/x-www-form-urlencoded"}
elif jwt:
ujwt = urllib.parse.quote(jwt)
d1 = "grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer&assertion="+ujwt
client = "Basic "+base64.encodebytes((client_id+":").encode(self.encoding)).splitlines()[0].decode(self.encoding)
headers = {"Accept":"application/vnd.sas.compute.session+json",
"Content-Type":"application/x-www-form-urlencoded",
"Authorization":client}
else:
uuser = urllib.parse.quote(user)
upw = urllib.parse.quote(pw)
d1 = ("grant_type=password&username="+uuser+"&password="+upw).encode(self.encoding)
client = "Basic "+base64.encodebytes("sas.tkmtrb:".encode(self.encoding)).splitlines()[0].decode(self.encoding)
#client = "Basic "+base64.encodebytes((client_id+":").encode(self.encoding)).splitlines()[0].decode(self.encoding)
headers = {"Accept":"application/vnd.sas.compute.session+json","Content-Type":"application/x-www-form-urlencoded",
"Authorization":client}
# POST AuthToken
conn = self.HTTPConn; conn.connect()
try:
conn.request('POST', "/SASLogon/oauth/token", body=d1, headers=headers)
req = conn.getresponse()
except:
#print("Failure in GET AuthToken. Could not connect to the logon service. Exception info:\n"+str(sys.exc_info()))
msg="Failure in GET AuthToken. Could not connect to the logon service. Exception info:\n"+str(sys.exc_info())
raise SASHTTPauthenticateError(msg)
#return None
status = req.status
resp = req.read()
conn.close()
if status > 299:
#print("Failure in GET AuthToken. Status="+str(status)+"\nResponse="+resp.decode(self.encoding))
msg="Failure in GET AuthToken. Status="+str(status)+"\nResponse="+str(resp)
raise SASHTTPauthenticateError(msg)
#return None
js = json.loads(resp.decode(self.encoding))
token = js.get('access_token')
return token
def _get_contexts(self):
#import pdb; pdb.set_trace()
# GET Contexts
conn = self.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json",
"Accept-Item":"application/vnd.sas.compute.context.summary+json",
"Authorization":"Bearer "+self._token}
conn.request('GET', "/compute/contexts?limit=999999", headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
if status > 299:
fmsg = "Failure in GET Contexts. Status="+str(status)+"\nResponse="+resp.decode(self.encoding)
raise SASHTTPconnectionError(msg=fmsg)
js = json.loads(resp.decode(self.encoding))
contexts = js.get('items')
return contexts
def _create_context(self, user):
# GET Contexts
conn = self.HTTPConn; conn.connect()
d1 = '{"name": "SASPy","version": 1,"description": "SASPy Context","attributes": {"sessionInactiveTimeout": 60 },'
d1 += '"launchContext": {"contextName": "'+self.ctxname+'"},"launchType": "service","authorizedUsers": ["'+user+'"]}'
headers={"Accept":"application/vnd.sas.compute.context+json",
"Content-Type":"application/vnd.sas.compute.context.request+json",
"Authorization":"Bearer "+self._token}
conn.request('POST', "/compute/contexts", body=d1, headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
if status > 299:
logger.error("Failure in POST Context. Status="+str(status)+"\nResponse="+resp.decode(self.encoding))
return None
contexts = self._get_contexts()
return contexts
class SASsessionHTTP():
'''
The SASsession object is the main object to instantiate and provides access to the rest of the functionality.
cfgname - value in SAS_config_names List of the sascfg.py file
kernel - None - internal use when running the SAS_kernel notebook
user - userid to use to connect to Compute Service
pw - pw for the userid being used to connect to Compute Service
ip - overrides IP Dict entry of cfgname in sascfg.py file
port - overrides Port Dict entry of cfgname in sascfg.py file
context - overrides Context Dict entry of cfgname in sascfg.py file
options - overrides Options Dict entry of cfgname in sascfg.py file
encoding - This is the python encoding value that matches the SAS session encoding of the Compute Server you are connecting to
'''
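# A minimal sketch of creating a session against this HTTP access method. It is
# normally done through the saspy.SASsession() factory rather than by
# instantiating SASsessionHTTP directly; the config name, URL, credentials and
# context value below are placeholders:
#
#   import saspy
#   sas = saspy.SASsession(cfgname='httpviya',
#                          url='https://viya.example.com',
#                          user='me', pw='secret',
#                          context='SAS Studio compute context')
#
# The keyword arguments mirror the parameters listed in the docstring above and
# are picked up by SASconfigHTTP via kwargs.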
#def __init__(self, cfgname: str ='', kernel: '<SAS_kernel object>' =None, user: str ='', pw: str ='',
# ip: str ='', port: int ='', context: str ='', options: list =[]) -> '<SASsession object>':
def __init__(self, **kwargs):
self.pid = None
self._session = None
self._sb = kwargs.get('sb', None)
self._log = "\nNo SAS session established, something must have failed trying to connect\n"
self.sascfg = SASconfigHTTP(self, **kwargs)
if self.sascfg._token:
self._startsas()
else:
None
def __del__(self):
if self._session:
self._endsas()
self._sb.SASpid = None
return
def _startsas(self):
if self.pid:
return self.pid
if len(self.sascfg.options):
options = '[';
for opt in self.sascfg.options:
options += '"'+opt+'", '
options = (options.rpartition(','))[0]+']'
else:
options = '[]'
# POST Session
uri = None
for ld in self.sascfg.ctx.get('links'):
if ld.get('method') == 'POST':
uri = ld.get('uri')
break
if not uri:
raise SASHTTPconnectionError(msg=
"POST uri not found in context info. You may not have permission to use this context.\n{}".format(self.sascfg.ctx))
conn = self.sascfg.HTTPConn; conn.connect()
d1 = '{"name":"'+self.sascfg.ctxname+'", "description":"saspy session", "version":1, "environment":{"options":'+options+'}'
d1 += ',"attributes": {"sessionInactiveTimeout": '+str(int(float(self.sascfg.inactive)*60))+'}}'
headers={"Accept":"application/vnd.sas.compute.session+json","Content-Type":"application/vnd.sas.compute.session.request+json",
"Authorization":"Bearer "+self.sascfg._token}
try:
conn.request('POST', uri, body=d1, headers=headers)
req = conn.getresponse()
except:
#print("Could not acquire a SAS Session for context: "+self.sascfg.ctxname)
raise SASHTTPconnectionError(msg="Could not acquire a SAS Session for context: "+self.sascfg.ctxname+". Exception info:\n"+str(sys.exc_info()))
#return None
status = req.status
resp = req.read()
conn.close()
if status > 299:
#print("Failure in POST Session \n"+resp.decode(self.sascfg.encoding))
#print("Could not acquire a SAS Session for context: "+self.sascfg.ctxname)
msg="Could not acquire a SAS Session for context: "+self.sascfg.ctxname+". Exception info:\nStatus="+str(status)+"\nResponse="+str(resp)
raise SASHTTPconnectionError(msg)
#return None
self._session = json.loads(resp.decode(self.sascfg.encoding))
if self._session == None:
logger.error("Could not acquire a SAS Session for context: "+self.sascfg.ctxname)
return None
#GET Session uri's once
for ld in self._session.get('links'):
if ld.get('method') == 'GET' and ld.get('rel') == 'log':
self._uri_log = ld.get('uri')
elif ld.get('method') == 'GET' and ld.get('rel') == 'listing':
self._uri_lst = ld.get('uri')
elif ld.get('method') == 'GET' and ld.get('rel') == 'results':
self._uri_ods = ld.get('uri')
elif ld.get('method') == 'GET' and ld.get('rel') == 'state':
self._uri_state = ld.get('uri')
elif ld.get('method') == 'POST' and ld.get('rel') == 'execute':
self._uri_exe = ld.get('uri')
elif ld.get('method') == 'PUT' and ld.get('rel') == 'cancel':
self._uri_can = ld.get('uri')
elif ld.get('method') == 'DELETE' and ld.get('rel') == 'delete':
self._uri_del = ld.get('uri')
elif ld.get('method') == 'GET' and ld.get('rel') == 'files':
self._uri_files = ld.get('uri')
self.pid = self._session.get('id')
self._log = self._getlog()
# POST Job - let's see if the server really came up, because you can't tell from what happened so far
conn = self.sascfg.HTTPConn; conn.connect()
jcode = json.dumps('\n')
d1 = '{"code":['+jcode+']}'
headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('POST', self._uri_exe, body=d1, headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
try:
jobid = json.loads(resp.decode(self.sascfg.encoding))
except:
jobid = None
if not jobid or status > 299:
logger.error("Compute server had issues starting:\n")
for key in jobid:
logger.error(key+"="+str(jobid.get(key)))
return None
self._sb.SESSION_ID = self.pid
ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend pagesize=max nosyntaxcheck; ods graphics on;", "text")
if self.sascfg.verbose:
logger.info("SAS server started using Context "+self.sascfg.ctxname+" with SESSION_ID="+self.pid)
return self.pid
def _endsas(self):
rc = 0
if self._session:
# DELETE Session
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/json","Authorization":"Bearer "+self.sascfg._token}
conn.request('DELETE', self._uri_del, headers=headers)
req = conn.getresponse()
resp = req.read()
conn.close()
if self.sascfg.verbose:
logger.info("SAS server terminated for SESSION_ID="+self._session.get('id'))
self._session = None
self.pid = None
self._sb.SASpid = None
return rc
def _getlog(self, jobid=None):
start = 0
logr = ''
# GET Log
if jobid:
for ld in jobid.get('links'):
if ld.get('method') == 'GET' and ld.get('rel') == 'log':
uri = ld.get('uri')
break
else:
uri = self._uri_log
while True:
# GET Log
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', uri+"?start="+str(start)+"&limit="+str(start+1000), headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
try:
js = json.loads(resp.decode(self.sascfg.encoding))
log = js.get('items')
lines = len(log)
except:
lines = None
if not lines:
break
start += lines
for line in log:
logr += line.get('line')+'\n'
if jobid != None:
self._log += logr.replace(chr(12), chr(10))
if logr.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
return logr
def _getlst(self, jobid=None):
htm = ''
i = 0
# GET the list of results
if jobid:
for ld in jobid.get('links'):
if ld.get('method') == 'GET' and ld.get('rel') == 'results':
uri = ld.get('uri')+"?includeTypes=ODS"
break
else:
uri = self._uri_lst
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', uri, headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
try:
js = json.loads(resp.decode(self.sascfg.encoding))
results = js.get('items')
if not results:
results = []
except:
results = []
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token}
while i < len(results):
# GET an ODS Result
if results[i].get('type') == 'ODS' and len(results[i].get('links')) > 0:
conn.request('GET', results[i].get('links')[0].get('href'), headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
htm += resp.decode(self.sascfg.encoding)
i += 1
conn.close()
lstd = htm.replace(chr(12), chr(10)).replace('<body class="c body">',
'<body class="l body">').replace("font-size: x-small;",
"font-size: normal;")
return lstd
def _getlsttxt(self, jobid=None):
start = 0
lstr = ''
# GET Log
if jobid:
for ld in jobid.get('links'):
if ld.get('method') == 'GET' and ld.get('rel') == 'listing':
uri = ld.get('uri')
break
else:
uri = self._uri_lst
while True:
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', uri+"?start="+str(start)+"&limit="+str(start+1000), headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
try:
js = json.loads(resp.decode(self.sascfg.encoding))
lst = js.get('items')
lines = len(lst)
except:
lines = None
if not lines:
break
start += lines
for line in lst:
lstr += line.get('line')+'\n'
return lstr
def _asubmit(self, code, results="html"):
#odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\n")
#odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\n")
#odsclose = json.dumps("ods html5 (id=saspy_internal) close;ods listing;\n")
odsopen = json.dumps("ods listing close;ods "+self.sascfg.output+" (id=saspy_internal) options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style+"; ods graphics on / outputfmt=png;\n")
odsclose = json.dumps("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing;\n")
ods = True;
if results.upper() != "HTML":
ods = False
odsopen = '""'
odsclose = '""'
# POST Job
conn = self.sascfg.HTTPConn; conn.connect()
jcode = json.dumps(code)
d1 = '{"code":['+odsopen+','+jcode+','+odsclose+']}'
headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('POST', self._uri_exe, body=d1, headers=headers)
req = conn.getresponse()
resp = req.read()
conn.close()
jobid = json.loads(resp.decode(self.sascfg.encoding))
return jobid
def _jobstate(self, jobid):
uri = None
for ld in jobid.get('links'):
if ld.get('method') == 'GET' and ld.get('rel') == 'state':
uri = ld.get('uri')
break
if not uri:
print("No job found")
return None
conn = self.sascfg.HTTPConn;
headers = {"Accept":"text/plain", "Authorization":"Bearer "+self.sascfg._token}
conn.connect()
conn.request('GET', uri, headers=headers)
req = conn.getresponse()
resp = req.read()
conn.close()
return resp
def submit(self, code: str, results: str ="html", prompt: dict = None, **kwargs) -> dict:
'''
code - the SAS statements you want to execute
results - format of results, HTML is default, TEXT is the alternative
prompt - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
the macros, or show what you type and keep the macros (they will still be available later)
for example (what you type for pw will not be displayed, user and dsname will):
results = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
i.e.: results = sas.submit("data a; x=1; run; proc print; run;")
print(results['LOG'])
HTML(results['LST'])
'''
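# Illustrative usage sketch (not part of the original source): assumes this method is reachable
# on a connected session object named `sas`; the dataset name assumes the standard SASHELP library.
#   ll = sas.submit("proc print data=sashelp.class (obs=5); run;", results="text")
#   print(ll['LOG'])   # SAS log as text
#   print(ll['LST'])   # listing output (HTML when results="html")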
prompt = prompt if prompt is not None else {}
printto = kwargs.pop('undo', False)
#odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=png; ods graphics on / outputfmt=png;\n")
#odsopen = json.dumps("ods listing close;ods html5 (id=saspy_internal) options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\n")
#odsclose = json.dumps("ods html5 (id=saspy_internal) close;ods listing;\n")
odsopen = json.dumps("ods listing close;ods "+self.sascfg.output+" (id=saspy_internal) options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style+"; ods graphics on / outputfmt=png;\n")
odsclose = json.dumps("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing;\n")
ods = True;
pcodei = ''
pcodeiv = ''
pcodeo = ''
if self._session == None:
logger.error("No SAS process attached. SAS process has terminated unexpectedly.")
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='')
if results.upper() != "HTML":
ods = False
odsopen = '""'
odsclose = '""'
if len(prompt):
pcodei += 'options nosource nonotes;\n'
pcodeo += 'options nosource nonotes;\n'
for key in prompt:
gotit = False
while not gotit:
var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key])
if var is None:
raise RuntimeError("No value for prompted macro variable provided.")
if len(var) > 0:
gotit = True
else:
print("Sorry, didn't get a value for that variable.")
if prompt[key]:
pcodei += '%let '+key+'='+var+';\n'
else:
pcodeiv += '%let '+key+'='+var+';\n'
if prompt[key]:
pcodeo += '%symdel '+key+';\n'
pcodei += 'options source notes;\n'
pcodeo += 'options source notes;\n'
# POST Job
conn = self.sascfg.HTTPConn; conn.connect()
jcode = json.dumps(pcodei+pcodeiv+code+'\n'+pcodeo)
d1 = '{"code":['+odsopen+','+jcode+','+odsclose+']}'
headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('POST', self._uri_exe, body=d1, headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
try:
jobid = json.loads(resp.decode(self.sascfg.encoding))
except:
raise SASHTTPsubmissionError(msg="Problem parsing response from Compute Service.\n Status="+str(status)+"\n Response="+str(resp))
if not jobid or status > 299:
raise SASHTTPsubmissionError(msg="Problem submitting job to Compute Service.\n Status code="+str(jobid.get('httpStatusCode'))+"\n Message="+jobid.get('message'))
for ld in jobid.get('links'):
if ld.get('method') == 'GET' and ld.get('rel') == 'state':
uri = ld.get('uri')
break
conn = self.sascfg.HTTPConn;
headers = {"Accept":"text/plain", "Authorization":"Bearer "+self.sascfg._token}
done = False
delay = kwargs.get('GETstatusDelay' , 0)
excpcnt = kwargs.get('GETstatusFailcnt', 5)
while not done:
try:
while True:
# GET Status for JOB
conn.connect()
conn.request('GET', uri, headers=headers)
req = conn.getresponse()
resp = req.read()
conn.close()
if resp not in [b'running', b'pending']:
done = True
break
sleep(delay)
except (KeyboardInterrupt, SystemExit):
conn.close()
print('Exception caught!')
response = self.sascfg._prompt(
"SAS attention handling not yet supported over HTTP. Please enter (Q) to Quit waiting for results or (C) to continue waiting.")
while True:
if response is None or response.upper() == 'Q':
return dict(LOG='', LST='', BC=True)
if response.upper() == 'C':
break
response = self.sascfg._prompt("Please enter (Q) to Quit waiting for results or (C) to continue waiting.")
except hc.RemoteDisconnected as Dis:
conn.close()
print('RemoteDisconnected Exception caught!\n'+str(Dis))
excpcnt -= 1
if excpcnt < 0:
raise
logd = self._getlog(jobid).replace(chr(12), chr(10))
if ods:
lstd = self._getlst(jobid).replace(chr(12), chr(10))
else:
lstd = self._getlsttxt(jobid).replace(chr(12), chr(10))
trip = lstd.rpartition("/*]]>*/")
if len(trip[1]) > 0 and len(trip[2]) < 200:
lstd = ''
self._sb._lastlog = logd
# issue 294
if printto:
conn = self.sascfg.HTTPConn; conn.connect()
jcode = json.dumps('proc printto;run;\n')
d1 = '{"code":['+jcode+']}'
headers={"Accept":"application/json","Content-Type":"application/vnd.sas.compute.job.request+json",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('POST', self._uri_exe, body=d1, headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
return dict(LOG=logd, LST=lstd)
def saslog(self):
'''
this method is used to get the current, full contents of the SASLOG
'''
return self._log
def exist(self, table: str, libref: str ="") -> bool:
'''
table - the name of the SAS Data Set
libref - the libref for the Data Set, defaults to WORK, or USER if assigned
Returns True if the Data Set exists and False if it does not
'''
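# Illustrative usage sketch (assumes a connected session object `sas`; names are examples only):
#   if sas.exist('class', libref='sashelp'):
#       df = sas.sasdata2dataframe('class', 'sashelp')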
#can't have an empty libref, so check for user or work
sd = table.strip().replace("'", "''")
if not libref:
# HEAD Libref USER
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"*/*", "Authorization":"Bearer "+self.sascfg._token}
conn.request('HEAD', "/compute/sessions/"+self.pid+"/data/USER", headers=headers)
req = conn.getresponse()
status = req.status
conn.close()
if status == 200:
libref = 'USER'
else:
libref = 'WORK'
code = 'data _null_; e = exist("'
code += libref+"."
code += "'"+sd+"'n"+'"'+");\n"
code += 'v = exist("'
code += libref+"."
code += "'"+sd+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n"
code += "te='TABLE_EXISTS='; put te e;run;\n"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("TABLE_EXISTS= ")
l2 = l2[2].partition("\n")
exists = int(l2[0])
return bool(exists)
"""
# HEAD Data Table
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"*/*", "Authorization":"Bearer "+self.sascfg._token}
conn.request('HEAD', "/compute/sessions/"+self.pid+"/data/"+libref+"/"+table, headers=headers)
req = conn.getresponse()
status = req.status
conn.close()
if status == 200:
exists = True
else:
exists = False
return exists
"""
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool=False, opts: dict ={}) -> '<SASdata object>':
'''
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
file - either the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
'''
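# Illustrative usage sketch (file path and table name are hypothetical):
#   sas.read_csv('/tmp/cars.csv', 'cars', libref='work', opts={'guessingrows': 100})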
code = "filename x "
if file.lower().startswith("http"):
code += "url "
code += "\""+file+"\";\n"
code += "proc import datafile=x out="
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n dbms=csv replace; "+self._sb._impopts(opts)+" run;"
if nosub:
print(code)
else:
ll = self.submit(code, "text")
def write_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, dsopts: dict ={}, opts: dict ={}) -> 'The LOG showing the results of the step':
'''
This method will export a SAS Data Set to a file in CSV format.
file - the OS filesystem path of the file to be created (exported from the SAS Data Set)
table - the name of the SAS Data Set you want to export to a CSV file
libref - the libref for the SAS Data Set.
opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)
'''
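# Illustrative usage sketch (paths and names are hypothetical; the opts value is an assumption):
#   log = sas.write_csv('/tmp/cars.csv', 'cars', libref='work', opts={'delimiter': ','})
#   print(log)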
code = "filename x \""+file+"\";\n"
code += "options nosource;\n"
code += "proc export data="
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n "+self._sb._dsopts(dsopts)+" outfile=x dbms=csv replace; "
code += self._sb._expopts(opts)+" run\n;"
code += "options source;\n"
if nosub:
print(code)
else:
ll = self.submit(code, "text")
return ll['LOG']
def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS server's file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
fsize = os.path.getsize(localfile)
if fsize > 0:
code = "filename _sp_updn '"+remf+"' recfm=N permission='"+permission+"';"
ll = self.submit(code, 'text')
logf = ll['LOG']
# GET Etag
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.compute.fileref+json;application/json",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', self._uri_files+"/_sp_updn", headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
Etag = req.getheader("Etag")
# PUT data
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"*/*","Content-Type":"application/octet-stream",
"Transfer-Encoding" : "chunked",
"Authorization":"Bearer "+self.sascfg._token}
conn.connect()
conn.putrequest('PUT', self._uri_files+"/_sp_updn/content")
conn.putheader("Accept","*/*")
conn.putheader("Content-Type","application/octet-stream")
conn.putheader("If-Match",Etag)
conn.putheader("Transfer-Encoding","chunked")
conn.putheader("Authorization","Bearer "+self.sascfg._token)
conn.endheaders()
blksz = int(kwargs.get('blocksize', 50000))
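# The loop below hand-rolls HTTP/1.1 chunked transfer encoding: each chunk is sent as the
# chunk length in hex, CRLF, the raw bytes, CRLF; a zero-length chunk ("0\r\n\r\n") terminates
# the body. The Etag fetched above is passed as If-Match so the PUT only succeeds against the
# fileref version that was just looked up.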
while True:
buf = fd.read1(blksz)
if len(buf) == 0:
conn.send(b"0\r\n\r\n")
break
lenstr = "%s\r\n" % hex(len(buf))[2:]
conn.send(lenstr.encode())
conn.send(buf)
conn.send(b"\r\n")
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
code = "filename _sp_updn;"
else:
logf = ''
code = """
filename _sp_updn '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
fid = fopen('_sp_updn', 'O');
if fid then
rc = fclose(fid);
run;
filename _sp_updn;
"""
ll = self.submit(code, 'text')
logf += ll['LOG']
fd.close()
return {'Success' : True,
'LOG' : logf}
def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):
"""
This method downloads a remote file from the SAS server's file system.
localfile - path to the local file to create or overwrite
remotefile - path to the remote file to download
overwrite - overwrite the output file if it exists?
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" does not exist."}
if valid == {}:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" is a directory."}
if os.path.isdir(localfile):
locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]
else:
locf = localfile
try:
fd = open(locf, 'wb')
fd.write(b'write can fail even if open worked, as it turns out')
fd.close()
fd = open(locf, 'wb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(locf)+" could not be opened or written to. Error was: "+str(e)}
code = "filename _sp_updn '"+remotefile+"' recfm=F encoding=binary lrecl=4096;"
ll = self.submit(code, "text")
logf = ll['LOG']
# GET data
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"*/*","Content-Type":"application/octet-stream",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', self._uri_files+"/_sp_updn/content", headers=headers)
req = conn.getresponse()
status = req.status
fd.write(req.read())
fd.flush()
fd.close()
conn.close()
ll = self.submit("filename _sp_updn;", 'text')
logf += ll['LOG']
return {'Success' : True,
'LOG' : logf}
def _getbytelenF(self, x):
return len(x.encode(self.sascfg.encoding))
def _getbytelenR(self, x):
return len(x.encode(self.sascfg.encoding, errors='replace'))
def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
'''
This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
df - Pandas Data Frame to import to a SAS Data Set
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
LF - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\x01'
CR - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\x02'
colsep - the column separator character used for streaming the delimited data to SAS; defaults to '\x03'
colrep - the character used to replace any embedded colsep (or LF/CR) characters; defaults to ' '
datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
outfmts - dict with column names and SAS formats to assign to the new SAS data set
labels - dict with column names and SAS Labels to assign to the new SAS data set
outdsopts - a dictionary containing output data set options for the table being created
encode_errors - 'fail', 'replace' or 'ignore' - default is 'fail'; 'replace' substitutes invalid chars with the replacement char;
'ignore' will not transcode in Python, so you get whatever happens with your data and SAS
char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set
'''
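# Illustrative usage sketch (assumes pandas is imported as pd; table/libref names are examples):
#   df = pd.DataFrame({'name': ['a', 'b'], 'value': [1.0, 2.0]})
#   sas.dataframe2sasdata(df, table='mydata', libref='work', outdsopts={'compress': 'yes'})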
input = ""
xlate = ""
card = ""
format = ""
length = ""
label = ""
dts = []
ncols = len(df.columns)
lf = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
cr = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "
dts_upper = {k.upper():v for k,v in datetimes.items()}
dts_keys = dts_upper.keys()
fmt_upper = {k.upper():v for k,v in outfmts.items()}
fmt_keys = fmt_upper.keys()
lab_upper = {k.upper():v for k,v in labels.items()}
lab_keys = lab_upper.keys()
if encode_errors is None:
encode_errors = 'fail'
bpc = self._sb.pyenc[0]
if char_lengths and str(char_lengths).strip() in ['1','2','3','4']:
bpc = int(char_lengths)
if char_lengths and str(char_lengths) == 'exact':
CnotB = False
else:
CnotB = bpc == 1
if type(char_lengths) is not dict or len(char_lengths) < ncols:
charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
else:
charlens = char_lengths
if charlens is None:
return -1
chr_upper = {k.upper():v for k,v in charlens.items()}
if type(df.index) != pd.RangeIndex:
warnings.warn("Note that Indexes are not transferred over as columns. Only actual coulmns are transferred")
for name in df.columns:
colname = str(name).replace("'", "''")
col_up = str(name).upper()
input += "'"+colname+"'n "
if col_up in lab_keys:
label += "label '"+colname+"'n ="+lab_upper[col_up]+";\n"
if col_up in fmt_keys:
format += "'"+colname+"'n "+fmt_upper[col_up]+" "
if df.dtypes[name].kind in ('O','S','U','V'):
try:
length += " '"+colname+"'n $"+str(chr_upper[col_up])
except KeyError as e:
logger.error("Dictionary provided as char_lengths is missing column: "+colname)
raise e
if keep_outer_quotes:
input += "~ "
dts.append('C')
if embedded_newlines:
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
else:
if df.dtypes[name].kind in ('M'):
length += " '"+colname+"'n 8"
input += ":B8601DT26.6 "
if col_up not in dts_keys:
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601DT26.6 "
else:
if dts_upper[col_up].lower() == 'date':
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601DA. "
xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
else:
if dts_upper[col_up].lower() == 'time':
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601TM. "
xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
else:
logger.warning("invalid value for datetimes for column "+colname+". Using default.")
if col_up not in fmt_keys:
format += "'"+colname+"'n E8601DT26.6 "
dts.append('D')
else:
length += " '"+colname+"'n 8"
if df.dtypes[name] == 'bool':
dts.append('B')
else:
dts.append('N')
code = "data "
if len(libref):
code += libref+"."
code += "'"+table.strip().replace("'", "''")+"'n"
if len(outdsopts):
code += '('
for key in outdsopts:
code += key+'='+str(outdsopts[key]) + ' '
code += ");\n"
else:
code += ";\n"
if len(length):
code += "length "+length+";\n"
if len(format):
code += "format "+format+";\n"
code += label
code += "infile datalines delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\ninput "+input+";\n"+xlate+";\ndatalines4;"
self._asubmit(code, "text")
blksz = int(kwargs.get('blocksize', 1000000))
noencode = self._sb.sascei == 'utf-8' or encode_errors == 'ignore'
row_num = 0
code = ""
for row in df.itertuples(index=False):
row_num += 1
card = ""
for col in range(ncols):
var = str(row[col])
if dts[col] == 'N' and var == 'nan':
var = '.'
elif dts[col] == 'C':
if var == 'nan' or len(var) == 0:
var = ' '
else:
var = var.replace(colsep, colrep)
elif dts[col] == 'B':
var = str(int(row[col]))
elif dts[col] == 'D':
if var in ['nan', 'NaT', 'NaN']:
var = '.'
else:
var = str(row[col].to_datetime64())[:26]
card += var
if col < (ncols-1):
card += colsep
if embedded_newlines:
card = card.replace(LF, colrep).replace(CR, colrep)
card = card.replace('\n', LF).replace('\r', CR)
code += card+"\n"
if len(code) > blksz:
if not noencode:
if encode_errors == 'fail':
if CnotB:
try:
chk = code.encode(self.sascfg.encoding)
except Exception as e:
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("run;", 'text')
logger.error("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
logger.error("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace').decode(self.sascfg.encoding)
self._asubmit(code, "text")
code = ""
if not noencode and len(code) > 0:
if encode_errors == 'fail':
if CnotB:
try:
code = code.encode(self.sascfg.encoding).decode(self.sascfg.encoding)
except Exception as e:
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("run;", 'text')
logger.error("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
logger.error("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace').decode(self.sascfg.encoding)
self._asubmit(code+";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
return None
def sasdata2dataframe(self, table: str, libref: str ='', dsopts: dict = None,
rowsep: str = '\x01', colsep: str = '\x02',
rowrep: str = ' ', colrep: str = ' ',
**kwargs) -> '<Pandas Data Frame object>':
'''
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
table - the name of the SAS Data Set you want to export to a Pandas Data Frame
libref - the libref for the SAS Data Set.
dsopts - data set options for the input SAS Data Set
rowsep - the row separator character to use; defaults to '\x01'
colsep - the column separator character to use; defaults to '\x02'
rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '
colrep - the char to convert to for any embedded colsep chars, defaults to ' '
'''
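# Illustrative usage sketch (table/libref names are examples):
#   df = sas.sasdata2dataframe('class', 'sashelp', dsopts={'where': 'age > 12'})
#   df = sas.sasdata2dataframe('class', 'sashelp', method='csv')   # route through the CSV variant below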
dsopts = dsopts if dsopts is not None else {}
method = kwargs.pop('method', None)
if method and method.lower() == 'csv':
return self.sasdata2dataframeCSV(table, libref, dsopts, **kwargs)
#elif method and method.lower() == 'disk':
else:
return self.sasdata2dataframeDISK(table, libref, dsopts, rowsep, colsep,
rowrep, colrep, **kwargs)
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict =None, opts: dict = None,
**kwargs) -> '<Pandas Data Frame object>':
'''
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
table - the name of the SAS Data Set you want to export to a Pandas Data Frame
libref - the libref for the SAS Data Set.
dsopts - data set options for the input SAS Data Set
opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)
tempfile - DEPRECATED
tempkeep - DEPRECATED
These two options are for advanced usage. They override how saspy imports data. For more info
see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques
dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=
'''
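# Illustrative usage sketch (names are examples; dtype keys are the SAS column names):
#   df = sas.sasdata2dataframeCSV('class', 'sashelp', dtype={'Name': 'str', 'Age': 'float'})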
tmp = kwargs.pop('tempfile', None)
tmp = kwargs.pop('tempkeep', None)
dsopts = dsopts if dsopts is not None else {}
opts = opts if opts is not None else {}
if libref:
tabname = libref+".'"+table.strip().replace("'", "''")+"'n "
else:
tabname = "'"+table.strip().replace("'", "''")+"'n "
code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
ll = self.submit(code, "text")
##GET Data Table Info
#conn = self.sascfg.HTTPConn; conn.connect()
#headers={"Accept":"application/vnd.sas.compute.data.table+json", "Authorization":"Bearer "+self.sascfg._token}
#conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe", headers=headers)
#req = conn.getresponse()
#status = req.status
#conn.close()
#resp = req.read()
#js = json.loads(resp.decode(self.sascfg.encoding))
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe/columns?start=0&limit=9999999", headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
js = json.loads(resp.decode(self.sascfg.encoding))
varlist = []
vartype = []
nvars = js.get('count')
lst = js.get('items')
for i in range(len(lst)):
varlist.append(lst[i].get('name'))
vartype.append(lst[i].get('type'))
dvarlist = list(varlist)
for i in range(len(varlist)):
varlist[i] = varlist[i].replace("'", "''")
topts = dict(dsopts)
topts.pop('firstobs', None)
topts.pop('obs', None)
code = "data work._n_u_l_l_;output;run;\n"
code += "data _null_; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
for i in range(nvars):
code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("FMT_CATS=")
l2 = l2[2].partition("\n")
varcat = l2[2].split("\n", nvars)
del varcat[nvars]
code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
code += "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";\nformat "
idx_col = kwargs.pop('index_col', False)
eng = kwargs.pop('engine', 'c')
my_fmts = kwargs.pop('my_fmts', False)
k_dts = kwargs.pop('dtype', None)
if k_dts is None and my_fmts:
logger.warning("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
my_fmts = False
if not my_fmts:
for i in range(nvars):
if vartype[i] == 'FLOAT':
code += "'"+varlist[i]+"'n "
if varcat[i] in self._sb.sas_date_fmts:
code += 'E8601DA10. '
else:
if varcat[i] in self._sb.sas_time_fmts:
code += 'E8601TM15.6 '
else:
if varcat[i] in self._sb.sas_datetime_fmts:
code += 'E8601DT26.6 '
else:
code += 'best32. '
code += ";run;\n"
ll = self.submit(code, "text")
if k_dts is None:
dts = {}
for i in range(nvars):
if vartype[i] == 'FLOAT':
if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
dts[dvarlist[i]] = 'float'
else:
dts[dvarlist[i]] = 'str'
else:
dts[dvarlist[i]] = 'str'
else:
dts = k_dts
code = "filename _tomodsx '"+self._sb.workpath+"_tomodsx' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n"
code += "proc export data=work.sasdata2dataframe outfile=_tomodsx dbms=csv replace;\n"
code += self._sb._expopts(opts)+" run;\n"
code += "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
ll = self.submit(code, 'text')
logf = ll['LOG']
code = "filename _sp_updn '"+self._sb.workpath+"_tomodsx' recfm=F encoding=binary lrecl=4096;"
ll = self.submit(code, "text")
logf += ll['LOG']
# GET data
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"*/*","Content-Type":"application/octet-stream",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', self._uri_files+"/_sp_updn/content", headers=headers)
req = conn.getresponse()
status = req.status
sockout = _read_sock(req=req)
df = pd.read_csv(sockout, index_col=idx_col, encoding='utf8', engine=eng, dtype=dts, **kwargs)
conn.close()
if k_dts is None: # don't override these if user provided their own dtypes
for i in range(nvars):
if vartype[i] == 'FLOAT':
if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
df[dvarlist[i]] = pd.to_datetime(df[dvarlist[i]], errors='coerce')
ll = self.submit("filename _sp_updn;", 'text')
logf += ll['LOG']
return df
def sasdata2dataframeDISK(self, table: str, libref: str ='', dsopts: dict = None,
rowsep: str = '\x01', colsep: str = '\x02',
rowrep: str = ' ', colrep: str = ' ', **kwargs) -> '<Pandas Data Frame object>':
'''
This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
table - the name of the SAS Data Set you want to export to a Pandas Data Frame
libref - the libref for the SAS Data Set.
dsopts - data set options for the input SAS Data Set
rowsep - the row separator character to use; defaults to '\x01'
colsep - the column separator character to use; defaults to '\x02'
rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '
colrep - the char to convert to for any embedded colsep chars, defaults to ' '
tempfile - DEPRECATED
tempkeep - DEPRECATED
These two options are for advanced usage. They override how saspy imports data. For more info
see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques
dtype - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=
'''
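# Illustrative usage sketch (names are examples; the separators shown are the defaults):
#   df = sas.sasdata2dataframeDISK('class', 'sashelp', rowsep='\x01', colsep='\x02')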
tmp = kwargs.pop('tempfile', None)
tmp = kwargs.pop('tempkeep', None)
dsopts = dsopts if dsopts is not None else {}
if libref:
tabname = libref+".'"+table.strip().replace("'", "''")+"'n "
else:
tabname = "'"+table.strip().replace("'", "''")+"'n "
code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
ll = self.submit(code, "text")
##GET Data Table Info
#conn = self.sascfg.HTTPConn; conn.connect()
#headers={"Accept":"application/vnd.sas.compute.data.table+json", "Authorization":"Bearer "+self.sascfg._token}
#conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe", headers=headers)
#req = conn.getresponse()
#status = req.status
#conn.close()
#resp = req.read()
#js = json.loads(resp.decode(self.sascfg.encoding))
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"application/vnd.sas.collection+json", "Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', "/compute/sessions/"+self.pid+"/data/work/sasdata2dataframe/columns?start=0&limit=9999999", headers=headers)
req = conn.getresponse()
status = req.status
resp = req.read()
conn.close()
js = json.loads(resp.decode(self.sascfg.encoding))
varlist = []
vartype = []
nvars = js.get('count')
lst = js.get('items')
for i in range(len(lst)):
varlist.append(lst[i].get('name'))
vartype.append(lst[i].get('type'))
dvarlist = list(varlist)
for i in range(len(varlist)):
varlist[i] = varlist[i].replace("'", "''")
topts = dict(dsopts)
topts.pop('firstobs', None)
topts.pop('obs', None)
code = "proc delete data=work.sasdata2dataframe(memtype=view);run;"
code += "data work._n_u_l_l_;output;run;\n"
code += "data _null_; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
for i in range(nvars):
code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"
ll = self.submit(code, "text")
l2 = ll['LOG'].rpartition("FMT_CATS=")
l2 = l2[2].partition("\n")
varcat = l2[2].split("\n", nvars)
del varcat[nvars]
rdelim = "'"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+"'x"
cdelim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "
idx_col = kwargs.pop('index_col', False)
eng = kwargs.pop('engine', 'c')
my_fmts = kwargs.pop('my_fmts', False)
k_dts = kwargs.pop('dtype', None)
if k_dts is None and my_fmts:
logger.warning("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
my_fmts = False
code = "filename _tomodsx '"+self._sb.workpath+"_tomodsx' recfm=v termstr=NL encoding='utf-8';\n"
code += "data _null_; set "+tabname+self._sb._dsopts(dsopts)+";\n"
if not my_fmts:
for i in range(nvars):
if vartype[i] == 'FLOAT':
code += "format '"+varlist[i]+"'n "
if varcat[i] in self._sb.sas_date_fmts:
code += 'E8601DA10.'
else:
if varcat[i] in self._sb.sas_time_fmts:
code += 'E8601TM15.6'
else:
if varcat[i] in self._sb.sas_datetime_fmts:
code += 'E8601DT26.6'
else:
code += 'best32.'
code += '; '
if i % 10 == 9:
code +='\n'
miss = {}
code += "\nfile _tomodsx lrecl="+str(self.sascfg.lrecl)+" dlm="+cdelim+" recfm=v termstr=NL encoding='utf-8';\n"
for i in range(nvars):
if vartype[i] != 'FLOAT':
code += "'"+varlist[i]+"'n = translate('"
code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
'%02x%02x' % \
(ord(rowrep.encode(self.sascfg.encoding)), \
ord(colrep.encode(self.sascfg.encoding))),
'%02x%02x' % \
(ord(rowsep.encode(self.sascfg.encoding)), \
ord(colsep.encode(self.sascfg.encoding))))
miss[dvarlist[i]] = ' '
else:
code += "if missing('"+varlist[i]+"'n) then '"+varlist[i]+"'n = .; "
miss[dvarlist[i]] = '.'
if i % 10 == 9:
code +='\n'
code += "\nput "
for i in range(nvars):
code += " '"+varlist[i]+"'n "
if i % 10 == 9:
code +='\n'
code += rdelim+";\nrun;"
ll = self.submit(code, "text")
if k_dts is None:
dts = {}
for i in range(nvars):
if vartype[i] == 'FLOAT':
if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
dts[dvarlist[i]] = 'float'
else:
dts[dvarlist[i]] = 'str'
else:
dts[dvarlist[i]] = 'str'
else:
dts = k_dts
quoting = kwargs.pop('quoting', 3)
code = "filename _sp_updn '"+self._sb.workpath+"_tomodsx' recfm=F encoding=binary lrecl=4096;"
ll = self.submit(code, "text")
logf = ll['LOG']
# GET data
conn = self.sascfg.HTTPConn; conn.connect()
headers={"Accept":"*/*","Content-Type":"application/octet-stream",
"Authorization":"Bearer "+self.sascfg._token}
conn.request('GET', self._uri_files+"/_sp_updn/content", headers=headers)
req = conn.getresponse()
status = req.status
sockout = _read_sock(req=req, method='DISK', rsep=(colsep+rowsep+'\n').encode(), rowsep=rowsep.encode())
df = pd.read_csv(sockout, index_col=idx_col, engine=eng, header=None, names=dvarlist,
sep=colsep, lineterminator=rowsep, dtype=dts, na_values=miss,
encoding='utf-8', quoting=quoting, **kwargs)
conn.close()
if k_dts is None: # don't override these if user provided their own dtypes
for i in range(nvars):
if vartype[i] == 'FLOAT':
if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
df[dvarlist[i]] = pd.to_datetime(df[dvarlist[i]], errors='coerce')
ll = self.submit("filename _sp_updn;", 'text')
logf += ll['LOG']
return df
class _read_sock(io.StringIO):
def __init__(self, **kwargs):
self.req = kwargs.get('req')
self.method = kwargs.get('method', 'CSV')
self.rowsep = kwargs.get('rowsep', b'\n')
self.rsep = kwargs.get('rsep', self.rowsep)
self.datar = b""
def read(self, size=4096):
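# Accumulates bytes from the HTTP response until at least `size` bytes AND at least one complete
# row terminator (self.rsep) are buffered, then returns everything up to and including the last
# complete terminator; the trailing partial row stays in self.datar for the next call. An empty
# string is returned only at EOF with an empty buffer. For the 'DISK' method the combined
# col+row terminator is collapsed back to the plain row separator so pandas sees a single
# lineterminator character.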
datl = 0
size = max(size, 4096)
notarow = True
while datl < size or notarow:
data = self.req.read(size)
dl = len(data)
if dl:
datl += dl
self.datar += data
if notarow:
notarow = self.datar.count(self.rsep) <= 0
else:
if len(self.datar) <= 0:
return ''
else:
break
data = self.datar.rpartition(self.rsep)
if self.method == 'DISK':
datap = (data[0]+data[1]).replace(self.rsep, self.rowsep)
else:
datap = data[0]+data[1]
self.datar = data[2]
return datap.decode()
|
the-stack_0_800 | import pandas as pd
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
import argparse
import json
import pytorch_lightning as pl
import pandas as pd
import sklearn
from ray import tune
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import os
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from ray.tune.integration.pytorch_lightning import TuneReportCallback
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.optim import SGD, Adam
from torchvision import transforms
import MLmodels as m
from ray.tune.integration.pytorch_lightning import TuneReportCheckpointCallback
from ray.tune.suggest.bayesopt import BayesOptSearch
class ResNetClassifier(pl.LightningModule):
def __init__(self, config, num_classes, resnet_version,
test_path=None,
optimizer='adam',
transfer=True):
super().__init__()
self.__dict__.update(locals())
resnets = {
18: models.resnet18, 34: models.resnet34,
50: models.resnet50, 101: models.resnet101,
152: models.resnet152
}
optimizers = {'adam': Adam, 'sgd': SGD}
self.optimizer = optimizers[optimizer]
# hyperparameters
self.lr = config['lr']
self.batch_size = config['batch_size']
# for importing different versions of the data
self.datatype = config['datatype']
if 'B' in self.datatype and '20' not in self.datatype:
self.data_length = 40
else:
self.data_length = 20
self.training_data = None
self.validation_data = None
# Using a pretrained ResNet backbone
self.resnet_model = resnets[resnet_version](pretrained=transfer)
# Replace old FC layer with Identity so we can train our own
linear_size = list(self.resnet_model.children())[-1].in_features
# replace final layer for fine tuning
fcn = [
nn.Dropout(config['dr']),
nn.Linear(linear_size, linear_size),
]
fcn2 = [
nn.Linear(linear_size, num_classes)
]
if num_classes > 1:
fcn2.append(torch.nn.LogSoftmax(dim=1))
self.fcn1 = nn.Sequential(*fcn)
self.d1 = m.drelu(linear_size)
self.fcn2 = nn.Sequential(*fcn2)
self.resnet_model.conv1 = torch.nn.Conv2d(1, 64, (7, 7), (2, 2), (3, 3), bias=False)
modules = list(self.resnet_model.children())[:-1] # delete the last fc layer.
self.resnet_model = nn.Sequential(*modules)
def forward(self, X):
x = self.resnet_model(X)
x = x.view(x.size(0), -1) # flatten
x = self.fcn1(x)
x = self.d1(x)
x = self.fcn2(x)
return x
def configure_optimizers(self):
return self.optimizer(self.parameters(), lr=self.lr)
def prepare_data(self):
# import our data
train, validate, weights = m.get_rawdata(self.datatype, 10, 5, round=8)
_train = train.copy()
_validate = validate.copy()
# Assigns labels for learning
_train["binary"] = _train["affinity"].apply(m.bi_labelM)
_validate["binary"] = _validate["affinity"].apply(m.bi_labelM)
_weights = torch.FloatTensor(weights)
# instantiate loss criterion, need weights so put this here
self.criterion = m.SmoothCrossEntropyLoss(weight=_weights, smoothing=0.01)
self.training_data = _train
self.validation_data = _validate
def train_dataloader(self):
# Data Loading
train_reader = m.NAReader(self.training_data, shuffle=True, max_length=self.data_length)
train_loader = torch.utils.data.DataLoader(
train_reader,
batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=True
)
return train_loader
def training_step(self, batch, batch_idx):
seq, x, y = batch
softmax = self(x)
train_loss = self.criterion(softmax, y)
# Convert to labels
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
train_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/train_loss", train_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/train_accuracy", train_acc, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return train_loss
def val_dataloader(self):
# Data Loading
val_reader = m.NAReader(self.validation_data, shuffle=False)
val_loader = torch.utils.data.DataLoader(
val_reader,
batch_size=self.batch_size,
collate_fn=m.my_collate,
num_workers=4,
# pin_memory=True,
shuffle=False
)
return val_loader
def validation_step(self, batch, batch_idx):
seq, x, y = batch
softmax = self(x)
val_loss = self.criterion(softmax, y)
# Convert to labels
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = list(y.detach().cpu().numpy())
val_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# perform logging
self.log("ptl/val_loss", val_loss, on_epoch=True, prog_bar=True, logger=True)
self.log("ptl/val_accuracy", val_acc, on_epoch=True, prog_bar=True, logger=True)
return {"val_loss": val_loss, "val_acc": val_acc}
def train_resnet(config, checkpoint_dir=None, num_epochs=10, num_gpus=0):
trainer = pl.Trainer(
# default_root_dir="./checkpoints/",
max_epochs=num_epochs,
gpus=num_gpus,
logger=TensorBoardLogger(
save_dir=tune.get_trial_dir(), name="", version="."),
progress_bar_refresh_rate=0,
callbacks=[
TuneReportCheckpointCallback(
metrics={
"loss": "ptl/val_loss",
"acc": "ptl/val_accuracy"
},
filename="checkpoint",
on="validation_end")
]
)
if checkpoint_dir:
# Currently, this leads to errors:
# model = LightningMNISTClassifier.load_from_checkpoint(
# os.path.join(checkpoint, "checkpoint"))
# Workaround:
ckpt = pl_load(
os.path.join(checkpoint_dir, "checkpoint"),
map_location=lambda storage, loc: storage)
model = ResNetClassifier._load_model_state(
ckpt, config=config)
trainer.current_epoch = ckpt["epoch"]
else:
model = ResNetClassifier(config, 2, 18, optimizer='adam')
trainer.fit(model)
def tune_asha(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size": tune.choice([32, 64, 128]),
"dr": tune.loguniform(0.005, 0.5),
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration"])
analysis = tune.run(
tune.with_parameters(
train_resnet,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_res_drelu_asha")
print("Best hyperparameters found were: ", analysis.best_config)
# analysis.to_csv('~/ray_results/' + config['datatype'])
def tune_asha_search(datatype, num_samples=10, num_epochs=10, gpus_per_trial=0, cpus_per_trial=1):
config = {
"lr": tune.uniform(1e-4, 1e-1),
"batch_size": 64,
"dr": tune.uniform(0.005, 0.5),
"datatype": datatype
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=5,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "acc", "training_iteration"])
bayesopt = BayesOptSearch(metric="mean_loss", mode="min")
analysis = tune.run(
tune.with_parameters(
train_resnet,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="acc",
mode="max",
local_dir="./ray_results/",
config=config,
num_samples=num_samples,
search_alg=bayesopt,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_res_drelu_bayopt")
print("Best hyperparameters found were: ", analysis.best_config)
# analysis.to_csv('~/ray_results/' + config['datatype'])
def exp_results_check(checkpoint_path, result_path, title):
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch + 1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}
model = ResNetClassifier(con, 2, 18, optimizer='adam')
checkpoint = torch.load(checkpoint_file)
model.prepare_data()
model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_set = m.test_set_corr
verdict = {'sequence':list(test_set.keys()), 'binary':list(test_set.values())}
_verification = pd.DataFrame(verdict)
ver_reader = m.NAReader(_verification, shuffle=False)
ver_loader = torch.utils.data.DataLoader(
ver_reader,
batch_size=len(test_set.keys()),
collate_fn=m.my_collate,
# num_workers=4,
# pin_memory=True,
shuffle=False
)
for i, batch in enumerate(ver_loader):
seqs, ohe, labels = batch
softmax = model(ohe)
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
y_pred = np.asarray(predcpu, dtype=bool).tolist()
score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])
ver_acc = np.mean(score)
f1 = sklearn.metrics.f1_score(y_true, y_pred)
cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', "w+")
print("Validation Loss", results['loss'], file=o)
print("Validation Accuracy", results['acc'], file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(test_set.keys()), file=o)
print("F1-score", f1, file=o)
o.close()
def exp_results_check_progress(checkpoint_path, hparams, progress, title):
# example
checkpoint_file = checkpoint_path
# param_file = open(result_path, 'r')
# check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
# resultjsons = param_file.read().split('\n')
o = open(hparams, 'r')
params = json.load(o)
# params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
progress = pd.read_csv(progress)
loss = progress.iloc[-1].loss
acc = progress.iloc[-1].acc
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}
model = ResNetClassifier(con, 2, 18, optimizer='adam')
checkpoint = torch.load(checkpoint_file)
model.prepare_data()
model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file
model.load_state_dict(checkpoint['state_dict'])
model.eval()
test_set = m.test_set_corr
verdict = {'sequence': list(test_set.keys()), 'binary': list(test_set.values())}
_verification = pd.DataFrame(verdict)
ver_reader = m.NAReader(_verification, shuffle=False)
ver_loader = torch.utils.data.DataLoader(
ver_reader,
batch_size=len(test_set.keys()),
collate_fn=m.my_collate,
# num_workers=4,
# pin_memory=True,
shuffle=False
)
for i, batch in enumerate(ver_loader):
seqs, ohe, labels = batch
softmax = model(ohe)
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# ver_acc = sklearn.metrics.balanced_accuracy_score(ycpu, predcpu)
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
y_pred = np.asarray(predcpu, dtype=bool).tolist()
score = np.asarray([1 if x == y_pred[xid] else 0 for xid, x in enumerate(y_true)])
ver_acc = np.mean(score)
f1 = sklearn.metrics.f1_score(y_true, y_pred)
cm = sklearn.metrics.confusion_matrix(y_true, y_pred, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title)
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results.txt', "w+")
print("Validation Loss", loss, file=o)
print("Validation Accuracy", acc, file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(test_set.keys()), file=o)
print("F1-score", f1, file=o)
o.close()
def val_results_check(checkpoint_path, hparams, progress, result_path, title, r=True):
# example
# checkpoint_file = './ray_results/tune_vae_asha/train_vae_a45d1_00000_0_batch_size=64,dr=0.029188,lr=0.0075796,z_dim=10_2021-07-13_12-50-57/checkpoints/epoch=28-step=15891.ckpt'
checkpoint_file = checkpoint_path
if r:
param_file = open(result_path, 'r')
check_epoch = int(checkpoint_file.split("epoch=", 1)[1].split('-', 1)[0])
resultjsons = param_file.read().split('\n')
results = json.loads(resultjsons[check_epoch + 1])
params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
loss = results['loss']
acc = results['acc']
else:
o = open(hparams, 'r')
params = json.load(o)
# params = results['config']
lr = params['lr']
dr = params['dr']
batch_size = params['batch_size']
datatype = params['datatype']
progress = pd.read_csv(progress)
loss = progress.iloc[-1].loss
acc = progress.iloc[-1].acc
con = {'lr': lr, 'dr': dr, 'batch_size': batch_size, 'datatype': datatype}
model = ResNetClassifier(con, 2, 18, optimizer='adam')
checkpoint = torch.load(checkpoint_file)
model.prepare_data()
model.criterion.weight = torch.tensor([0., 0.]) # need to add as this is saved by the checkpoint file
model.load_state_dict(checkpoint['state_dict'])
model.eval()
vd = model.val_dataloader()
yt, yp = [], []
for i, batch in enumerate(vd):
seqs, ohe, labels = batch
softmax = model(ohe)
preds = torch.argmax(softmax, 1).clone().double() # convert to torch float 64
predcpu = list(preds.detach().cpu().numpy())
ycpu = labels
# Make confusion Matrix
y_true = ycpu.detach().cpu().numpy().astype('bool').tolist()
y_pred = np.asarray(predcpu, dtype=bool).tolist()
yt += y_true
yp += y_pred
ver_acc = np.mean(np.asarray([1 if x == yp[xid] else 0 for xid, x in enumerate(yt)]))
# ver_acc = sklearn.metrics.balanced_accuracy_score(yt, yp)
cm = sklearn.metrics.confusion_matrix(yt, yp, normalize='true')
df_cm = pd.DataFrame(cm, index=[0, 1], columns=[0, 1])
plt.figure(figsize=(10, 7))
ax = plt.subplot()
seaborn.set(font_scale=3.0)
seaborn.heatmap(df_cm, annot=True, ax=ax)
label_font = {'size': '26'}
ax.tick_params(axis='both', which='major', labelsize=40)
ax.xaxis.set_ticklabels(["0", "1"])
ax.yaxis.set_ticklabels(["0", "1"])
# plt.title(title)
plt.savefig('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + "_VER")
o = open('../../Dropbox (ASU)/Projects/Aptamer_ML/' + title + 'results_ver.txt', "w+")
print("Validation Loss", loss, file=o)
print("Validation Accuracy", acc, file=o)
print("Verification Accuracy:", ver_acc, "of dataset size:", len(yt), file=o)
o.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Resnet Training on Aptamer Dataset")
parser.add_argument('dataset', type=str, help="3-7 letter/number abbreviation describing subset of the data to use")
parser.add_argument('cpus_per_trial', type=str, help="Number of cpus available to each trial in Ray Tune")
parser.add_argument('gpus_per_trial', type=str, help="Number of gpus available to each trial in Ray Tune")
parser.add_argument('samples', type=str, help="Number of Ray Tune Samples")
args = parser.parse_args()
os.environ["SLURM_JOB_NAME"] = "bash"
tune_asha(args.dataset, int(args.samples), 30, gpus_per_trial=int(args.gpus_per_trial), cpus_per_trial=int(args.cpus_per_trial))
# tune_asha_search(args.dataset, int(args.samples), 50, gpus_per_trial=int(args.gpus_per_trial), cpus_per_trial=int(args.cpus_per_trial))
### Debugging
# con = {'lr': 1e-4, 'dr': 0.1, 'batch_size': 32, 'datatype': 'HCL'}
#
# model = ResNetClassifier(con, 2, 18)
#
## Single Loop debugging
# model.prepare_data()
# d = model.train_dataloader()
# for i, batch in enumerate(d):
# if i > 0:
# break
# else:
# model.training_step(batch, i)
# pytorch lightning loop
# rn = ResNetClassifier(con, 2, 18, optimizer='adam')
# plt = pl.Trainer(gpus=1)
# plt.fit(rn)
|
the-stack_0_802 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 23:26:08 2017
@author: Shashwat Sridhar
"""
# system imports
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
from os import sep
# swan-specific imports
from swan.views.mean_waveforms_view import PgWidget2d
from swan.views.virtual_units_view import VirtualUnitsView
from swan.widgets.plot_grid import MyPlotGrid
from swan.views.isi_histograms_view import PgWidgetISI
from swan.views.pca_3d_view import PgWidgetPCA
from swan.views.rate_profile_view import PgWidgetRateProfile
from swan.widgets.plot_grid_tools import PlotGridTools
from swan.widgets.view_toolbar import CollapsibleWidget
from swan.resources import icons
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _from_utf_8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
class MainUI(object):
def __init__(self, main_application):
main_application.setObjectName(_from_utf_8("Main"))
main_application.setDockOptions(QtWidgets.QMainWindow.AllowTabbedDocks |
QtWidgets.QMainWindow.AllowNestedDocks |
QtWidgets.QMainWindow.GroupedDragging)
self.plotGridDock = QtWidgets.QDockWidget("Plot Grid")
self.plotGridDock.setObjectName(_from_utf_8("PlotGridDock"))
self.plotGrid = MyPlotGrid(main_application)
self.plotGridDock.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.plotGridDock.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.plotGridDock.setWidget(self.plotGrid)
self.dock_virtual_unit_view = QtWidgets.QDockWidget("Virtual Unit Mappings")
self.dock_virtual_unit_view.setObjectName(_from_utf_8("virtualUnitsDock"))
self.dock_virtual_unit_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_virtual_unit_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.virtual_units_view = VirtualUnitsView()
self.virtual_units_view.setObjectName(_from_utf_8("virtualUnitsView"))
self.dock_virtual_unit_view.setWidget(self.virtual_units_view)
self.dock_mean_waveforms_view = QtWidgets.QDockWidget("Mean Waveforms")
self.dock_mean_waveforms_view.setObjectName(_from_utf_8("meanWaveformView"))
self.dock_mean_waveforms_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_mean_waveforms_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.mean_waveforms_view = PgWidget2d()
self.mean_waveforms_view.setObjectName(_from_utf_8("meanWaveformsView"))
self.dock_mean_waveforms_view.setWidget(self.mean_waveforms_view)
self.dock_isi_histograms_view = QtWidgets.QDockWidget("ISI Histograms")
self.dock_isi_histograms_view.setObjectName(_from_utf_8("ISIHView"))
self.dock_isi_histograms_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_isi_histograms_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.isi_histograms_view = PgWidgetISI()
self.isi_histograms_view.setObjectName(_from_utf_8("IsihView"))
self.dock_isi_histograms_view.setWidget(self.isi_histograms_view)
self.dock_pca_3d_view = QtWidgets.QDockWidget("Principal Component Analysis")
self.dock_pca_3d_view.setObjectName(_from_utf_8("PCAView"))
self.dock_pca_3d_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_pca_3d_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.pca_3d_view = PgWidgetPCA()
self.pca_3d_view.setObjectName(_from_utf_8("PcaView"))
self.dock_pca_3d_view.setWidget(self.pca_3d_view)
self.dock_rate_profiles_view = QtWidgets.QDockWidget("Rate Profiles")
self.dock_rate_profiles_view.setObjectName(_from_utf_8("RateProfiles"))
self.dock_rate_profiles_view.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
QtWidgets.QDockWidget.DockWidgetFloatable)
self.dock_rate_profiles_view.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
self.rate_profiles_view = PgWidgetRateProfile()
self.rate_profiles_view.setObjectName(_from_utf_8("RateProfileView"))
self.dock_rate_profiles_view.setWidget(self.rate_profiles_view)
self.tools = PlotGridTools()
self.plotGridOptionsLayout = QtWidgets.QGridLayout()
self.plotGridOptionsLayout.setObjectName(_from_utf_8("PlotGridOptionsLayout"))
self.plotGridOptionsLayout.addWidget(self.tools)
self.plotGridOptions = CollapsibleWidget(parent=self.plotGrid, title="Options", animation_duration=400)
self.plotGridOptions.set_content_layout(self.plotGridOptionsLayout)
self.plotGrid.main_grid_layout.addWidget(self.plotGridOptions, 1, 0)
self.plotGrid.main_grid_layout.setRowStretch(0, 10)
self.menu_bar = QtWidgets.QMenuBar(main_application)
self.menu_bar.setGeometry(QtCore.QRect(0, 0, 1159, 25))
self.menu_bar.setObjectName(_from_utf_8("menubar"))
self.menu_File = QtWidgets.QMenu(self.menu_bar)
self.menu_File.setObjectName(_from_utf_8("menu_File"))
self.menu_Edit = QtWidgets.QMenu(self.menu_bar)
self.menu_Edit.setObjectName(_from_utf_8("menu_Edit"))
self.menu_Help = QtWidgets.QMenu(self.menu_bar)
self.menu_Help.setObjectName(_from_utf_8("menu_Help"))
self.menu_View = QtWidgets.QMenu(self.menu_bar)
self.menu_View.setObjectName(_from_utf_8("menu_View"))
main_application.setMenuBar(self.menu_bar)
self.statusbar = QtWidgets.QStatusBar(main_application)
self.statusbar.setObjectName(_from_utf_8("statusbar"))
main_application.setStatusBar(self.statusbar)
self.toolbar = QtWidgets.QToolBar(main_application)
self.toolbar.setObjectName(_from_utf_8("toolBar"))
main_application.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)
self.action_new_project = QtWidgets.QAction(main_application)
self.action_new_project.setObjectName(_from_utf_8("action_new_project"))
self.action_load_project = QtWidgets.QAction(main_application)
self.action_load_project.setObjectName(_from_utf_8("action_load_project"))
self.action_save_project = QtWidgets.QAction(main_application)
self.action_save_project.setObjectName(_from_utf_8("action_save_project"))
self.action_quit = QtWidgets.QAction(main_application)
self.action_quit.setObjectName(_from_utf_8("action_quit"))
self.action_swap = QtWidgets.QAction(main_application)
self.action_swap.setObjectName(_from_utf_8("action_swap"))
self.action_collapse = QtWidgets.QAction(main_application)
self.action_collapse.setObjectName(_from_utf_8("action_collapse"))
self.action_recalculate_mapping = QtWidgets.QAction(main_application)
self.action_recalculate_mapping.setObjectName(_from_utf_8("action_recalculate_mapping"))
self.action_save_as = QtWidgets.QAction(main_application)
self.action_save_as.setObjectName(_from_utf_8("action_save_as"))
self.action_load_connector_map = QtWidgets.QAction(main_application)
self.action_load_connector_map.setObjectName(_from_utf_8("action_load_connector_map"))
self.action_zoom_in = QtWidgets.QAction(main_application)
self.action_zoom_in.setObjectName(_from_utf_8("action_zoom_in"))
self.action_zoom_out = QtWidgets.QAction(main_application)
self.action_zoom_out.setObjectName(_from_utf_8("action_zoom_out"))
self.action_revert_mapping = QtWidgets.QAction(main_application)
self.action_revert_mapping.setObjectName(_from_utf_8("action_revert_mapping"))
self.action_collapse_overview = QtWidgets.QAction(main_application)
self.action_collapse_overview.setObjectName(_from_utf_8("action_collapse_overview"))
self.action_expand_overview = QtWidgets.QAction(main_application)
self.action_expand_overview.setObjectName(_from_utf_8("action_expand_overview"))
self.action_preferences = QtWidgets.QAction(main_application)
self.action_preferences.setObjectName(_from_utf_8("action_preferences"))
self.action_about = QtWidgets.QAction(main_application)
self.action_about.setObjectName(_from_utf_8("action_about"))
self.action_tutorials = QtWidgets.QAction(main_application)
self.action_tutorials.setObjectName(_from_utf_8("action_tutorials"))
self.action_export_to_csv = QtWidgets.QAction(main_application)
self.action_export_to_csv.setObjectName(_from_utf_8("action_export_to_csv"))
self.action_export_to_odml = QtWidgets.QAction(main_application)
self.action_export_to_odml.setObjectName(_from_utf_8("action_export_to_odml"))
self.action_import_from_csv = QtWidgets.QAction(main_application)
self.action_import_from_csv.setObjectName(_from_utf_8("action_import_from_csv"))
self.action_import_from_od_ml = QtWidgets.QAction(main_application)
self.action_import_from_od_ml.setObjectName(_from_utf_8("action_import_from_od_ml"))
self.action_revert_state = QtWidgets.QAction(main_application)
self.action_revert_state.setObjectName(_from_utf_8("action_revert_state"))
self.action_restore_state = QtWidgets.QAction(main_application)
self.action_restore_state.setObjectName(_from_utf_8("action_restore_state"))
self.action_save_state = QtWidgets.QAction(main_application)
self.action_save_state.setObjectName(_from_utf_8("action_save_state"))
self.menu_File.addAction(self.action_new_project)
self.menu_File.addAction(self.action_load_project)
self.menu_File.addAction(self.action_save_project)
self.menu_File.addAction(self.action_save_as)
self.menu_File.addSeparator()
self.menu_File.addAction(self.action_load_connector_map)
self.menu_File.addAction(self.action_export_to_csv)
self.menu_File.addAction(self.action_export_to_odml)
self.menu_File.addSeparator()
self.menu_File.addAction(self.action_quit)
self.menu_Edit.addAction(self.action_recalculate_mapping)
self.menu_Edit.addAction(self.action_revert_mapping)
self.menu_Edit.addAction(self.action_swap)
self.menu_Edit.addSeparator()
self.menu_Edit.addAction(self.action_zoom_in)
self.menu_Edit.addAction(self.action_zoom_out)
self.menu_Edit.addAction(self.action_expand_overview)
self.menu_Edit.addAction(self.action_collapse_overview)
self.menu_Edit.addSeparator()
self.menu_Edit.addAction(self.action_preferences)
self.menu_Help.addAction(self.action_tutorials)
self.menu_Help.addAction(self.action_about)
self.menu_View.addAction(self.action_save_state)
self.menu_View.addAction(self.action_restore_state)
self.menu_View.addAction(self.action_revert_state)
self.menu_bar.addAction(self.menu_File.menuAction())
self.menu_bar.addAction(self.menu_Edit.menuAction())
self.menu_bar.addAction(self.menu_View.menuAction())
self.menu_bar.addAction(self.menu_Help.menuAction())
self.toolbar.addAction(self.action_new_project)
self.toolbar.addAction(self.action_load_project)
self.toolbar.addAction(self.action_save_project)
self.toolbar.addAction(self.action_save_as)
self.toolbar.addAction(self.action_preferences)
self.toolbar.addSeparator()
self.toolbar.addAction(self.action_revert_mapping)
self.toolbar.addAction(self.action_swap)
self.toolbar.addSeparator()
self.toolbar.addAction(self.action_zoom_in)
self.toolbar.addAction(self.action_zoom_out)
self.toolbar.addAction(self.action_expand_overview)
self.toolbar.addAction(self.action_collapse_overview)
self.load_icons()
self.retranslate_ui(main_application)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.plotGridDock, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_virtual_unit_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_rate_profiles_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dock_pca_3d_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock_mean_waveforms_view, QtCore.Qt.Vertical)
main_application.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock_isi_histograms_view, QtCore.Qt.Vertical)
main_application.splitDockWidget(self.plotGridDock, self.dock_virtual_unit_view, QtCore.Qt.Horizontal)
main_application.splitDockWidget(self.dock_virtual_unit_view, self.dock_rate_profiles_view, QtCore.Qt.Horizontal)
main_application.splitDockWidget(self.dock_rate_profiles_view, self.dock_pca_3d_view, QtCore.Qt.Vertical)
# self.action_quit.triggered.connect(main_application.close)
QtCore.QMetaObject.connectSlotsByName(main_application)
@staticmethod
def set_program_title(main_application, text):
main_application.setWindowTitle(_translate("main_application", text, None))
def retranslate_ui(self, main_application):
main_application.setWindowTitle(_translate("main_application", "SWAN - Sequential waveform analyser", None))
self.menu_File.setTitle(_translate("main_application", "&File", None))
self.menu_Edit.setTitle(_translate("main_application", "&Edit", None))
self.menu_Help.setTitle(_translate("main_application", "&Help", None))
self.menu_View.setTitle(_translate("main_application", "&View", None))
self.toolbar.setWindowTitle(_translate("main_application", "toolBar", None))
self.action_new_project.setText(_translate("main_application", "&New Project...", None))
self.action_new_project.setIconText(_translate("main_application", "New Project...", None))
self.action_new_project.setToolTip(_translate("main_application", "Create a new project", None))
self.action_new_project.setShortcut(_translate("main_application", "Ctrl+N", None))
self.action_load_project.setText(_translate("main_application", "&Load Project...", None))
self.action_load_project.setIconText(_translate("main_application", "Load Project...", None))
self.action_load_project.setToolTip(_translate("main_application", "Load project from file", None))
self.action_load_project.setShortcut(_translate("main_application", "Ctrl+O", None))
self.action_save_project.setText(_translate("main_application", "&Save Project", None))
self.action_save_project.setIconText(_translate("main_application", "Save Project", None))
self.action_save_project.setToolTip(_translate("main_application", "Save project", None))
self.action_save_project.setShortcut(_translate("main_application", "Ctrl+S", None))
self.action_quit.setText(_translate("main_application", "&Quit", None))
self.action_quit.setToolTip(_translate("main_application", "Close this application", None))
self.action_quit.setShortcut(_translate("main_application", "Ctrl+Q", None))
self.action_swap.setText(_translate("main_application", "Swap", None))
self.action_swap.setToolTip(_translate("main_application", "Swap two selected units", None))
self.action_collapse.setText(_translate("main_application", "Collapse", None))
self.action_collapse.setToolTip(_translate("main_application", "Collapse selected unit row(s)", None))
self.action_recalculate_mapping.setText(_translate("main_application", "Recalculate mapping...", None))
self.action_recalculate_mapping.setToolTip(_translate("main_application", "Try to find a mapping automatically",
None))
self.action_save_as.setText(_translate("main_application", "Save project as...", None))
self.action_save_as.setToolTip(_translate("main_application", "Save project to a new file", None))
self.action_load_connector_map.setText(_translate("main_application", "Load connector map...", None))
self.action_zoom_in.setText(_translate("main_application", "Zoom in", None))
self.action_zoom_in.setToolTip(_translate("main_application", "Zoom overview in", None))
self.action_zoom_in.setShortcut(_translate("main_application", "Ctrl++", None))
self.action_zoom_out.setText(_translate("main_application", "Zoom out", None))
self.action_zoom_out.setToolTip(_translate("main_application", "Zoom overview out", None))
self.action_zoom_out.setShortcut(_translate("main_application", "Ctrl+-", None))
self.action_revert_mapping.setText(_translate("main_application", "Revert mapping...", None))
self.action_revert_mapping.setToolTip(_translate("main_application", "Revert current mapping to last saved",
None))
self.action_collapse_overview.setText(_translate("main_application", "Collapse overview", None))
self.action_collapse_overview.setToolTip(_translate("main_application", "Decrease overview\'s y range", None))
self.action_expand_overview.setText(_translate("main_application", "Expand overview", None))
self.action_expand_overview.setToolTip(_translate("main_application", "Increase overview\'s y range", None))
self.action_preferences.setText(_translate("main_application", "Preferences", None))
self.action_preferences.setToolTip(_translate("main_application", "View and change preferences", None))
self.action_about.setText(_translate("main_application", "About", None))
self.action_about.setToolTip(_translate("main_application", "Information about SWAN", None))
self.action_tutorials.setText(_translate("main_application", "Tutorials", None))
self.action_export_to_csv.setText(_translate("main_application", "Export to CSV...", None))
self.action_export_to_odml.setText(_translate("main_application", "Export to odML...", None))
self.action_import_from_csv.setText(_translate("main_application", "Import from csv", None))
self.action_restore_state.setText(_translate("main_application", "Restore GUI state", None))
self.action_revert_state.setText(_translate("main_application", "Revert GUI state", None))
self.action_save_state.setText(_translate("main_application", "Save GUI state", None))
def load_icons(self):
"""
Loads the icons.
"""
try:
prefix = ":" + sep + "icons" + sep
# File
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "new.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_new_project.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_load_project.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_save_project.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "save_as.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_save_as.setIcon(icon)
# Edit
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "revert.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_revert_mapping.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "swap.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_swap.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "zoom_in.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_zoom_in.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "zoom_out.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_zoom_out.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "expand.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_expand_overview.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "collapse.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_collapse_overview.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(prefix + "preferences.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.action_preferences.setIcon(icon)
except Exception as e:
print("Icon Exception: {exception}".format(exception=e))
pass
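# --- Hedged usage sketch (not part of the original module) ---
# MainUI only decorates an existing QMainWindow, so a minimal driver - assuming
# the swan package and its Qt resources are importable - might look like this:
if __name__ == "__main__":
    import sys
    qt_app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = MainUI(main_window)
    MainUI.set_program_title(main_window, "SWAN - demo session")
    main_window.show()
    sys.exit(qt_app.exec_())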
|
the-stack_0_803 | # -*- coding: utf-8 -*-
"""
Container for building a scene with fluorescent objects (i.e., scene plays a role of background or frame).
@author: ssklykov
"""
# %% Imports
import numpy as np
import matplotlib.pyplot as plt
# from skimage.util import img_as_ubyte
import os
from skimage.io import imsave
from scipy.ndimage import measurements
# %% class definition
class u_scene():
"""Class composing all capabilities of building image (numpy 2D array) with some objects drawn on the scene.
    The image is commonly specified as width x height (e.g., 800x600)"""
# default values
width = 100
height = 100
possible_img_types = ['uint8', 'uint16', 'float']
image_type = 'uint8'
scene_image = np.zeros((height, width), dtype=image_type)
maxPixelValue = 255
counter = 1 # counting how many images saved along generation
centers_of_mass = []
# %% Constructor
def __init__(self, width: int, height: int, image_type: str = 'uint8'):
"""
Initialize the blank (dark) scene image with the specified type (800x600 8bit image as an example)
Parameters
----------
width : int
Width of the initialized image (scene)
height : int
Height of the initialized image (scene)
image_type : str, optional
Image type used for pixel value calculations. Possible values are: 'uint8', 'uint16', 'float'.
The default is 'uint8'.
Returns
-------
None.
"""
width = abs(width)
height = abs(height)
if width > 0:
self.width = width
if height > 0:
            self.height = height
if image_type in self.possible_img_types:
self.image_type = image_type
else:
self.image_type = 'uint8'
print("Image type hasn't been recognized, initialized default 8bit gray image")
if (width != 100) or (height != 100) or (image_type != 'uint8'):
# non default values => re-initialization of the class attributes
self.scene_image = np.zeros((height, width), dtype=self.image_type)
self.width = width
self.height = height
if self.image_type == 'uint16':
self.maxPixelValue = 65535
elif self.image_type == 'float':
self.maxPixelValue = 1.0 # According to the specification of scikit-image
# %% Supportive functions
def cast_pixels_sum(self, pixels_sum):
"""
Casting of input result of pixel summing to conform with data type of the used image.
Parameters
----------
pixels_sum : uint8, uint16 or float
Sum of pixels (mask + scene (background)).
Returns
-------
value_returned : uint8, uint16 or float
Returns casted / corrected pixel value.
"""
if (pixels_sum) <= self.maxPixelValue:
# additional conversion for insuring of conformity with data type
if self.image_type == 'uint8':
value_returned = np.uint8(pixels_sum)
elif self.image_type == 'uint16':
value_returned = np.uint16(pixels_sum)
else:
value_returned = float(pixels_sum)
else:
value_returned = self.maxPixelValue
return value_returned
def get_j_finish(self, j_start: int, nCols: int) -> int:
"""
        Calculate the maximum j index for adding a mask, preventing it from going out of bounds.
Parameters
----------
j_start : int
Starting index for filling mask in.
nCols : int
Number of columns in mask that should be added to the scene.
Returns
-------
int
Ending ("final") index j for filling mask into the scene.
"""
if ((j_start + nCols) < self.width): # checking that starting/ending of summing are not out of bounds
j_finish = j_start + nCols
else:
j_finish = self.width
return j_finish
def get_i_finish(self, i_start: int, nRows: int) -> int:
"""
        Calculate the maximum i index for adding a mask, preventing it from going out of bounds.
Parameters
----------
i_start : int
Starting index for filling mask in.
nRows : int
Number of columns in mask that should be added to the scene.
Returns
-------
int
Ending ("final") index j for filling mask into the scene.
"""
if ((i_start + nRows) < self.height): # checking that starting/ending of summing are not out of bounds
i_finish = i_start + nRows
else:
i_finish = self.height
return i_finish
# %% Drawing of an object with some intensity mask (profile)
def add_mask(self, i_start: int, j_start: int, mask, debug: bool = False):
"""
Adding the "mask" - representation of the object (basically, less than the scene (background) image).
Contradictory, i_start represents "y" coordinate, j_start - "x", due to array representation of column and row.
This function accepts coordinates of image origin - starting pixel for drawing (like zero pixel).
The coordinates (j_start, i_start) as (x, y) could be negative or exceeding the scene sizes - in such case
whenever it possible, only the part of an object image will be added.
Parameters
----------
i_start : int
Start pixel (y coordinate) for drawing of an image ("mask").
j_start : int
Start pixel (x coordinate) for drawing of an image ("mask").
mask : np.array
2D np.array ("mask") with pixel values which represent the object.
debug: bool, optional
Flag for saving some internal statistical values for checking of possible bugs during calculations.
The default is False.
Returns
-------
None.
The scene collected as internal attribute of this class.
"""
(nRows, nCols) = np.shape(mask) # getting of sizes of mask
# Below is checking that the mask is not empty, it should be 1x1 matrix at least
if (nRows == 0) or (nCols == 0):
raise(IndexError('Provided mask is empty along some of its axis'))
# Below is checking that the i_start and j_start makes sense to apply to the scene image:
# i_start and j_start could be negative, but at least 1 point should be added to a scene
# also, j associates with WIDTH, so with # of columns! i - with rows!
if ((i_start + nRows) < 1) or ((j_start + nCols) < 1):
raise(IndexError('Provided i_start or j_start is not conformed with the mask sizes'))
# Below is checking filling parameters (i_start, j_start) is laying on an scene image
if (i_start >= self.height) or (j_start >= self.width):
raise(IndexError("Starting indices for adding mask is out of scene image bounds"))
# i_start, j_start > 0 both, filling some mask into a scene image - basic check for conformity
if (i_start >= 0) and (j_start >= 0) and (nRows > 0) and (nCols > 0):
# Attempt to speed up the adding mask to a scene: transferring pixel values as chunk with rows
if ((i_start + nRows) < self.height): # checking that fast sum over y axis could be performed
i_finish = i_start + nRows
j_finish = self.get_j_finish(j_start, nCols)
# "Fast summing" - adding the whole rows (all columns) to the image (scene)
for j in range(j_start, j_finish): # summing along j axis
# checking the conformity with image type
if np.max(self.scene_image[i_start:i_finish, j] + mask[:, j-j_start]) <= self.maxPixelValue:
self.scene_image[i_start:i_finish, j] += mask[:, j-j_start] # fast adding mask to a scene
else:
# checking each pixel from a scene and added from a mask pixel to be in range with image type
for i in range(i_start, i_finish):
pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# Attempt to speed up the adding mask to a scene: transferring pixel values as a chunk with columns
elif ((j_start + nCols) < self.width): # checking that fast sum over i axis could be performed
j_finish = j_start + nCols
i_finish = self.get_i_finish(i_start, nRows)
# "Fast summing" - along column - adding all rows at once
for i in range(i_start, i_finish): # summing along j axis
# checking the conformity with image type
if np.max(self.scene_image[i, j_start:j_finish] + mask[i-i_start, :]) <= self.maxPixelValue:
self.scene_image[i, j_start:j_finish] += mask[i-i_start, :] # fast adding mask to a scene
else:
# checking each pixel from a scene and added from a mask pixel to be in range with image type
for j in range(j_start, j_finish):
pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# filling right upper corner with exceptional case - when mask is out of image bounds
else:
i_finish = self.height
j_finish = self.width
for i in range(i_start, i_finish):
for j in range(j_start, j_finish):
pixels_sum = self.scene_image[i, j] + mask[i-i_start, j-j_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# Making correction of i_start, j_start if some of them is negative for working with partial mask overlap
if (i_start < 0) or (j_start < 0):
i_mask_start = 0
j_mask_start = 0
if (i_start < 0):
nRows += i_start # it will draw the mask if it partially overlaps with image boundaries
i_mask_start = abs(i_start)
i_start = 0
if (j_start < 0):
nCols += j_start
j_mask_start = abs(j_start)
j_start = 0
i_finish = self.get_i_finish(i_start, nRows)
j_finish = self.get_j_finish(j_start, nCols)
for i in range(i_start, i_finish):
for j in range(j_start, j_finish):
pixels_sum = self.scene_image[i, j] + mask[i+i_mask_start, j+j_mask_start]
self.scene_image[i, j] = self.cast_pixels_sum(pixels_sum)
# HINT: below is controlling of simulation - calculation of center of mass of added mask (generated scene)
if debug:
(i_mass_center, j_mass_center) = measurements.center_of_mass(self.scene_image)
self.centers_of_mass.append([i_mass_center, j_mass_center])
# print([i_mass_center, j_mass_center])
    # %% Plotting the composed image (scene) with all objects
def plot_image(self):
"""
Plotting the self.scene composed with added masks (objects) / noise.
Returns
-------
Plotted the scene (picture) on the separate window using matplotlib library
"""
plt.figure()
# Below - representation according to the documentation:
# plt.cm.gray - for representing gray values, aspect - for filling image values in a window
# origin - for adjusting origin of pixels (0, 0), extent - regulation of axis values
# extent = (-0.5, numcols-0.5, -0.5, numrows-0.5)) - for origin = 'lower' - documents
plt.imshow(self.scene_image, cmap=plt.cm.gray, aspect='auto', origin='lower',
extent=(0, self.width, 0, self.height))
plt.tight_layout()
# %% Clearing the scene
def clear_scene(self):
"""
Clearing the scene (background) image by re-initialize it to zero values (completely dark).
Returns
-------
None.
"""
self.scene_image = np.zeros((self.height, self.width), dtype=self.image_type)
# %% Saving generated scene image
def save_scene(self, base_extension: str = "jpg"):
"""
Saving the scene (image) with all collected masks (objects) on it.
Parameters
----------
base_extension : str, optional
The base extension for saving images (like jpg, png, tiff, etc). The default is "jpg".
Returns
-------
None.
"""
scriptFolder = os.getcwd()
default_folder = "tests"
path = os.path.join(scriptFolder, default_folder)
# print(path)
if not os.path.isdir(path):
os.mkdir(path)
if os.path.isdir(path):
# print(path)
base_name = str(self.counter) + "." + base_extension
self.counter += 1
path_for_bead = os.path.join(path, base_name)
if base_extension == "jpg" or base_extension == "jpeg":
imsave(path_for_bead, self.scene_image, quality=100)
else:
imsave(path_for_bead, self.scene_image)
# %% Testing class methods / construction
if __name__ == '__main__':
uScene = u_scene(150, 150, 'uint8')
mask = np.ones((20, 20), dtype='uint8')
    mask = mask[:, :] * 255  # 255 is the uint8 maximum; 256 does not fit in the 8-bit dtype
uScene.add_mask(40, 40, mask)
uScene.add_mask(80, 80, mask)
uScene.plot_image()
uScene.save_scene()
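    # --- Hedged extension of the demo above (assumes the same 150x150 uint8 setup) ---
    # add_mask() also accepts origins that are negative or close to the image border;
    # only the overlapping part of the mask is drawn in those cases.
    uScene_edge = u_scene(150, 150, 'uint8')
    uScene_edge.add_mask(-10, -10, mask, debug=True)   # only the mask's lower-right 10x10 corner lands on the scene
    uScene_edge.add_mask(140, 140, mask, debug=True)   # clipped at the upper image bound
    print("Centers of mass recorded so far:", uScene_edge.centers_of_mass)
    uScene_edge.plot_image()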
|
the-stack_0_804 | # Copyright 2019 BlueCat Networks (USA) Inc. and its affiliates
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# By: BlueCat Networks
# Date: 2021-11-10
# Gateway Version: 18.10.2
# Description: Confirm MAC Address Page
# Various Flask framework items.
from flask import url_for, redirect, render_template, flash, g
from bluecat import route, util
from bluecat.entity import Entity
from bluecat.api_exception import PortalException
import config.default_config as config
from main_app import app
from .confirm_mac_address_form import get_resource_text
from .confirm_mac_address_form import GenericFormTemplate
def get_configuration():
configuration = None
if g.user:
configuration = g.user.get_api().get_configuration(config.default_configuration)
return configuration
def get_mac_address(configuration, address):
mac_addr = None
try:
mac_addr = configuration.get_mac_address(address)
print(mac_addr)
except PortalException:
pass
return mac_addr
# The workflow name must be the first part of any endpoints defined in this file.
# If you break this rule, you will trip up on other people's endpoint names and
# chaos will ensue.
@route(app, '/confirm_mac_address/confirm_mac_address_endpoint')
@util.workflow_permission_required('confirm_mac_address_page')
@util.exception_catcher
def confirm_mac_address_confirm_mac_address_page():
form = GenericFormTemplate()
configuration = get_configuration()
return render_template(
'confirm_mac_address_page.html',
form=form,
text=get_resource_text(),
options=g.user.get_options(),
)
@route(app, '/confirm_mac_address/form', methods=['POST'])
@util.workflow_permission_required('confirm_mac_address_page')
@util.exception_catcher
def confirm_mac_address_confirm_mac_address_page_form():
form = GenericFormTemplate()
configuration = get_configuration()
text = get_resource_text()
if form.validate_on_submit():
mac_address = get_mac_address(configuration, form.mac_address.data)
if mac_address is not None:
mac_pool=mac_address.get_property('macPool')
if mac_pool is None:
mac_pool=text['nomacpool']
flash(mac_address.get_address() + text['registered'] , 'succeed')
flash('MAC Pool : ' + mac_pool, 'succeed')
else:
flash(form.mac_address.data + text['noregistered'], 'failed')
# Put form processing code here
g.user.logger.info('SUCCESS')
return redirect(url_for('confirm_mac_addressconfirm_mac_address_confirm_mac_address_page'))
else:
g.user.logger.info('Form data was not valid.')
return render_template(
'confirm_mac_address_page.html',
form=form,
text=text,
options=g.user.get_options(),
)
|
the-stack_0_806 | import sys
import spider
from spider_ui import Ui_Dialog, QtWidgets, QtGui
class SpiderDialog(QtWidgets.QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.spider = spider.RenrenSpider()
self.init_signals()
if self.spider.is_login():
self.ui.loginFrame.hide()
self.ui.mainFrame.show()
def init_signals(self):
self.ui.loginBtn.clicked.connect(self.on_login)
self.ui.startBtn.clicked.connect(self.on_start)
self.ui.browserBtn.clicked.connect(self.on_browse_dir)
def on_login(self):
email = self.ui.emailInput.text()
password = self.ui.passwordInput.text()
remember = self.ui.rememberCkb.isChecked()
icode = self.ui.iCodeInput.text()
try:
self.spider.login(email, password, icode, remember)
except spider.iCodeRequired as e:
self.show_icode()
error = QtWidgets.QErrorMessage()
error.showMessage(str(e))
else:
self.ui.loginFrame.hide()
self.ui.mainFrame.show()
def show_icode(self):
with open('icode.jpg', 'wb') as f:
f.write(self.spider.get_icode_image())
icode_image = QtGui.QImage('icode.jpg')
icode_pixmap = QtGui.QPixmap.fromImage(icode_image)
self.ui.iCodeImg.setPixmap(icode_pixmap)
self.ui.iCodeFrame.show()
def on_start(self):
self.spider.set_params(
user_id=self.ui.userInput.text(),
output_dir=self.ui.outputPathInput.text()
)
self.ui.progressFrame.show()
self.spider.main(self)
        self.ui.label.setText("Backup complete!")  # original (Chinese) label text: "备份完成!"
def on_browse_dir(self):
file_dialog = QtWidgets.QFileDialog()
file_dialog.setFileMode(QtWidgets.QFileDialog.Directory)
file_dialog.setOption(QtWidgets.QFileDialog.ShowDirsOnly)
if file_dialog.exec_():
self.ui.outputPathInput.setText(file_dialog.selectedFiles()[0])
def progressbar(self, total: int, desc: str):
ui = self.ui
class ProgressBar(object):
def __init__(self):
self.current = 0.0
ui.label.setText(desc)
ui.progressBar.reset()
def update(self, number: int = 1):
self.current += number
ui.progressBar.setValue(int(self.current / total * 100))
return ProgressBar()
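    # Hedged usage note: spider.RenrenSpider.main(self) is expected to build its
    # progress bars through this hook, roughly as
    #   bar = dialog.progressbar(total=n_albums, desc="Backing up albums")
    #   bar.update()            # advances the shared QProgressBar by one item
    # (the exact call sites inside spider.py are an assumption here).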
def main():
app = QtWidgets.QApplication(sys.argv)
dialog = SpiderDialog()
dialog.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
the-stack_0_808 | # coding: utf-8
"""
DocuSign Click API
DocuSign Click lets you capture consent to standard agreement terms with a single click: terms and conditions, terms of service, terms of use, privacy policies, and more. The Click API lets you include this customizable clickwrap solution in your DocuSign integrations. # noqa: E501
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ServiceVersion(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'version': 'str',
'version_url': 'str'
}
attribute_map = {
'version': 'version',
'version_url': 'versionUrl'
}
def __init__(self, version=None, version_url=None): # noqa: E501
"""ServiceVersion - a model defined in Swagger""" # noqa: E501
self._version = None
self._version_url = None
self.discriminator = None
if version is not None:
self.version = version
if version_url is not None:
self.version_url = version_url
@property
def version(self):
"""Gets the version of this ServiceVersion. # noqa: E501
# noqa: E501
:return: The version of this ServiceVersion. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ServiceVersion.
# noqa: E501
:param version: The version of this ServiceVersion. # noqa: E501
:type: str
"""
self._version = version
@property
def version_url(self):
"""Gets the version_url of this ServiceVersion. # noqa: E501
# noqa: E501
:return: The version_url of this ServiceVersion. # noqa: E501
:rtype: str
"""
return self._version_url
@version_url.setter
def version_url(self, version_url):
"""Sets the version_url of this ServiceVersion.
# noqa: E501
:param version_url: The version_url of this ServiceVersion. # noqa: E501
:type: str
"""
self._version_url = version_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ServiceVersion, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServiceVersion):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
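# --- Hedged usage sketch (not part of the generated code) ---
# The model is a plain value object; a round trip through its helpers looks like this
# (the URL is an illustrative placeholder, not a real endpoint):
if __name__ == "__main__":
    sv = ServiceVersion(version="v1", version_url="https://example.com/clickapi/v1")
    print(sv.to_dict())    # {'version': 'v1', 'version_url': 'https://example.com/clickapi/v1'}
    print(sv == ServiceVersion(version="v1", version_url="https://example.com/clickapi/v1"))  # True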
|
the-stack_0_809 | ###############################################################################
# Copyright Keith Butler(2014) #
# #
# This file MacroDensity.density_tools.py is free software: you can #
# redistribute it and/or modify it under the terms of the GNU General Public #
# License as published by the Free Software Foundation, either version 3 of #
# the License, or (at your option) any later version. #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# You should have received a copy of the GNU General Public License along with#
# this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from __future__ import print_function, division
from functools import reduce
import math
from itertools import chain
import numpy
import numpy as np
from scipy import interpolate
#------------------------------------------------------------------------------
def gradient_magnitude(gx, gy, gz):
"""Converts the separate gradient magnitudes to a single magnitude
Args:
gx/y/z : fields in x y and z directions 2D array
Returns:
grad_mag : gradient of fields at each point"""
grad_mag = gx
for i in range(gx.shape[0]):
for j in range(gy.shape[1]):
for k in range(gz.shape[2]):
grad_mag[i,j,k] = np.sqrt(gx[i,j,k]**2 +
gy[i,j,k]**2 +
gz[i,j,k]**2)
return grad_mag
#------------------------------------------------------------------------------
def vector_2_abscissa(vector, magnitude, dx, dy, dz):
"""Converts a vector with a magnitude given in units of grid density
(NGX/Y/Z) to AA for plotting
Args:
vector : the vector along which the line is being plotted [(3x1) array]
magnitude : the number of steps that were taken along that vector
[Integer]
dx/y/z: the resolution of the density grid in AA-1 [Real]
Returns:
abscissa : the values for plotting on the abscissa in AA [1D array]
"""
vec_mag = np.linalg.norm([vector[0] * dx, vector[1] * dy, vector[2] * dz])
abscissa = [i * vec_mag for i in range(magnitude)]
return np.asarray(abscissa)
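# --- Hedged usage sketch (names are illustrative) ---
# For a line sampled 100 times along the z grid direction, the abscissa for
# plotting (in AA) could be built as, e.g.:
#   z_axis = vector_2_abscissa([0, 0, 1], 100, a / NGX, b / NGY, c / NGZ)
# where a, b, c come from matrix_2_abc() below and NGX/Y/Z from the density file.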
#------------------------------------------------------------------------------
def number_in_field(gradients, cutoff):
"""Get number of grid elements with a field magnitude greater than cutoff
Args:
gradients: the grid of field gradients (Real(ngx,ngy,ngz))
        cutoff: the value above which to count them (Real)
Returns:
number_of_elements: the number satisfying the condition (Integer)
"""
number_of_elements = 0
for element in np.nditer(gradients):
if element >= cutoff:
number_of_elements += 1
return number_of_elements
#------------------------------------------------------------------------------
def element_vol(vol, nx, ny, nz):
"""Calculates the volume of each of the elements on the grid.
Args:
vol: the cell volume (real)
        nx, ny, nz : the number of grid points in each direction (integer)
Returns:
ele_vol : the volume (real)
"""
number_of_elements = nx * ny * nz
ele_vol = vol / number_of_elements
return ele_vol
#------------------------------------------------------------------------------
def one_2_2d(Array, resolution, vector):
"""Converts the 1d potential array to 2D with angstroms in A[0]
Args:
Array: 1D array
resolution: density of sampling of distance (1/AA)
vector: The vector of the direction of sampling
Returns
New_array: 2D array
"""
length = np.sqrt(vector.dot(vector))
New_array = np.zeros(shape=(len(Array) - 1, 2))
resolution = length / len(Array)
for i in range(len(Array) - 1):
New_array[i,0] = i*resolution
New_array[i,1] = Array[i]
return New_array
#------------------------------------------------------------------------------
def macroscopic_average(potential, periodicity, resolution):
"""Getting the macroscopic average of potential
Args:
        potential : array containing the electrostatic potential/charge density
periodicity : real number; the period over which to average
resolution : the grid resolution in the direction of averaging
Returns:
macro_average : array with the macroscopically averaged values"""
macro_average = np.zeros(shape=(len(potential)))
period_points = int((periodicity/resolution))
# Period points must be even
if period_points % 2 != 0:
period_points = period_points + 1
length = len(potential)
for i in range(length):
start = i - int(period_points / 2)
end = i + int(period_points / 2)
if start < 0:
start = start + length
macro_average[i] = macro_average[i] + sum(potential[0:end]) + sum(potential[start:length])
macro_average[i] = macro_average[i] / period_points
elif end >= length:
end = end - length
macro_average[i] = macro_average[i] + sum(potential[start:length]) + sum(potential[0:end])
macro_average[i] = macro_average[i] / period_points
else:
macro_average[i] = macro_average[i] + sum(potential[start:end]) / period_points
print("Average of the average = ", numpy.average(macro_average))
return macro_average
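# --- Hedged usage sketch (values are illustrative) ---
# Given a 1D planar average of the potential along z (see planar_average below),
# smoothing over one lattice period removes the atomic-scale oscillations:
#
#   planar = planar_average(grid_pot, NGX, NGY, NGZ, axis='z')
#   macro = macroscopic_average(planar, periodicity=4.0, resolution=c / NGZ)
#
# where 4.0 AA is an assumed repeat distance and c / NGZ the grid spacing.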
#------------------------------------------------------------------------------
def cube_potential(origin, travelled, cube, Grid, nx, ny, nz):
"""Populates the sampling cube with the potential required"""
# Recalc the origin as grid point coordinates
n_origin = np.zeros(shape=(3))
n_origin[0] = int(origin[0]*nx)
n_origin[1] = int(origin[1]*ny)
n_origin[2] = int(origin[2]*nz)
potential_cube = np.zeros(shape=(cube[0],cube[1],cube[2]))
for x in range(0,cube[0]):
for y in range(0,cube[1]):
for z in range(0,cube[2]):
# Assign the values of coordinates in the original grid
xv = int(n_origin[0]+travelled[0]+x)
yv = int(n_origin[1]+travelled[1]+y)
zv = int(n_origin[2]+travelled[2]+z)
# Minimum image convention
zv = int(zv - nz*round(zv/nz))
yv = int(yv - ny*round(yv/ny))
xv = int(xv - nx*round(xv/nx))
potential_cube[x,y,z] = Grid[int(xv),int(yv),int(zv)]
return potential_cube.mean(), np.var(potential_cube)
#------------------------------------------------------------------------------
def cuboid_average(Grid, cube, origin, vector, nx, ny, nz, magnitude):
"""Calculates the average in a cube defined by size cube(a,b,c), beginning
at origin and travelling as far as magnitude."""
plotting_average = np.zeros(shape=(magnitude))
i = 0
while i < magnitude:
travelled = np.multiply(i, vector)
        plotting_average[i], variance = cube_potential(origin, travelled,
cube, Grid,
nx, ny, nz)
i = i + 1
return plotting_average
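# --- Hedged usage sketch (values are illustrative) ---
# Average the potential inside a 2x2x2 grid-point cube travelling along z,
# starting from the fractional position (0.5, 0.5, 0.0):
#
#   travelling = cuboid_average(grid_pot, [2, 2, 2], [0.5, 0.5, 0.0],
#                               [0, 0, 1], NGX, NGY, NGZ, NGZ)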
#------------------------------------------------------------------------------
def planar_average(Grid, nx, ny, nz, axis='z'):
"""Calculate the average in a given plane for the full length of the
normal; e.g. the full length of z in the xy plane."""
if axis == 'x':
x_plane = np.zeros(shape=(ny, nz))
Average = np.zeros(shape=(nx))
for x_value in range(nx):
x_plane[:,:] = Grid[x_value,:,:]
Average[x_value] = x_plane.mean()
if axis == 'y':
Average = np.zeros(shape=(ny))
y_plane = np.zeros(shape=(nx,nz))
for y_value in range(ny):
y_plane[:,:] = Grid[:,y_value,:]
Average[y_value] = y_plane.mean()
if axis == 'z':
Average = np.zeros(shape=(nz))
z_plane = np.zeros(shape=(nx,ny))
for z_value in range(nz):
z_plane[:,:] = Grid[:,:,z_value]
Average[z_value] = z_plane.mean()
return Average
#------------------------------------------------------------------------------
def get_volume(a,b,c):
"""Calculate the volume of the cell from lattice vectors
Args:
a/b/c: vectors of the lattice edges
"""
volume = np.dot(a,np.cross(b,c))
return volume
#------------------------------------------------------------------------------
def numbers_2_grid(a,NGX,NGY,NGZ):
"""Takes a point (in fractional coordinates) and converts it to a VASP grid
point based on the NGX/Y/Z values."""
a_grid = np.zeros(shape=(3))
a_grid[0] = round(float(a[0])*NGX)
a_grid[1] = round(float(a[1])*NGY)
a_grid[2] = round(float(a[2])*NGZ)
return a_grid
#------------------------------------------------------------------------------
def matrix_2_abc(Lattice):
"""The the VASP lattice and convert to the a,b,c,alpha,beta,gamma format"""
a = np.sqrt(Lattice[0,0]**2+Lattice[0,1]**2+Lattice[0,2]**2)
b = np.sqrt(Lattice[1,0]**2+Lattice[1,1]**2+Lattice[1,2]**2)
c = np.sqrt(Lattice[2,0]**2+Lattice[2,1]**2+Lattice[2,2]**2)
a_vec = Lattice[0,:]
b_vec = Lattice[1,:]
c_vec = Lattice[2,:]
return a,b,c,a_vec,b_vec,c_vec
#------------------------------------------------------------------------------
def _print_boom(quiet=False):
if not quiet:
print("\n")
print("BBBB OOOO OOOO MMMMM ")
print("BBBB OOOO OOOO MMMMM ")
print("BBBB OOOO OOOO MMMMM ")
print("B B OOOO OOOO MMMMM ")
print("B B O O O O MMMMM ")
print("B B O O O O MMMMM ")
print("B B O O O O MMMMM ")
print("B B O O O O MMMMM ")
print("BBBB O O O O M M M ")
print("BBBB O O O O M M M ")
print("BBBB O O O O M M M ")
print("B B O O O O M M M ")
print("B B O O O O M M M ")
print("B B O O O O M M M ")
print("B B O O O O M M M ")
print("B B OOOO OOOO M M M ")
print("BBBB OOOO OOOO M M M ")
print("BBBB OOOO OOOO M M M ")
print("BBBB OOOO OOOO M M M ")
def read_vasp_density(FILE, use_pandas=None, quiet=False):
"""Generic reading of CHGCAR LOCPOT etc files from VASP
Args:
FILE (str): Path to density file
use_pandas (bool): Use Pandas library for faster file reading. If set
to None, Pandas will be used when available.
Returns:
Potential (array), NGX (int), NGY (int), NGZ (int), lattice (array)
where Potential is a 1-D flattened array of density data with original
dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.
"""
# Get Header information by reading a line at a time
if use_pandas:
from pandas import read_table as pandas_read_table
elif use_pandas is None:
try:
from pandas import read_table as pandas_read_table
use_pandas = True
except ImportError:
use_pandas = False
print("Reading header information...")
with open(FILE, "r") as f:
_ = f.readline()
scale_factor = float(f.readline())
lattice = np.zeros(shape=(3,3))
for row in range(3):
lattice[row] = [float(x) for x in f.readline().split()]
lattice = lattice * scale_factor
num_species = len(f.readline().split())
num_type = [int(x) for x in f.readline().split()]
num_atoms = sum(num_type)
coord_type = f.readline().strip()
coordinates = numpy.zeros(shape=(num_atoms, 3))
for atom_i in range(num_atoms):
coordinates[atom_i] = [float(x) for x in f.readline().split()]
# Skip blank line
_ = f.readline()
NGX, NGY, NGZ = [int(x) for x in f.readline().split()]
if use_pandas:
print("Reading 3D data using Pandas...")
skiprows = 10 + num_atoms
readrows = int(math.ceil(NGX * NGY * NGZ / 5))
dat = pandas_read_table(FILE, delim_whitespace=True,
skiprows=skiprows, header=None,
nrows=readrows)
Potential = dat.iloc[:readrows, :5].values.flatten()
remainder = (NGX * NGY * NGZ) % 5
if remainder > 0:
Potential = Potential[:(-5 + remainder)]
else:
print("Reading 3D data...")
Potential = (f.readline().split()
for i in range(int(math.ceil(NGX * NGY * NGZ / 5))))
Potential = numpy.fromiter(chain.from_iterable(Potential), float)
_print_boom(quiet=quiet)
if not quiet:
print("Average of the potential = ", numpy.average(Potential))
return Potential, NGX, NGY, NGZ, lattice
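# --- Hedged usage sketch (the file name 'LOCPOT' is an assumption) ---
# A typical chain for a potential file, using density_2_grid() (defined further
# below) and planar_average() (defined above):
#
#   pot, NGX, NGY, NGZ, lattice = read_vasp_density('LOCPOT')
#   grid_pot, _ = density_2_grid(pot, NGX, NGY, NGZ)
#   planar = planar_average(grid_pot, NGX, NGY, NGZ, axis='z')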
#------------------------------------------------------------------------------
def _read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ, spin=0):
'''
use_pandas (bool): Use Pandas library for faster file reading. If set
to None, Pandas will be used when available.
spin: the set of spin data to read, default 0 for ISPIN=1 calculation
'''
print("PANDAS:", use_pandas)
if use_pandas:
from pandas import read_table as pandas_read_table
elif use_pandas is None:
try:
from pandas import read_table as pandas_read_table
use_pandas = True
except ImportError:
use_pandas = False
with open(FILE, "r") as f:
_ = f.readline()
scale_factor = float(f.readline())
lattice = np.zeros(shape=(3,3))
for row in range(3):
lattice[row] = [float(x) for x in f.readline().split()]
lattice = lattice * scale_factor
num_species = len(f.readline().split())
num_type = [int(x) for x in f.readline().split()]
num_atoms = sum(num_type)
coord_type = f.readline().strip()
coordinates = numpy.zeros(shape=(num_atoms, 3))
for atom_i in range(num_atoms):
coordinates[atom_i] = [float(x) for x in f.readline().split()]
# Skip blank line
_ = f.readline()
NGX, NGY, NGZ = [int(x) for x in f.readline().split()]
if use_pandas:
print("Reading 3D data using Pandas...")
skiprows = 10 + num_atoms + spin * \
(math.ceil(NGX * NGY * NGZ / 10) + 2)
readrows = int(math.ceil(NGX * NGY * NGZ / 10))
dat = pandas_read_table(FILE, delim_whitespace=True,
skiprows=skiprows, header=None,
nrows=readrows)
density = dat.iloc[:readrows, :10].values.flatten()
remainder = (NGX * NGY * NGZ) % 10
if remainder > 0:
density = density[:(-10 + remainder)]
else:
print("Reading 3D data...")
density = (f.readline().split()
for i in range(int(math.ceil(NGX * NGY * NGZ / 10))))
density = numpy.fromiter(chain.from_iterable(density), float)
return density
#------------------------------------------------------------------------------
def read_vasp_parchg(FILE, use_pandas=None, quiet=False, spin=False):
"""Generic reading of CHGCAR LOCPOT etc files from VASP
Args:
FILE (str): Path to parchg file
use_pandas (bool): Use Pandas library for faster file reading. If set
to None, Pandas will be used when available.
spin(bool): is the data spin polarised?
Returns:
density (array), NGX (int), NGY (int), NGZ (int), lattice (array)
where density is a 1-D flattened array of density data with original
dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.
"""
# Get Header information by reading a line at a time
print("Reading header information...")
with open(FILE, "r") as f:
_ = f.readline()
scale_factor = float(f.readline())
lattice = np.zeros(shape=(3,3))
for row in range(3):
lattice[row] = [float(x) for x in f.readline().split()]
lattice = lattice * scale_factor
num_species = len(f.readline().split())
num_type = [int(x) for x in f.readline().split()]
num_atoms = sum(num_type)
coord_type = f.readline().strip()
coordinates = numpy.zeros(shape=(num_atoms, 3))
for atom_i in range(num_atoms):
coordinates[atom_i] = [float(x) for x in f.readline().split()]
# Skip blank line
_ = f.readline()
NGX, NGY, NGZ = [int(x) for x in f.readline().split()]
if not spin:
density = _read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ)
else:
densities = []
densities.append(_read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ
, spin=0))
densities.append(_read_partial_density(FILE, use_pandas, num_atoms, NGX, NGY, NGZ
, spin=1))
alpha = densities[0] + densities[1]
beta = densities[0] - densities[1]
density = [alpha, beta]
_print_boom(quiet=quiet)
return density, NGX, NGY, NGZ, lattice
def read_vasp_density_classic(FILE):
"""Reimplementation of the legacy 3D data importer
This is still quite a bit slower than the new ``read_vasp_density`` but it
makes less assumptions about where newlines will appear in the file. It
also prints the progress reading through the file; this definitely makes it
slower but might _feel_ faster!
"""
with open(FILE, "r") as f:
lines = f.readlines()
return _read_vasp_density_fromlines(lines)
def _read_vasp_density_fromlines(lines):
"""Generic reading of CHGCAR LOCPOT etc files from VASP"""
i, j, k = 0, 0, 0
NGX, NGY, NGZ = 0, 0, 0
lattice = np.zeros(shape=(3,3))
upper_limit, num_species, scale_factor = 0, 0, 0
num_atoms = 1 # First test needs to fail until headers have been read
Potential, Coordinates = np.zeros(1), np.zeros(1)
for line in lines:
inp = line.split()
if inp == []:
continue
else:
i += 1
if i > (num_atoms + 9) and i < (num_atoms + 10 + upper_limit):
for m, val in enumerate(inp):
Potential[k + m] = val
k = k + 5
if math.fmod(k, 100000) == 0:
print("Reading potential at point", k)
elif i == 2:
scale_factor = float(inp[0])
elif i >= 3 and i < 6:
lattice[i-3,:]=inp[:]
elif i == 6:
num_species = len(inp)
species = inp
elif i == 7:
num_type = inp
num_atoms = sum(int(x) for x in num_type)
elif i == 8:
coord_type = inp
Coordinates = numpy.zeros(shape=(num_atoms,3))
elif i >= 9 and i <= num_atoms + 8:
Coordinates[i-9,0] = float(inp[0])
Coordinates[i-9,1] = float(inp[1])
Coordinates[i-9,2] = float(inp[2])
elif i == num_atoms + 9:
NGX = int(inp[0])
NGY = int(inp[1])
NGZ = int(inp[2])
Potential = numpy.zeros(shape=(NGX * NGY * NGZ))
# Read in the potential data
upper_limit = (int(NGX * NGY * NGZ / 5) +
np.mod(NGX * NGY * NGZ, 5))
_print_boom()
print("Average of the potential = ", numpy.average(Potential))
lattice = lattice * scale_factor
return Potential, NGX, NGY, NGZ, lattice
#------------------------------------------------------------------------------
def density_2_grid(Density, nx, ny, nz, Charge=False, Volume=1):
"""Convert the Potential list to a grid for ease of manipulation
Args:
        Density: Array of the output from a VASP calculation (charge/potential)
nx,y,z : Number of mesh points in x/y/z
Charge : Boolean, is it charge or potential (charge needs to be
normalised by vol)
        Volume : the cell volume, only required for normalising charge.
Returns:
Potential_grid: the (normalised) quantity on a mesh
total_electrons : the number of electrons in the system
"""
l = 0
Potential_grid = np.zeros(shape=(nx,ny,nz))
total_electrons = 0
is_CHGCAR = True
for k in range(nz):
for j in range(ny):
for i in range(nx):
Potential_grid[i,j,k] = Density[l] / Volume
if Charge == True:
# Convert the charge density to a number of electrons
point_volume = Volume / (nx*ny*nz)
Potential_grid[i,j,k] = Potential_grid[i,j,k]*point_volume
total_electrons = total_electrons + Density[l]
l = l + 1
if Charge == True:
print("Total electrons: ", total_electrons / (nx * ny * nz))
total_electrons = total_electrons / (nx * ny * nz)
return Potential_grid, total_electrons
#------------------------------------------------------------------------------
def density_2_grid_gulp(Density, nx, ny, nz):
"""Convert the Potential list to a grid for ease of manipulation
Args:
        Density: Array of the output from a VASP calculation (charge/potential)
nx,y,z : Number of mesh points in x/y/z
Returns:
Potential_grid: the (normalised) quantity on a mesh
"""
l = 0
Potential_grid = np.zeros(shape=(nx,ny,nz))
total_electrons = 0
is_CHGCAR = True
for k in range(nx):
for j in range(ny):
for i in range(nz):
Potential_grid[k,j,i] = Density[l]
l = l + 1
return Potential_grid
#------------------------------------------------------------------------------
def read_gulp_potential(gulpfile='gulp.out'):
"""Generic reading of GULP output
Args:
gulpfile (str): Path to gulp output file
Returns:
potential (array), NGX (int), NGY (int), NGZ (int), lattice (array)
where density is a 1-D flattened array of density data with original
dimensions NGX x NGY x NGZ and lattice is the 3x3 unit-cell matrix.
"""
potential = []
    try:
        file_handle = open(gulpfile)
    except IOError:
        print("File not found or path is incorrect")
        raise
lines = file_handle.readlines()
for n, line in enumerate(lines):
if line.rfind('Cartesian lattice vectors') > -1:
lattice = np.zeros(shape=(3, 3))
for r in range(3):
lattice[r] = lines[n + 2 + r].split()
break
for n, line in enumerate(lines):
if line.rfind('Electrostatic potential on a grid') > -1:
NGX = int(lines[n + 3].split()[3])
NGY = int(lines[n + 3].split()[5])
NGZ = int(lines[n + 3].split()[7])
break
for n, line in enumerate(lines):
if line.rfind('Electrostatic potential on a grid') > -1:
for k in reversed(range(9, NGX*NGY*NGZ + 9)):
potential.append(float(lines[n + k].split()[3]))
return np.asarray(potential), NGX, NGY, NGZ, lattice
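# Example usage (illustrative sketch; assumes a GULP output file 'gulp.out' that
# contains an 'Electrostatic potential on a grid' section):
#
#     pot, NGX, NGY, NGZ, lattice = read_gulp_potential('gulp.out')
#     grid = density_2_grid_gulp(pot, NGX, NGY, NGZ)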
#------------------------------------------------------------------------------
def GCD(a,b):
""" The Euclidean Algorithm """
a = abs(a)
b = abs(b)
while a:
a, b = (b % a), a
return b
#------------------------------------------------------------------------------
def GCD_List(list):
""" Finds the GCD of numbers in a list.
Input: List of numbers you want to find the GCD of
E.g. [8, 24, 12]
Returns: GCD of all numbers
"""
return reduce(GCD, list)
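# Example (illustrative): GCD(8, 24) returns 8 and GCD_List([8, 24, 12]) returns 4.
# Note that GCD_List relies on `reduce`, which on Python 3 must be imported via
# `from functools import reduce`.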
#------------------------------------------------------------------------------
def inverse_participation_ratio(density):
""" Calculate the IPR, which is Psi**4 or Rho**2
Input: density, a 1-D flattened grid of the electron density for the state
this is calculated from the PARCHG in VASP
Output: ipr, float
"""
sq = sum(i**2 for i in density)
fr = sum(i**4 for i in density)
ifr = 1 / (len(density) * fr)
isq = 1 / (len(density) * sq)
return fr / sq**2
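# Worked example (illustrative): for a state spread evenly over N grid points with
# density rho at every point, fr / sq**2 = N*rho**4 / (N*rho**2)**2 = 1/N, whereas a
# state localised entirely on a single point gives 1. Smaller values therefore
# indicate more delocalised states.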
|
the-stack_0_810 | import sys
sys.path.append("../../")
def press(btn):
if btn == "SUB":
app.showSubWindow("Sub")
app.hide()
if btn in ["POPUP2", "POPUP"]:
app.infoBox("INFO", "INFO")
if btn == "MAIN":
app.show()
app.hideSubWindow("Sub")
def closer(btn=None):
print("aaa")
from appJar import gui
with gui("Main Window", startWindow="Sub") as app:
#with gui("Main Window") as app:
app.label("title", "Main Window")
app.button("POPUP", press)
with app.subWindow("Sub"):
app.label("sub", "SubWindow")
app.button("POPUP2", press)
app.button("MAIN", press)
app.setStopFunction(closer)
# app.hide()
# app.showSubWindow("Sub")
|
the-stack_0_811 | from __future__ import print_function, division
import os
import re
import datetime
import sys
from os.path import join, isdir, isfile, dirname, abspath
import pandas as pd
import yaml
import psycopg2 as db
from nilmtk.measurement import measurement_columns
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.datastore import Key
from nilm_metadata import convert_yaml_to_hdf5
from nilmtk.utils import get_module_directory
import shutil
import tempfile
"""
MANUAL:
dataport is a large dataset hosted in a remote SQL database. This
file provides a function to download the dataset and save it to disk
as NILMTK-DF. Since downloading the entire dataset will likely take >
24 hours, this function provides some options to allow you to download
only a subset of the data.
'''''''''''''''' Previous Version '''''''''''''''''''''
For example, to only load house 26 for April 2014:
from nilmtk.dataset_converters.dataport.download_dataport
import download_dataport
download_dataport(
'username',
'password',
'/path/output_filename.h5',
periods_to_load={26: ('2014-04-01', '2014-05-01')}
)
'''''''''''''''' Previous Version '''''''''''''''''''''
'''''''''''''''' New Version '''''''''''''''''''''
from nilmtk.dataset_converters.dataport.download_dataport
import download_dataport,
_dataport_dataframe_to_hdf,
view_database_tables,
view_buildings,
view_data_window
# see all available tables in the dataport database.
view_database_tables(
'username',
'password',
'database_schema' # university or commercial
)
# show the list of all available buildings
view_buildings(
'username',
'password',
'database_schema', # university or commercial
'table_name' # for example 'electricity_egauge_15min', 'electricity_egauge_hours'
)
# view data collection window of selected buildings
view_data_window(
'username',
'password',
'database_schema', # university or commercial
'table_name', # for example 'electricity_egauge_15min','electricity_egauge_hours'
[18,26,43,44] # data collection window of building 18,26,43 and 44 respectively
)
# download the dataset.
For example, loading electricity_egauge_hours from 2018-11-17 to
2019-12-17 of building 26
download_dataport(
'username',
'password',
'/path/output_filename.h5',
'university',
'electricity_egauge_hours',
periods_to_load={ 26: ('2018-11-17', '2019-12-17')})
'''''''''''''''' New Version '''''''''''''''''''''
REQUIREMENTS:
On Ubuntu:
* sudo apt-get install libpq-dev
* sudo pip install psycopg2
TODO:
* intelligently handle queries that fail due to network
* integrate 'grid' (use - gen) and 'gen'
"""
feed_mapping = {
'use': {},
'air1': {'type': 'air conditioner'},
'air2': {'type': 'air conditioner'},
'air3': {'type': 'air conditioner'},
'airwindowunit1': {'type': 'air conditioner'},
'aquarium1': {'type': 'appliance'},
'bathroom1': {'type': 'sockets', 'room': 'bathroom'},
'bathroom2': {'type': 'sockets', 'room': 'bathroom'},
'bedroom1': {'type': 'sockets', 'room': 'bedroom'},
'bedroom2': {'type': 'sockets', 'room': 'bedroom'},
'bedroom3': {'type': 'sockets', 'room': 'bedroom'},
'bedroom4': {'type': 'sockets', 'room': 'bedroom'},
'bedroom5': {'type': 'sockets', 'room': 'bedroom'},
'car1': {'type': 'electric vehicle'},
'clotheswasher1': {'type': 'washing machine'},
'clotheswasher_dryg1': {'type': 'washer dryer'},
'diningroom1': {'type': 'sockets', 'room': 'dining room'},
'diningroom2': {'type': 'sockets', 'room': 'dining room'},
'dishwasher1': {'type': 'dish washer'},
'disposal1': {'type': 'waste disposal unit'},
'drye1': {'type': 'spin dryer'},
'dryg1': {'type': 'spin dryer'},
'freezer1': {'type': 'freezer'},
'furnace1': {'type': 'electric furnace'},
'furnace2': {'type': 'electric furnace'},
'garage1': {'type': 'sockets', 'room': 'dining room'},
'garage2': {'type': 'sockets', 'room': 'dining room'},
'gen': {},
'grid': {},
'heater1': {'type': 'electric space heater'},
'housefan1': {'type': 'electric space heater'},
'icemaker1': {'type': 'appliance'},
'jacuzzi1': {'type': 'electric hot tub heater'},
'kitchen1': {'type': 'sockets', 'room': 'kitchen'},
'kitchen2': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp1': {'type': 'sockets', 'room': 'kitchen'},
'kitchenapp2': {'type': 'sockets', 'room': 'kitchen'},
'lights_plugs1': {'type': 'light'},
'lights_plugs2': {'type': 'light'},
'lights_plugs3': {'type': 'light'},
'lights_plugs4': {'type': 'light'},
'lights_plugs5': {'type': 'light'},
'lights_plugs6': {'type': 'light'},
'livingroom1': {'type': 'sockets', 'room': 'living room'},
'livingroom2': {'type': 'sockets', 'room': 'living room'},
'microwave1': {'type': 'microwave'},
'office1': {'type': 'sockets', 'room': 'office'},
'outsidelights_plugs1': {'type': 'sockets', 'room': 'outside'},
'outsidelights_plugs2': {'type': 'sockets', 'room': 'outside'},
'oven1': {'type': 'oven'},
'oven2': {'type': 'oven'},
'pool1': {'type': 'electric swimming pool heater'},
'pool2': {'type': 'electric swimming pool heater'},
'poollight1': {'type': 'light'},
'poolpump1': {'type': 'electric swimming pool heater'},
'pump1': {'type': 'appliance'},
'range1': {'type': 'stove'},
'refrigerator1': {'type': 'fridge'},
'refrigerator2': {'type': 'fridge'},
'security1': {'type': 'security alarm'},
'shed1': {'type': 'sockets', 'room': 'shed'},
'sprinkler1': {'type': 'appliance'},
'unknown1': {'type': 'unknown'},
'unknown2': {'type': 'unknown'},
'unknown3': {'type': 'unknown'},
'unknown4': {'type': 'unknown'},
'utilityroom1': {'type': 'sockets', 'room': 'utility room'},
'venthood1': {'type': 'appliance'},
'waterheater1': {'type': 'electric water heating appliance'},
'waterheater2': {'type': 'electric water heating appliance'},
'winecooler1': {'type': 'appliance'},
}
feed_ignore = ['gen', 'grid']
def database_assert(database_table):
assert (
database_table == 'electricity_egauge_15min' or
database_table == 'electricity_egauge_hours' or
database_table == 'electricity_egauge_minutes' or
database_table == 'electricity_egauge_seconds'
), "Table not compatible with NILMTK"
def view_database_tables(
database_username,
database_password,
database_schema
):
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# Loading university schemas
sql_query = ("SELECT table_name" +
" FROM information_schema.views" +
" WHERE table_schema ='" + database_schema + "'" +
" ORDER BY table_name")
database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()
df = pd.DataFrame({database_schema: database_tables})
print(df)
conn.close()
def view_buildings(
database_username,
database_password,
database_schema,
database_table
):
database_assert(database_table)
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# select all buildings for the database_table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
                 ' WHERE ' + database_table +
' ORDER BY dataid')
buildings_in_table = pd.read_sql(sql_query, conn)['dataid'].tolist()
print(buildings_in_table)
conn.close()
def view_data_window(
database_username,
database_password,
database_schema,
database_table,
building_no=None):
database_assert(database_table)
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# select all buildings for the database_table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
                 ' WHERE ' + database_table +
' ORDER BY dataid')
if(not (building_no)):
print(" Please provide the list of building numbers ")
else:
for each_building in building_no:
sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +
' MAX(egauge_max_time) AS maxlocalminute' +
' FROM university.metadata' +
' WHERE dataid=' + str(each_building))
timestamps = pd.read_sql(sql_query, conn)
first_timestamp_in_table = timestamps['minlocalminute'][0]
last_timestamp_in_table = timestamps['maxlocalminute'][0]
print(str(each_building),
"\t\t", first_timestamp_in_table,
"\t\t", last_timestamp_in_table)
print("Done loading all the buildings!!")
conn.close()
def download_dataport(database_username,
database_password, hdf_filename,
database_schema='university',
user_selected_table='electricity_egauge_minutes',
periods_to_load=None):
"""
Downloads data from dataport database into an HDF5 file.
Parameters
----------
hdf_filename : str
Output HDF filename. If file exists already then will be deleted.
    database_username, database_password, database_schema, user_selected_table : str
periods_to_load : dict of tuples, optional
Key of dict is the building number (int).
Values are (<start date>, <end date>)
e.g. ("2013-04-01", None) or ("2013-04-01", "2013-08-01")
defaults to all buildings and all date ranges
"""
database_assert(user_selected_table)
# dataport database settings
database_host = 'dataport.pecanstreet.org'
database_port = '5434'
database_name = 'postgres'
# try to connect to database
try:
conn = db.connect('host=' + database_host +
' port=' + database_port +
' dbname=' + database_name +
' user=' + database_username +
' password=' + database_password)
except:
print('Could not connect to remote database')
raise
# map user_selected_table and timestamp column
timestamp_map = {"electricity_egauge_15min": "local_15min",
"electricity_egauge_hours": "localhour",
"electricity_egauge_minutes": "localminute",
"electricity_egauge_seconds": "localminute"}
# set up a new HDF5 datastore (overwrites existing store)
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='zlib')
# Create a temporary metadata dir, remove existing building
# yaml files in module dir (if any)
original_metadata_dir = join(get_module_directory(),
'dataset_converters',
'dataport',
'metadata')
tmp_dir = tempfile.mkdtemp()
metadata_dir = join(tmp_dir, 'metadata')
shutil.copytree(original_metadata_dir, metadata_dir)
print("Using temporary dir for metadata:", metadata_dir)
for f in os.listdir(metadata_dir):
if re.search('^building', f):
os.remove(join(metadata_dir, f))
"""
TODO:
The section below can be altered or removed,
since the restructured Dataport
now has only one electricity_egauge_minutes table.
"""
# get tables in database schema
sql_query = ("SELECT table_name" +
" FROM information_schema.views" +
" WHERE table_schema ='" + database_schema + "'" +
" ORDER BY table_name")
database_tables = pd.read_sql(sql_query, conn)['table_name'].tolist()
database_tables = [t for t in database_tables if user_selected_table in t]
# if user has specified buildings
if periods_to_load:
buildings_to_load = list(periods_to_load.keys())
else:
# get buildings present in all tables
sql_query = ''
for table in database_tables:
sql_query = (sql_query + '(SELECT DISTINCT dataid' +
' FROM "' + database_schema + '".' + table +
') UNION ')
sql_query = sql_query[:-7]
sql_query = (sql_query + ' ORDER BY dataid')
buildings_to_load = pd.read_sql(sql_query, conn)['dataid'].tolist()
# for each user specified building or all buildings in database
for building_id in buildings_to_load:
print("Loading building {:d} @ {}"
.format(building_id, datetime.datetime.now()))
sys.stdout.flush()
# create new list of chunks for concatenating later
dataframe_list = []
# for each table of 1 month data
for database_table in database_tables:
print(" Loading table {:s}".format(database_table))
sys.stdout.flush()
# get buildings present in electricity_egauge_minutes table
sql_query = ('SELECT DISTINCT dataid' +
' FROM university.metadata' +
' WHERE egauge_min_time IS NOT NULL' +
' ORDER BY dataid')
buildings_in_table = pd.read_sql(sql_query,
conn)['dataid'].tolist()
if building_id in buildings_in_table:
# get first and last timestamps for this
# house in electricity_egauge_minutes table
sql_query = ('SELECT MIN(egauge_min_time) AS minlocalminute,' +
' MAX(egauge_max_time) AS maxlocalminute' +
' FROM university.metadata' +
' WHERE dataid=' + str(building_id))
range = pd.read_sql(sql_query, conn)
first_timestamp_in_table = range['minlocalminute'][0]
last_timestamp_in_table = range['maxlocalminute'][0]
# get requested start and end and localize them
requested_start = None
requested_end = None
database_timezone = 'US/Central'
if periods_to_load:
if periods_to_load[building_id][0]:
requested_start = pd.Timestamp(periods_to_load[building_id][0])
requested_start = requested_start.tz_localize(database_timezone)
if periods_to_load[building_id][1]:
requested_end = pd.Timestamp(periods_to_load[building_id][1])
requested_end = requested_end.tz_localize(database_timezone)
# check user start is not after end
if requested_start > requested_end:
print('requested end is before requested start')
sys.stdout.flush()
else:
# clip data to smallest range
if requested_start:
start = max(requested_start, first_timestamp_in_table)
else:
start = first_timestamp_in_table
if requested_end:
end = min(requested_end, last_timestamp_in_table)
else:
end = last_timestamp_in_table
# download data in chunks
chunk_start = start
                    chunk_size = datetime.timedelta(10)  # 10 days
while chunk_start < end:
chunk_end = chunk_start + chunk_size
if chunk_end > end:
chunk_end = end
# subtract 1 second so end is exclusive
chunk_end = chunk_end - datetime.timedelta(0, 1)
# query power data for all channels
format = '%Y-%m-%d %H:%M:%S'
sql_query = ('SELECT *' +
' FROM "' + database_schema + '".' + user_selected_table +
' WHERE dataid=' + str(building_id) +
'and "' + timestamp_map[user_selected_table] + '" between ' +
"'" + chunk_start.strftime(format) + "'" +
" and " +
"'" + chunk_end.strftime(format) +
"' ORDER BY "+timestamp_map[user_selected_table]
)
chunk_dataframe = pd.read_sql(sql_query, conn)
# nilmtk requires building indices to start at 1
nilmtk_building_id = buildings_to_load.index(building_id) + 1
# convert to nilmtk-df and save to disk
nilmtk_dataframe = _dataport_dataframe_to_hdf(
chunk_dataframe, store,
nilmtk_building_id,
building_id,
timestamp_map[user_selected_table],
metadata_dir
)
# print progress
print(' ' + str(chunk_start) + ' -> ' +
str(chunk_end) + ': ' +
str(len(chunk_dataframe.index)) + ' rows')
sys.stdout.flush()
# append all chunks into list for csv writing
# dataframe_list.append(chunk_dataframe)
# move on to next chunk
chunk_start = chunk_start + chunk_size
# saves all chunks in list to csv
# if len(dataframe_list) > 0:
# dataframe_concat = pd.concat(dataframe_list)
# dataframe_concat.to_csv(output_directory + str(building_id) + '.csv')
store.close()
conn.close()
# write yaml to hdf5
# dataset.yaml and meter_devices.yaml are static, building<x>.yaml are dynamic
convert_yaml_to_hdf5(metadata_dir, hdf_filename)
    # remove the temporary dir when finished
shutil.rmtree(tmp_dir)
def _dataport_dataframe_to_hdf(dataport_dataframe,
store,
nilmtk_building_id,
dataport_building_id,
timestamp_name,
metadata_dir):
local_dataframe = dataport_dataframe.copy()
# remove timezone information to avoid append errors
local_dataframe[timestamp_name] = pd.DatetimeIndex([i.replace(tzinfo=None)
for i in local_dataframe[timestamp_name]])
# set timestamp as frame index
local_dataframe = local_dataframe.set_index(timestamp_name)
# set timezone
local_dataframe = local_dataframe.tz_localize('US/Central')
# remove timestamp column from dataframe
feeds_dataframe = local_dataframe.drop('dataid', axis=1)
# Column names for dataframe
column_names = [('power', 'active')]
# convert from kW to W
feeds_dataframe = feeds_dataframe.mul(1000)
# building metadata
building_metadata = {}
building_metadata['instance'] = nilmtk_building_id
building_metadata['original_name'] = int(dataport_building_id) # use python int
building_metadata['elec_meters'] = {}
building_metadata['appliances'] = []
# initialise dict of instances of each appliance type
instance_counter = {}
meter_id = 1
for column in feeds_dataframe.columns:
if feeds_dataframe[column].notnull().sum() > 0 and not column in feed_ignore:
# convert timeseries into dataframe
feed_dataframe = pd.DataFrame(feeds_dataframe[column])
# set column names
feed_dataframe.columns = pd.MultiIndex.from_tuples(column_names)
# Modify the column labels to reflect the power measurements recorded.
feed_dataframe.columns.set_names(LEVEL_NAMES, inplace=True)
key = Key(building=nilmtk_building_id, meter=meter_id)
# store dataframe
store.put(str(key), feed_dataframe, format='table', append=True)
store.flush()
# elec_meter metadata
if column == 'use':
meter_metadata = {'device_model': 'eGauge',
'site_meter': True}
else:
meter_metadata = {'device_model': 'eGauge',
'submeter_of': 0}
building_metadata['elec_meters'][meter_id] = meter_metadata
# appliance metadata
if column != 'use':
# original name and meter id
appliance_metadata = {'original_name': column,
'meters': [meter_id]}
# appliance type and room if available
appliance_metadata.update(feed_mapping[column])
# appliance instance number
if instance_counter.get(appliance_metadata['type']) == None:
instance_counter[appliance_metadata['type']] = 0
instance_counter[appliance_metadata['type']] += 1
appliance_metadata['instance'] = instance_counter[appliance_metadata['type']]
building_metadata['appliances'].append(appliance_metadata)
meter_id += 1
# write building yaml to file
building = 'building{:d}'.format(nilmtk_building_id)
yaml_full_filename = join(metadata_dir, building + '.yaml')
with open(yaml_full_filename, 'w') as outfile:
outfile.write(yaml.dump(building_metadata))
return 0
|
the-stack_0_813 | #!/usr/bin/env python
import urllib
from decimal import Decimal
from getpass import getpass
import click
from stellar_base import exceptions
from stellar_base.address import Address
from stellar_base.builder import Builder
from stellar_base.keypair import Keypair
from config import configs
from validate import validate
@click.command()
@click.argument('target_address')
@click.argument('amount')
@click.option('--network', default='TESTNET', type=click.Choice(['TESTNET', 'PUBLIC']))
@click.option('--source_secret', prompt=True, hide_input=True)
def payment(target_address: str, amount: str, network, source_secret):
config = configs[network]
src_address = Keypair.from_seed(source_secret).address().decode()
builder = Builder(secret=source_secret, horizon_uri=config['HORIZON_URL'], network=network)
builder.append_payment_op(destination=target_address, asset_code='HOT',
asset_issuer=config['ISSUER_HOT'], amount=amount)
builder.sign()
print("############### TX #################")
print('Payment {} HOT from {} to {}'.format(amount, src_address, target_address))
print('Network: {}'.format(network))
print('Sequence: {}'.format(builder.sequence))
print('Hash: {}'.format(builder.hash()))
print("#########################################")
click.confirm('Correct?', abort=True)
print('Submitting...')
builder.submit()
print('success')
return True
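# Example invocation (illustrative; the target address and amount are placeholders
# and the source secret is prompted for interactively):
#
#     python payment.py G...TARGETADDRESS 12.5 --network TESTNET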
if __name__ == '__main__':
payment()
|
the-stack_0_814 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convience file system related operations."""
import os
import shutil
import sys
import tempfile
import platform
import time
def AtomicWriteFile(data, filename):
"""Write a file atomically.
NOTE: Not atomic on Windows!
Args:
data: String to write to the file.
filename: Filename to write.
"""
filename = os.path.abspath(filename)
handle, temp_file = tempfile.mkstemp(
prefix='atomic_write', suffix='.tmp',
dir=os.path.dirname(filename))
fh = os.fdopen(handle, 'wb')
fh.write(data)
fh.close()
# Window's can't move into place atomically, delete first.
if sys.platform in ['win32', 'cygwin']:
try:
os.remove(filename)
except OSError:
pass
os.rename(temp_file, filename)
def WriteFile(data, filename):
"""Write a file in one step.
Args:
data: String to write to the file.
filename: Filename to write.
"""
fh = open(filename, 'wb')
fh.write(data)
fh.close()
def ReadFile(filename):
"""Read a file in one step.
Args:
filename: Filename to read.
Returns:
String containing complete file.
"""
fh = open(filename, 'rb')
data = fh.read()
fh.close()
return data
class ExecutableNotFound(Exception):
pass
def Which(command, paths=None, require_executable=True):
"""Find the absolute path of a command in the current PATH.
Args:
command: Command name to look for.
paths: Optional paths to search.
Returns:
    Absolute path of the command (first one found).
  Raises:
    ExecutableNotFound: If the command cannot be located on any of the paths.
"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
exe_suffixes = ['']
if sys.platform == 'win32':
exe_suffixes += ['.exe']
for p in paths:
np = os.path.abspath(os.path.join(p, command))
for suffix in exe_suffixes:
full_path = np + suffix
if (os.path.isfile(full_path) and
(not require_executable or os.access(full_path, os.X_OK))):
return full_path
raise ExecutableNotFound('Unable to find: ' + command)
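# Example usage (illustrative sketch; the command name and search path below are
# hypothetical):
#
#     python_path = Which('python')                            # first match on PATH
#     tool_path = Which('sometool', paths=['/opt/tools/bin'])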
def MakeDirectoryIfAbsent(path):
"""Create a directory if it doesn't already exist.
Args:
path: Directory to create.
"""
if not os.path.isdir(path):
os.makedirs(path)
def MakeParentDirectoryIfAbsent(path):
"""Creates a directory for the parent if it doesn't already exist.
Args:
path: Path of child where parent directory should be created for.
"""
MakeDirectoryIfAbsent(os.path.dirname(path))
def RemoveDirectoryIfPresent(path):
"""Remove a directory if it exists.
Args:
path: Directory to remove.
"""
# On Windows, attempts to remove read-only files get Error 5. This
# error handler fixes the permissions and retries the removal.
def onerror_readonly(func, path, exc_info):
import stat
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
if os.path.exists(path):
shutil.rmtree(path, onerror=onerror_readonly)
def CopyTree(src, dst):
"""Recursively copy the items in the src directory to the dst directory.
Unlike shutil.copytree, the destination directory and any subdirectories and
files may exist. Existing directories are left untouched, and existing files
are removed and copied from the source using shutil.copy2. It is also not
symlink-aware.
Args:
src: Source. Must be an existing directory.
dst: Destination directory. If it exists, must be a directory. Otherwise it
will be created, along with parent directories.
"""
if not os.path.isdir(dst):
os.makedirs(dst)
for root, dirs, files in os.walk(src):
relroot = os.path.relpath(root, src)
dstroot = os.path.join(dst, relroot)
for d in dirs:
dstdir = os.path.join(dstroot, d)
if not os.path.isdir(dstdir):
os.mkdir(dstdir)
for f in files:
dstfile = os.path.join(dstroot, f)
if os.path.isfile(dstfile):
os.remove(dstfile)
shutil.copy2(os.path.join(root, f), dstfile)
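# Example usage (illustrative sketch; the directory names are hypothetical):
#
#     CopyTree('out/staging', 'out/release')   # merge the staging tree into release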
def MoveAndMergeDirTree(src_dir, dest_dir):
"""Moves everything from a source directory to a destination directory.
This is different from shutil's move implementation in that it only operates
on directories, and if the destination directory exists, it will move the
contents into the directory and merge any existing directories.
Args:
src_dir: Source directory which files should be moved from.
dest_dir: Destination directory where files should be moved and merged to.
"""
if not os.path.isdir(src_dir):
raise OSError('MoveAndMergeDirTree can only operate on directories.')
if not os.path.exists(dest_dir):
# Simply move the directory over if destination doesn't exist.
MakeParentDirectoryIfAbsent(dest_dir)
os.rename(src_dir, dest_dir)
else:
# Merge each item if destination directory exists.
for dir_item in os.listdir(src_dir):
source_item = os.path.join(src_dir, dir_item)
destination_item = os.path.join(dest_dir, dir_item)
if os.path.exists(destination_item):
if os.path.isdir(destination_item) and os.path.isdir(source_item):
# Merge the sub-directories together if they are both directories.
MoveAndMergeDirTree(source_item, destination_item)
elif os.path.isfile(destination_item) and os.path.isfile(source_item):
# Overwrite the file if they are both files.
os.unlink(destination_item)
os.rename(source_item, destination_item)
else:
raise OSError('Cannot move directory tree, mismatching types.'
' Source - %s. Destination - %s' %
(source_item, destination_item))
else:
os.rename(source_item, destination_item)
# Remove the directory once all the contents have been moved
os.rmdir(src_dir)
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
# directories fail. We currently do not have a complete understanding why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
# Virus checkers can also accidently prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if platform.IsWindows():
count = 0
while True:
try:
op(*args)
break
except Exception:
sys.stdout.write('FAILED: %s %s\n' % (op.__name__, repr(args)))
count += 1
if count < 5:
sys.stdout.write('RETRY: %s %s\n' % (op.__name__, repr(args)))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def MoveDirCleanly(src, dst):
RemoveDir(dst)
MoveDir(src, dst)
def MoveDir(src, dst):
Retry(shutil.move, src, dst)
def RemoveDir(path):
if os.path.exists(path):
Retry(shutil.rmtree, path)
def RemoveFile(path):
if os.path.exists(path):
Retry(os.unlink, path)
|
the-stack_0_817 | """Command-line tool to find out where a particular chip or board resides.
The ``spalloc-where-is`` command allows you to query boards by coordinate, by
physical location, by chip or by job. In response to a query, a standard set of
information is displayed as shown in the example below::
    $ spalloc-where-is --job-chip 24 14 3
Machine: my-machine
Physical Location: Cabinet 2, Frame 4, Board 7
Board Coordinate: (3, 4, 0)
Machine Chip Coordinates: (38, 51)
Coordinates within board: (2, 3)
Job using board: 24
Coordinates within job: (14, 3)
In this example we ask, 'where is chip (14, 3) in job 24'? We discover that:
* The chip is the machine named 'my-machine' on the board in cabinet 2, frame
4, board 7.
* This board's logical board coordinates are (3, 4, 0). These logical
coordinates may be used to specifically request this board from Spalloc in
the future.
* If 'my-machine' were booted as a single large machine, the chip we queried
would be chip (38, 51). This may be useful for cross-referencing with
diagrams produced by SpiNNer_.
* The chip in question is chip (2, 3) on its board. This may be useful when
  reporting faulty chips/replacing boards.
* The job currently running on the board has ID 24. Obviously in this example
we already knew this but this may be useful when querying by board.
* Finally, we're told that the queried chip has the coordinates (14, 3) in the
machine allocated to job 24. Again, this information may be more useful when
querying by board.
.. _SpiNNer: https://github.com/SpiNNakerManchester/SpiNNer
To query by logical board coordinate::
spalloc-where-is --board MACHINE X Y Z
To query by physical board location::
spalloc-where-is --physical MACHINE CABINET FRAME BOARD
To query by chip coordinate (as if the machine were booted as one large
machine)::
spalloc-where-is --chip MACHINE X Y
To query by chip coordinate of chips allocated to a job::
spalloc-where-is --job-chip JOB_ID X Y
"""
import sys
import argparse
from collections import OrderedDict
from spalloc import config
from spalloc import __version__, ProtocolClient, ProtocolTimeoutError
from spalloc.term import render_definitions
# The acceptable range of server version numbers
VERSION_RANGE_START = (0, 3, 0)
VERSION_RANGE_STOP = (2, 0, 0)
def main(argv=None):
cfg = config.read_config()
parser = argparse.ArgumentParser(
description="Find out the location (physical or logical) "
"of a chip or board.")
parser.add_argument("--version", "-V", action="version",
version=__version__)
control_args = parser.add_mutually_exclusive_group(required=True)
control_args.add_argument("--board", "-b", "--logical", "-l", nargs=4,
metavar=("MACHINE", "X", "Y", "Z"),
help="specify the logical board coordinate")
control_args.add_argument("--physical", "-p", nargs=4,
metavar=("MACHINE", "CABINET", "FRAME", "BOARD"),
help="specify a board's physical location")
control_args.add_argument("--chip", "-c", nargs=3,
metavar=("MACHINE", "X", "Y"),
help="specify a board by chip coordinates (as "
"if the whole machine is being used)")
control_args.add_argument("--job-chip", "-j", nargs=3,
metavar=("JOB_ID", "X", "Y"),
help="specify the chip coordinates of a chip "
"within a job's boards")
server_args = parser.add_argument_group("spalloc server arguments")
server_args.add_argument("--hostname", "-H", default=cfg["hostname"],
help="hostname or IP of the spalloc server "
"(default: %(default)s)")
server_args.add_argument("--port", "-P", default=cfg["port"],
type=int,
help="port number of the spalloc server "
"(default: %(default)s)")
server_args.add_argument("--timeout", default=cfg["timeout"],
type=float, metavar="SECONDS",
help="seconds to wait for a response "
"from the server (default: %(default)s)")
args = parser.parse_args(argv)
# Fail if server not specified
if args.hostname is None:
parser.error("--hostname of spalloc server must be specified")
client = ProtocolClient(args.hostname, args.port)
try:
# Connect to server and ensure compatible version
client.connect()
version = tuple(
map(int, client.version(timeout=args.timeout).split(".")))
if not (VERSION_RANGE_START <= version < VERSION_RANGE_STOP):
sys.stderr.write("Incompatible server version ({}).\n".format(
".".join(map(str, version))))
return 2
# Work out what the user asked for
try:
show_board_chip = False
if args.board:
machine, x, y, z = args.board
where_is_kwargs = {
"machine": machine,
"x": int(x),
"y": int(y),
"z": int(z),
}
elif args.physical:
machine, c, f, b = args.physical
where_is_kwargs = {
"machine": machine,
"cabinet": int(c),
"frame": int(f),
"board": int(b),
}
elif args.chip:
machine, x, y = args.chip
where_is_kwargs = {
"machine": machine,
"chip_x": int(x),
"chip_y": int(y),
}
show_board_chip = True
elif args.job_chip:
job_id, x, y = args.job_chip
where_is_kwargs = {
"job_id": int(job_id),
"chip_x": int(x),
"chip_y": int(y),
}
show_board_chip = True
except ValueError as e:
parser.error("Error: {}".format(e))
# Ask the server
location = client.where_is(**where_is_kwargs)
if location is None:
sys.stderr.write("No boards at the specified location.\n")
return 4
else:
out = OrderedDict()
out["Machine"] = location["machine"]
out["Physical location"] = "Cabinet {}, Frame {}, Board {}".format(
*location["physical"])
out["Board coordinate"] = tuple(location["logical"])
out["Machine chip coordinates"] = tuple(location["chip"])
if show_board_chip:
out["Coordinates within board"] = tuple(location["board_chip"])
out["Job using board"] = location["job_id"]
if location["job_id"]:
out["Coordinates within job"] = tuple(location["job_chip"])
print(render_definitions(out))
return 0
except (IOError, OSError, ProtocolTimeoutError) as e:
sys.stderr.write("Error communicating with server: {}\n".format(e))
return 1
finally:
client.close()
if __name__ == "__main__": # pragma: no cover
sys.exit(main())
|
the-stack_0_818 | # [1081] 不同字符的最小子序列
# https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters/description/
# * algorithms
# * Medium (53.88%)
# * Total Accepted: 6.7K
# * Total Submissions: 12.5K
# * Testcase Example: '"bcabc"'
# Return the lexicographically smallest subsequence of text that contains all the
# distinct characters of text exactly once.
#
# Example 1:
# Input: "cdadabcc"
# Output: "adbc"
# Example 2:
# Input: "abcd"
# Output: "abcd"
# Example 3:
# Input: "ecbacba"
# Output: "eacb"
# Example 4:
# Input: "leetcode"
# Output: "letcod"
#
# Constraints:
# 1 <= text.length <= 1000
# text consists of lowercase English letters
#
# Note: This problem is the same as 316 https://leetcode-cn.com/problems/remove-duplicate-letters/
import collections
class Solution(object):
def smallestSubsequence(self, text):
seen = set()
stack = []
        # Track how many occurrences of each letter remain; the rightmost index of
        # each character could be stored instead and used for the same check
remain_counter = collections.Counter(text)
for c in text:
            # Each letter may appear only once; skip it if it is already on the
            # stack, which is kept monotonically increasing
if c not in seen:
                # The top of the stack is larger than c and occurs again later, so pop it
while stack and stack[-1] > c and remain_counter[stack[-1]] > 0:
seen.discard(stack.pop())
stack.append(c)
seen.add(c)
remain_counter[c] -= 1
return "".join(stack)
|
the-stack_0_823 | numbers = list()
while True:
num = int(input('Insert a number: '))
numbers.append(num)
cont = str(input('Do you want to continue? [y/n]: ')).lower().strip()[0]
while cont not in 'yn':
cont = str(input('Do you want to continue? [y/n]: ')).lower().strip()[0]
if cont == 'n':
break
print(f'You inserted a total of {len(numbers)} numbers.')
print(f'The numbers, in descending order, are: {sorted(numbers, reverse=True)}.')
if 5 in numbers:
print(f'The number 5 appear {numbers.count(5)} times.')
else:
print('The number 5 don\'t appear in the list.')
|
the-stack_0_824 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import collections
from typing import Any, Iterable, cast, DefaultDict, TYPE_CHECKING, FrozenSet
from numpy import sqrt
from cirq import devices, ops, circuits, value
from cirq.devices.grid_qubit import GridQubit
from cirq.ops import raw_types
from cirq.value import Duration
from cirq.neutral_atoms import convert_to_neutral_atom_gates
if TYPE_CHECKING:
import cirq
def _subgate_if_parallel_gate(gate: 'cirq.Gate') -> 'cirq.Gate':
"""Returns gate.sub_gate if gate is a ParallelGate, else returns gate"""
return gate.sub_gate if isinstance(gate, ops.ParallelGate) else gate
def neutral_atom_gateset(max_parallel_z=None, max_parallel_xy=None):
return ops.Gateset(
ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CZPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),
ops.ParallelGateFamily(ops.ZPowGate, max_parallel_allowed=max_parallel_z),
ops.ParallelGateFamily(ops.XPowGate, max_parallel_allowed=max_parallel_xy),
ops.ParallelGateFamily(ops.YPowGate, max_parallel_allowed=max_parallel_xy),
ops.ParallelGateFamily(ops.PhasedXPowGate, max_parallel_allowed=max_parallel_xy),
ops.MeasurementGate,
ops.IdentityGate,
unroll_circuit_op=False,
accept_global_phase_op=False,
)
@value.value_equality
class NeutralAtomDevice(devices.Device):
"""A device with qubits placed on a grid."""
def __init__(
self,
measurement_duration: 'cirq.DURATION_LIKE',
gate_duration: 'cirq.DURATION_LIKE',
control_radius: float,
max_parallel_z: int,
max_parallel_xy: int,
max_parallel_c: int,
qubits: Iterable[GridQubit],
) -> None:
"""Initializes the description of the AQuA device.
Args:
measurement_duration: the maximum duration of a measurement.
gate_duration: the maximum duration of a gate
control_radius: the maximum distance between qubits for a controlled
gate. Distance is measured in units of the indices passed into
the GridQubit constructor.
max_parallel_z: The maximum number of qubits that can be acted on
in parallel by a Z gate
max_parallel_xy: The maximum number of qubits that can be acted on
in parallel by a local XY gate
max_parallel_c: the maximum number of qubits that can be acted on in
parallel by a controlled gate. Must be less than or equal to the
lesser of max_parallel_z and max_parallel_xy
qubits: Qubits on the device, identified by their x, y location.
Must be of type GridQubit
Raises:
ValueError: if the wrong qubit type is provided or if invalid
parallel parameters are provided
"""
self._measurement_duration = Duration(measurement_duration)
self._gate_duration = Duration(gate_duration)
self._control_radius = control_radius
self._max_parallel_z = max_parallel_z
self._max_parallel_xy = max_parallel_xy
if max_parallel_c > min(max_parallel_z, max_parallel_xy):
raise ValueError(
"max_parallel_c must be less than or equal to the"
"min of max_parallel_z and max_parallel_xy"
)
self._max_parallel_c = max_parallel_c
self.xy_gateset_all_allowed = ops.Gateset(
ops.ParallelGateFamily(ops.XPowGate),
ops.ParallelGateFamily(ops.YPowGate),
ops.ParallelGateFamily(ops.PhasedXPowGate),
unroll_circuit_op=False,
accept_global_phase_op=False,
)
self.controlled_gateset = ops.Gateset(
ops.AnyIntegerPowerGateFamily(ops.CNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCNotPowGate),
ops.AnyIntegerPowerGateFamily(ops.CZPowGate),
ops.AnyIntegerPowerGateFamily(ops.CCZPowGate),
unroll_circuit_op=False,
accept_global_phase_op=False,
)
self.gateset = neutral_atom_gateset(max_parallel_z, max_parallel_xy)
for q in qubits:
if not isinstance(q, GridQubit):
raise ValueError(f'Unsupported qubit type: {q!r}')
self.qubits = frozenset(qubits)
def qubit_set(self) -> FrozenSet['cirq.GridQubit']:
return self.qubits
def qubit_list(self):
return [qubit for qubit in self.qubits]
def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:
return convert_to_neutral_atom_gates.ConvertToNeutralAtomGates().convert(operation)
def duration_of(self, operation: ops.Operation):
"""Provides the duration of the given operation on this device.
Args:
operation: the operation to get the duration of
Returns:
The duration of the given operation on this device
Raises:
ValueError: If the operation provided doesn't correspond to a native
gate
"""
self.validate_operation(operation)
if isinstance(operation, (ops.GateOperation, ops.ParallelGateOperation)):
if isinstance(operation.gate, ops.MeasurementGate):
return self._measurement_duration
return self._gate_duration
def validate_gate(self, gate: ops.Gate):
"""Raises an error if the provided gate isn't part of the native gate set.
Args:
gate: the gate to validate
Raises:
ValueError: If the given gate is not part of the native gate set.
"""
if gate not in self.gateset:
if isinstance(gate, (ops.CNotPowGate, ops.CZPowGate, ops.CCXPowGate, ops.CCZPowGate)):
raise ValueError('controlled gates must have integer exponents')
raise ValueError(f'Unsupported gate: {gate!r}')
def validate_operation(self, operation: ops.Operation):
"""Raises an error if the given operation is invalid on this device.
Args:
operation: the operation to validate
Raises:
ValueError: If the operation is not valid
"""
if not isinstance(operation, (ops.GateOperation, ops.ParallelGateOperation)):
raise ValueError(f'Unsupported operation: {operation!r}')
# All qubits the operation acts on must be on the device
for q in operation.qubits:
if q not in self.qubits:
raise ValueError(f'Qubit not on device: {q!r}')
if operation not in self.gateset and not (
operation in self.xy_gateset_all_allowed and len(operation.qubits) == len(self.qubits)
):
raise ValueError(f'Unsupported operation: {operation!r}')
if operation in self.controlled_gateset:
if len(operation.qubits) > self._max_parallel_c:
raise ValueError(
'Too many qubits acted on in parallel by a controlled gate operation'
)
for p in operation.qubits:
for q in operation.qubits:
if self.distance(p, q) > self._control_radius:
raise ValueError(f"Qubits {p!r}, {q!r} are too far away")
def validate_moment(self, moment: ops.Moment):
"""Raises an error if the given moment is invalid on this device.
Args:
moment: The moment to validate
Raises:
ValueError: If the given moment is invalid
"""
super().validate_moment(moment)
CATEGORIES = {
'Z': (ops.ZPowGate,),
'XY': (
ops.XPowGate,
ops.YPowGate,
ops.PhasedXPowGate,
),
'controlled': (
ops.CNotPowGate,
ops.CZPowGate,
ops.CCXPowGate,
ops.CCZPowGate,
),
'measure': (ops.MeasurementGate,),
}
categorized_ops: DefaultDict = collections.defaultdict(list)
for op in moment.operations:
assert isinstance(op, (ops.GateOperation, ops.ParallelGateOperation))
for k, v in CATEGORIES.items():
assert isinstance(v, tuple)
gate = _subgate_if_parallel_gate(op.gate)
if isinstance(gate, v):
categorized_ops[k].append(op)
for k in ['Z', 'XY', 'controlled']:
if len(set(_subgate_if_parallel_gate(op.gate) for op in categorized_ops[k])) > 1:
raise ValueError(f"Non-identical simultaneous {k} gates")
num_parallel_xy = sum([len(op.qubits) for op in categorized_ops['XY']])
num_parallel_z = sum([len(op.qubits) for op in categorized_ops['Z']])
has_measurement = len(categorized_ops['measure']) > 0
controlled_qubits_lists = [op.qubits for op in categorized_ops['controlled']]
if sum([len(l) for l in controlled_qubits_lists]) > self._max_parallel_c:
raise ValueError("Too many qubits acted on by controlled gates")
if controlled_qubits_lists and (num_parallel_xy or num_parallel_z):
raise ValueError(
"Can't perform non-controlled operations at same time as controlled operations"
)
if self._are_qubit_lists_too_close(*controlled_qubits_lists):
raise ValueError("Interacting controlled operations")
if num_parallel_z > self._max_parallel_z:
raise ValueError("Too many simultaneous Z gates")
if num_parallel_xy > self._max_parallel_xy and num_parallel_xy != len(self.qubits):
raise ValueError("Bad number of simultaneous XY gates")
if has_measurement:
if controlled_qubits_lists or num_parallel_z or num_parallel_xy:
raise ValueError("Measurements can't be simultaneous with other operations")
def _are_qubit_lists_too_close(self, *qubit_lists: Iterable[raw_types.Qid]) -> bool:
if len(qubit_lists) < 2:
return False
if len(qubit_lists) == 2:
a, b = qubit_lists
return any(self.distance(p, q) <= self._control_radius for p in a for q in b)
return any(
self._are_qubit_lists_too_close(a, b) for a, b in itertools.combinations(qubit_lists, 2)
)
def can_add_operation_into_moment(self, operation: ops.Operation, moment: ops.Moment) -> bool:
"""Determines if it's possible to add an operation into a moment.
An operation can be added if the moment with the operation added is valid.
Args:
operation: The operation being added.
moment: The moment being transformed.
Returns:
Whether or not the moment will validate after adding the operation.
Raises:
ValueError: If either of the given moment or operation is invalid
"""
if not super().can_add_operation_into_moment(operation, moment):
return False
try:
self.validate_moment(moment.with_operation(operation))
except:
return False
return True
def validate_circuit(self, circuit: circuits.AbstractCircuit):
"""Raises an error if the given circuit is invalid on this device.
A circuit is invalid if any of its moments are invalid or if there is a
non-empty moment after a moment with a measurement.
Args:
circuit: The circuit to validate
Raises:
ValueError: If the given circuit can't be run on this device
"""
super().validate_circuit(circuit)
# Measurements must be in the last non-empty moment
has_measurement_occurred = False
for moment in circuit:
if has_measurement_occurred:
if len(moment.operations) > 0:
raise ValueError("Non-empty moment after measurement")
for operation in moment.operations:
if isinstance(operation.gate, ops.MeasurementGate):
has_measurement_occurred = True
def _value_equality_values_(self) -> Any:
return (
self._measurement_duration,
self._gate_duration,
self._max_parallel_z,
self._max_parallel_xy,
self._max_parallel_c,
self._control_radius,
self.qubits,
)
def __repr__(self) -> str:
return (
'cirq.NeutralAtomDevice('
f'measurement_duration={self._measurement_duration!r}, '
f'gate_duration={self._gate_duration!r}, '
f'max_parallel_z={self._max_parallel_z!r}, '
f'max_parallel_xy={self._max_parallel_xy!r}, '
f'max_parallel_c={self._max_parallel_c!r}, '
f'control_radius={self._control_radius!r}, '
f'qubits={sorted(self.qubits)!r})'
)
def neighbors_of(self, qubit: 'cirq.GridQubit') -> Iterable['cirq.GridQubit']:
"""Returns the qubits that the given qubit can interact with."""
possibles = [
GridQubit(qubit.row + 1, qubit.col),
GridQubit(qubit.row - 1, qubit.col),
GridQubit(qubit.row, qubit.col + 1),
GridQubit(qubit.row, qubit.col - 1),
]
return [e for e in possibles if e in self.qubits]
def distance(self, p: 'cirq.Qid', q: 'cirq.Qid') -> float:
p = cast(GridQubit, p)
q = cast(GridQubit, q)
return sqrt((p.row - q.row) ** 2 + (p.col - q.col) ** 2)
def __str__(self) -> str:
diagram = circuits.TextDiagramDrawer()
for q in self.qubits:
diagram.write(q.col, q.row, str(q))
for q2 in self.neighbors_of(q):
diagram.grid_line(q.col, q.row, q2.col, q2.row)
return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)
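# Example construction (illustrative sketch; the durations, control radius and
# parallelism limits below are arbitrary values chosen only for demonstration):
#
#     device = NeutralAtomDevice(
#         measurement_duration=Duration(nanos=100),
#         gate_duration=Duration(nanos=50),
#         control_radius=1.5,
#         max_parallel_z=3,
#         max_parallel_xy=3,
#         max_parallel_c=3,
#         qubits=[GridQubit(row, col) for row in range(3) for col in range(3)])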
|
the-stack_0_825 | """Tests related to creating ingest definition"""
import json
import os
import unittest
from rf.models import Scene
from rf.ingest.landsat8_ingest import get_landsat8_layer
class Landsat8LayerTestCase(unittest.TestCase):
"""Test that we can create a layer from Landsat 8 scenes"""
def setUp(self):
cwd = os.path.abspath(os.path.dirname(__file__))
scene_path = os.path.join(cwd, 'data', 'scene.json')
with open(scene_path) as fh:
self.scene = Scene.from_dict(json.load(fh))
def test_create_layer(self):
"""Minimal test to verify that a layer can be created"""
layer = get_landsat8_layer(self.scene)
num_sources = len(layer.sources)
self.assertEqual(
num_sources, 11, 'Found {} sources, expected 11'.format(num_sources)
)
|
the-stack_0_826 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import tensorflow as tf
NUM_CLASSES = 10
EMBEDDING_DIM = 7
def model_fn(features, labels, mode, params):
# build model
global_step = tf.train.get_global_step()
embedding_table = tf.get_variable('embedding_table', shape=(NUM_CLASSES, EMBEDDING_DIM), dtype=tf.float32)
embeddings = tf.nn.embedding_lookup(embedding_table, features)
# lstm model
batch_size = params['train_batch_size']
sequence_length = params['sequence_length']
cell = tf.nn.rnn_cell.BasicLSTMCell(EMBEDDING_DIM)
outputs, final_state = tf.nn.dynamic_rnn(cell, embeddings, dtype=tf.float32)
# flatten the batch and sequence dimensions
flattened = tf.reshape(outputs, (-1, EMBEDDING_DIM))
flattened_logits = tf.layers.dense(flattened, NUM_CLASSES)
logits = tf.reshape(flattened_logits, (-1, sequence_length, NUM_CLASSES))
predictions = tf.multinomial(flattened_logits, num_samples=1)
loss = None
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
# define loss
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
# define train_op
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.05)
# wrapper to make the optimizer work with TPUs
if params['use_tpu']:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=global_step)
if params['use_tpu']:
# TPU version of EstimatorSpec
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
else:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
def train_input_fn(params={}):
# make some fake data of labels
data_length = 100
x = np.random.randint(0, NUM_CLASSES, data_length)
y = np.random.randint(0, NUM_CLASSES, data_length)
x_tensor = tf.constant(x, dtype=tf.int32)
y_tensor = tf.constant(y, dtype=tf.int32)
dataset = tf.data.Dataset.from_tensors((x_tensor, y_tensor))
dataset = dataset.repeat()
# TPUs need to know the full shape of tensors
# so we use a fixed sequence length
sequence_length = params.get('sequence_length', 5)
def get_sequences(x_tensor, y_tensor):
index = tf.random_uniform([1], minval=0, maxval=data_length-sequence_length, dtype=tf.int32)[0]
x_sequence = x_tensor[index:index+sequence_length]
y_sequence = y_tensor[index:index+sequence_length]
return (x_sequence, y_sequence)
dataset = dataset.map(get_sequences)
# TPUEstimator passes params when calling input_fn
batch_size = params.get('train_batch_size', 16)
dataset = dataset.batch(batch_size, drop_remainder=True)
# TPUs need to know all dimensions when the graph is built
# Datasets know the batch size only when the graph is run
def set_shapes(features, labels):
features_shape = features.get_shape().merge_with([batch_size, sequence_length])
labels_shape = labels.get_shape().merge_with([batch_size, sequence_length])
features.set_shape(features_shape)
labels.set_shape(labels_shape)
return features, labels
dataset = dataset.map(set_shapes)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
return dataset
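# Quick local sanity check of the input pipeline (illustrative sketch; uses
# TF1-style session execution and is not part of the TPU training path):
#
#     ds = train_input_fn({'train_batch_size': 4, 'sequence_length': 5})
#     features, labels = ds.make_one_shot_iterator().get_next()
#     with tf.Session() as sess:
#         x, y = sess.run([features, labels])   # both arrays shaped (4, 5)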
def main(args):
# pass the args as params so the model_fn can use
# the TPU specific args
params = vars(args)
if args.use_tpu:
# additional configs required for using TPUs
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu)
tpu_config = tf.contrib.tpu.TPUConfig(
num_shards=8, # using Cloud TPU v2-8
iterations_per_loop=args.save_checkpoints_steps)
# use the TPU version of RunConfig
config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=args.model_dir,
tpu_config=tpu_config,
save_checkpoints_steps=args.save_checkpoints_steps,
save_summary_steps=100)
# TPUEstimator
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=model_fn,
config=config,
params=params,
train_batch_size=args.train_batch_size,
eval_batch_size=32,
export_to_tpu=False)
else:
config = tf.estimator.RunConfig(model_dir=args.model_dir)
estimator = tf.estimator.Estimator(
model_fn,
config=config,
params=params)
estimator.train(train_input_fn, max_steps=args.max_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model-dir',
type=str,
default='/tmp/tpu-template',
help='Location to write checkpoints and summaries to. Must be a GCS URI when using Cloud TPU.')
parser.add_argument(
'--max-steps',
type=int,
default=1000,
help='The total number of steps to train the model.')
parser.add_argument(
'--sequence-length',
type=int,
default=5,
help='The sequence length for an LSTM model.')
parser.add_argument(
'--train-batch-size',
type=int,
default=16,
help='The training batch size. The training batch is divided evenly across the TPU cores.')
parser.add_argument(
'--save-checkpoints-steps',
type=int,
default=100,
help='The number of training steps before saving each checkpoint.')
parser.add_argument(
'--use-tpu',
action='store_true',
help='Whether to use TPU.')
parser.add_argument(
'--tpu',
default=None,
help='The name or GRPC URL of the TPU node. Leave it as `None` when training on AI Platform.')
args, _ = parser.parse_known_args()
main(args)
|
the-stack_0_829 | import argparse
import collections
import json
import os
import numpy as np
import torch
import yaml
__all__ = [
"load_config",
"save_config",
"flatten_dict",
"sanitize_dict",
"update_namespace",
"extract",
"s2b",
"g",
]
# Load config file
def load_yaml(f_path):
with open(f_path, "r") as stream:
return yaml.safe_load(stream)
def load_json(f_path):
with open(f_path, "r") as f:
return json.load(f)
def load_config(path, flatten=True):
_, ext = os.path.splitext(path)
assert ext in [
".json",
".yaml",
".yml",
], f"Only support yaml and json config, but '{ext}' given."
    if ext == ".json":
cfg = load_json(path)
else:
cfg = load_yaml(path)
if cfg is None:
cfg = dict()
if flatten:
cfg = flatten_dict(cfg)
return cfg
# Dump config file
def save_json(obj, f_path):
with open(f_path, "w") as f:
json.dump(obj, f, ensure_ascii=False, indent=4)
def save_yaml(obj, f_path):
with open(f_path, "w") as f:
yaml.dump(obj, f)
def save_config(obj, path, ext=None):
_, fext = os.path.splitext(path)
if fext.startswith("."):
fext = fext[1:]
if fext != "":
assert (
ext == None or fext == ext
), f"Extension conflict between '{path}' and '{ext}'."
ext = fext
if ext in ["yaml", "yml"]:
save_yaml(obj, path)
else:
save_json(obj, path)
# Utils
def flatten_dict(d, keep_parent=False, sep="_", parent_key=""):
"""Flatten dict to only one nest
Args:
d (dict): dictionary to flatten
keep_parent (bool, optional): If True, keep parent's key name, and keys should all be str. Defaults to False.
sep (str, optional): Effective only keep_parent=True, separator between keys. Defaults to "_".
parent_key (str, optional): For recursive call. Defaults to "".
Returns:
dict: flattened dict
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key and keep_parent else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(
flatten_dict(v, keep_parent, parent_key=new_key, sep=sep).items()
)
else:
items.append((new_key, v))
items_key = [i[0] for i in items]
assert len(items_key) == len(set(items_key))
return dict(items)
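# Example (illustrative):
#
#     flatten_dict({"a": 1, "b": {"c": 2}})                    # -> {"a": 1, "c": 2}
#     flatten_dict({"a": 1, "b": {"c": 2}}, keep_parent=True)  # -> {"a": 1, "b_c": 2}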
def sanitize_dict(params, to_str=True, none_fill="N/A"):
"""Convert all items into tensorboard supported values or str
Args:
params (dict): dict to sanitize
to_str (bool, optional): If True, turn all items to string. Defaults to True.
Returns:
dict: sanitized dict
"""
items = []
for k in params.keys():
# numpy to float
if isinstance(params[k], (np.bool_, np.integer, np.floating)):
items.append([k, params[k].item()])
elif isinstance(params[k], np.ndarray):
items.append([k, str(params[k].tolist())])
# torch to float
elif isinstance(params[k], torch.Tensor):
items.append([k, str(params[k].tolist())])
# None to str
elif params[k] is None:
items.append([k, none_fill])
# Others to str
elif type(params[k]) not in [bool, int, float, str, torch.Tensor]:
items.append([k, str(params[k])])
else:
items.append([k, params[k]])
# All to str
if to_str:
items[-1][-1] = str(items[-1][-1])
return dict(items)
def update_namespace(args, dictionary, overwrite=True, rest=False):
"""update Namespace with given dictionary
Args:
args (Namespace): Namespace to be updated
dictionary (dict): dictionary
        overwrite (bool, optional): If True, all Namespace values will be overwritten by dictionary values. Otherwise, only Namespace entries that are None will be overwritten. Defaults to True.
        rest: Effective only if overwrite=False. If True, add keys that are in dictionary but not in args into args. Otherwise raise an error.
Returns:
Namespace
"""
dict_args = vars(args)
if overwrite:
dict_args.update(dictionary)
else:
for k, v in dict_args.items():
if v is not None:
pass
elif k in dictionary:
dict_args[k] = dictionary[k]
for k, v in dictionary.items():
if k not in dict_args:
if rest:
dict_args[k] = v
else:
raise KeyError(f"no key {k}")
args = argparse.Namespace(**dict_args)
return args
def extract(s, delimit="-", num=0):
"""Extract the num_th word from string s
Args:
s (str): string to be parsed
delimit (str, optional): delimiter. Defaults to "-".
        num (int, optional): Index of the word to extract. Defaults to 0.
Returns:
        (str, str): the extracted word and the rest of the string joined by delimit
"""
s_list = s.split(delimit)
first = s_list[num]
s_list.pop(num)
s_rest = delimit.join(s_list)
return first, s_rest
# argparse type
def s2b(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
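

# Illustrative usage of s2b (assumed pattern, not from the original file):
#   parser.add_argument("--flag", type=s2b, nargs="?", const=True, default=False)
# lets a CLI accept "--flag", "--flag true", "--flag 0", and similar spellings.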
# template generator for params.py
def g(template, name_list, placeholder="{}"):
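    """Generate one entry per name in ``name_list`` by substituting ``placeholder``
    in the first two fields of ``template``; the remaining fields are copied as-is.
    """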
items = []
for name in name_list:
t = []
t.append(template[0].replace(placeholder, name))
t.append(template[1].replace(placeholder, name))
t.extend(template[2:])
items.append(t)
return items
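

# Minimal usage sketch (added for illustration; not part of the original module).
# It only exercises the helpers defined above and relies on the module-level
# imports (argparse, etc.) that those helpers already use.
if __name__ == "__main__":
    nested = {"model": {"lr": 1e-3, "dim": 128}, "seed": 0}
    # Default: nested keys are hoisted to the top level.
    print(flatten_dict(nested))  # {'lr': 0.001, 'dim': 128, 'seed': 0}
    # keep_parent=True prefixes child keys with the parent key.
    print(flatten_dict(nested, keep_parent=True, sep="."))  # {'model.lr': 0.001, ...}
    # None values are replaced and everything is stringified by default.
    print(sanitize_dict({"lr": 1e-3, "note": None}))  # {'lr': '0.001', 'note': 'N/A'}
    # Only the attribute that is still None gets filled when overwrite=False.
    ns = argparse.Namespace(lr=None, epochs=10)
    print(update_namespace(ns, {"lr": 0.01}, overwrite=False))  # Namespace(epochs=10, lr=0.01)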
|
the-stack_0_830 | # -*- coding:utf-8 -*-
import unittest
class TestZip(unittest.TestCase):
TESTDATA = [
("aabbb" , "a2b3"),
("aaaa", "a4"),
("abc", "abc"),
("abcdd","abcdd")
]
def setUp(self):
self.judge = Zipper()
def testsame(self):
for src, exp in self.TESTDATA:
self.assertEqual(self.judge.zipString(src),exp)
class Zipper:
def zipString(self, iniString):
# write code here
record = []
prevchar = None
prevlen = 0
for letter in iniString:
if letter == prevchar:
prevlen += 1
else:
if prevlen > 0:
record.append({prevchar : prevlen})
prevlen = 1
prevchar = letter
if prevlen > 0:
record.append({prevchar : prevlen})
newstring = ''
for item in record:
            for key, value in item.items():
newstring += "{}{}".format(key,value)
return newstring if len(newstring) < len(iniString) else iniString
if __name__ == '__main__':
unittest.main()
|
the-stack_0_831 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append("..")
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
paddle.enable_static()
class TestAny8DOp(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 5, 4)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAnyOpWithDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': [1]}
self.outputs = {'Out': self.inputs['X'].any(axis=1)}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAny8DOpWithDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (3, 6)}
self.outputs = {'Out': self.inputs['X'].any(axis=self.attrs['dim'])}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAnyOpWithKeepDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {'X': np.random.randint(0, 2, (5, 6, 10)).astype("bool")}
self.attrs = {'dim': (1), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
class TestAny8DOpWithKeepDim(OpTest):
def setUp(self):
self.set_npu()
self.op_type = "reduce_any"
self.place = paddle.NPUPlace(0)
self.inputs = {
'X': np.random.randint(0, 2,
(2, 5, 3, 2, 2, 3, 4, 2)).astype("bool")
}
self.attrs = {'dim': (1), 'keep_dim': True}
self.outputs = {
'Out': np.expand_dims(
self.inputs['X'].any(axis=self.attrs['dim']), axis=1)
}
def set_npu(self):
self.__class__.use_npu = True
def test_check_output(self):
self.check_output_with_place(self.place)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_833 | # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: jglaser / All Developers are free to add commands for new features
R""" Potentials between special pairs of particles
Special pairs are used to implement interactions between designated pairs of particles.
They act much like bonds, except that the interaction potential is typically a pair potential,
such as LJ.
By themselves, special pairs that have been specified in an initial configuration do nothing. Only when you
specify a force (e.g. special_pairs.lj), are forces actually calculated between the
listed particles.
"""
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import force;
from hoomd.md import bond;
import hoomd;
import math;
import sys;
class coeff:
R""" Define special_pair coefficients.
The coefficients for all special pair potentials are specified using this class. Coefficients are
specified per pair type.
There are two ways to set the coefficients for a particular special_pair potential.
The first way is to save the special_pair potential in a variable and call :py:meth:`set()` directly.
See below for an example of this.
The second method is to build the coeff class first and then assign it to the
special_pair potential. There are some advantages to this method in that you could specify a
complicated set of special_pair potential coefficients in a separate python file and import
it into your job script.
Example::
my_coeffs = hoomd.md.special_pair.coeff();
special_pair_force.pair_coeff.set('pairtype1', epsilon=1, sigma=1)
special_pair_force.pair_coeff.set('backbone', epsilon=1.2, sigma=1)
"""
## \internal
# \brief Initializes the class
# \details
# The main task to be performed during initialization is just to init some variables
# \param self Python required class instance variable
def __init__(self):
self.values = {};
self.default_coeff = {}
## \var values
# \internal
# \brief Contains the vector of set values in a dictionary
## \var default_coeff
# \internal
# \brief default_coeff['coeff'] lists the default value for \a coeff, if it is set
## \internal
# \brief Sets a default value for a given coefficient
# \details
# \param name Name of the coefficient to for which to set the default
# \param value Default value to set
#
    # Some coefficients have reasonable default values and the user should not be burdened with typing them in
    # all the time. set_default_coeff() sets the default value to use when the user does not specify the coefficient explicitly.
def set_default_coeff(self, name, value):
self.default_coeff[name] = value;
def set(self, type, **coeffs):
R""" Sets parameters for special_pair types.
Args:
type (str): Type of special_pair (or a list of type names)
coeffs: Named coefficients (see below for examples)
Calling :py:meth:`set()` results in one or more parameters being set for a special_pair type. Types are identified
by name, and parameters are also added by name. Which parameters you need to specify depends on the special_pair
potential you are setting these coefficients for, see the corresponding documentation.
All possible special_pair types as defined in the simulation box must be specified before executing run().
You will receive an error if you fail to do so. It is not an error, however, to specify coefficients for
special_pair types that do not exist in the simulation. This can be useful in defining a potential field for many
different types of special_pairs even when some simulations only include a subset.
Examples::
my_special_pair_force.special_pair_coeff.set('pair1', epsilon=1, sigma=1)
my_special_pair_force.pair_coeff.set('pair2', epsilon=0.5, sigma=0.7)
my_special_pair_force.pair_coeff.set(['special_pairA','special_pairB'], epsilon=0, sigma=1)
Note:
Single parameters can be updated. If both ``k`` and ``r0`` have already been set for a particle type,
then executing ``coeff.set('polymer', r0=1.0)`` will update the value of ``r0`` and leave the other
parameters as they were previously set.
"""
hoomd.util.print_status_line();
# listify the input
type = hoomd.util.listify(type)
for typei in type:
self.set_single(typei, coeffs);
## \internal
# \brief Sets a single parameter
def set_single(self, type, coeffs):
type = str(type);
# create the type identifier if it hasn't been created yet
if (not type in self.values):
self.values[type] = {};
# update each of the values provided
if len(coeffs) == 0:
hoomd.context.msg.error("No coefficients specified\n");
for name, val in coeffs.items():
self.values[type][name] = val;
# set the default values
for name, val in self.default_coeff.items():
# don't override a coeff if it is already set
if not name in self.values[type]:
self.values[type][name] = val;
## \internal
# \brief Verifies that all values are set
# \details
# \param self Python required self variable
# \param required_coeffs list of required variables
#
# This can only be run after the system has been initialized
def verify(self, required_coeffs):
# first, check that the system has been initialized
if not hoomd.init.is_initialized():
hoomd.context.msg.error("Cannot verify special_pair coefficients before initialization\n");
raise RuntimeError('Error verifying force coefficients');
# get a list of types from the particle data
ntypes = hoomd.context.current.system_definition.getPairData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getPairData().getNameByType(i));
valid = True;
# loop over all possible types and verify that all required variables are set
for i in range(0,ntypes):
type = type_list[i];
if type not in self.values.keys():
hoomd.context.msg.error("Pair type " +str(type) + " not found in pair coeff\n");
valid = False;
continue;
# verify that all required values are set by counting the matches
count = 0;
for coeff_name in self.values[type].keys():
if not coeff_name in required_coeffs:
hoomd.context.msg.notice(2, "Notice: Possible typo? Force coeff " + str(coeff_name) + " is specified for type " + str(type) + \
", but is not used by the special pair force\n");
else:
count += 1;
if count != len(required_coeffs):
hoomd.context.msg.error("Special pair type " + str(type) + " is missing required coefficients\n");
valid = False;
return valid;
## \internal
# \brief Gets the value of a single %special_pair %force coefficient
# \detail
# \param type Name of special_pair type
# \param coeff_name Coefficient to get
def get(self, type, coeff_name):
if type not in self.values.keys():
hoomd.context.msg.error("Bug detected in force.coeff. Please report\n");
raise RuntimeError("Error setting special_pair coeff");
return self.values[type][coeff_name];
## \internal
# \brief Return metadata
def get_metadata(self):
return self.values
## \internal
# \brief Base class for special pair potentials
#
# A special pair in hoomd.* reflects a PotentialSpecialPair in c++. It is responsible
# for all high-level management that happens behind the scenes for hoomd
# writers. 1) The instance of the c++ bond force itself is tracked and added to the
# System 2) methods are provided for disabling the force from being added to the
# net force on each particle
class _special_pair(force._force):
## \internal
# \brief Constructs the bond potential
#
# \param name name of the bond potential instance
#
# Initializes the cpp_force to None.
# If specified, assigns a name to the instance
# Assigns a name to the force in force_name;
def __init__(self, name=None):
# initialize the base class
force._force.__init__(self, name);
self.cpp_force = None;
# setup the coefficient vector (use bond coefficients for that)
self.pair_coeff = coeff();
self.enabled = True;
def update_coeffs(self):
coeff_list = self.required_coeffs;
# check that the force coefficients are valid
if not self.pair_coeff.verify(coeff_list):
hoomd.context.msg.error("Not all force coefficients are set\n");
raise RuntimeError("Error updating force coefficients");
# set all the params
ntypes = hoomd.context.current.system_definition.getPairData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getPairData().getNameByType(i));
for i in range(0,ntypes):
# build a dict of the coeffs to pass to proces_coeff
coeff_dict = {};
for name in coeff_list:
coeff_dict[name] = self.pair_coeff.get(type_list[i], name);
param = self.process_coeff(coeff_dict);
self.cpp_force.setParams(i, param);
## \internal
# \brief Get metadata
def get_metadata(self):
data = force._force.get_metadata(self)
# make sure coefficients are up-to-date
self.update_coeffs()
data['pair_coeff'] = self.pair_coeff
return data
class lj(_special_pair):
R""" LJ special pair potential.
Args:
name (str): Name of the special_pair instance.
:py:class:`lj` specifies a Lennard-Jones potential energy between the two particles in each defined pair.
This is useful for implementing e.g. special 1-4 interactions in all-atom force fields.
The pair potential uses the standard LJ definition.
.. math::
:nowrap:
\begin{eqnarray*}
V_{\mathrm{LJ}}(r) = & 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} -
\alpha \left( \frac{\sigma}{r} \right)^{6} \right] & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\vec{r}` is the vector pointing from one particle to the other in the bond.
Coefficients:
- :math:`\varepsilon` - *epsilon* (in energy units)
- :math:`\sigma` - *sigma* (in distance units)
- :math:`\alpha` - *alpha* (unitless) - *optional*: defaults to 1.0
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
Example::
lj = special_pair.lj(name="my_pair")
lj.pair_coeff.set('pairtype_1', epsilon=5.4, sigma=0.47, r_cut=1.1)
Note:
The energy of special pair interactions is reported in a log quantity **special_pair_lj_energy**, which
is separate from those of other non-bonded interactions. Therefore, the total energy of nonbonded interactions
is obtained by adding that of standard and special interactions.
.. versionadded:: 2.1
"""
def __init__(self,name=None):
hoomd.util.print_status_line();
# initialize the base class
_special_pair.__init__(self);
# check that some bonds are defined
if hoomd.context.current.system_definition.getPairData().getNGlobal() == 0:
hoomd.context.msg.error("No pairs are defined.\n");
raise RuntimeError("Error creating special pair forces");
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PotentialSpecialPairLJ(hoomd.context.current.system_definition,self.name);
else:
self.cpp_force = _md.PotentialSpecialPairLJGPU(hoomd.context.current.system_definition,self.name);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# setup the coefficient options
self.required_coeffs = ['epsilon','sigma','alpha','r_cut'];
self.pair_coeff.set_default_coeff('alpha', 1.0);
def process_coeff(self, coeff):
r_cut = coeff['r_cut'];
epsilon = coeff['epsilon'];
sigma = coeff['sigma'];
alpha = coeff['alpha'];
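        # Precompute the standard LJ prefactors: lj1 = 4*eps*sigma^12 and
        # lj2 = 4*alpha*eps*sigma^6, so that V(r) = lj1/r^12 - lj2/r^6.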
lj1 = 4.0 * epsilon * math.pow(sigma, 12.0);
lj2 = alpha * 4.0 * epsilon * math.pow(sigma, 6.0);
r_cut_squared = r_cut * r_cut
return _hoomd.make_scalar3(lj1, lj2, r_cut_squared);
class coulomb(_special_pair):
R""" Coulomb special pair potential.
Args:
name (str): Name of the special_pair instance.
:py:class:`coulomb` specifies a Coulomb potential energy between the two particles in each defined pair.
This is useful for implementing e.g. special 1-4 interactions in all-atom force fields. It uses a standard Coulomb interaction with a scaling parameter. This allows for using this for scaled 1-4 interactions like in OPLS where both the 1-4 LJ and Coulomb interactions are scaled by 0.5.
.. math::
:nowrap:
\begin{eqnarray*}
V_{\mathrm{Coulomb}}(r) = & \alpha \cdot \left[ \frac{q_{a}q_{b}}{r} \right] & r < r_{\mathrm{cut}} \\
= & 0 & r \ge r_{\mathrm{cut}} \\
\end{eqnarray*}
where :math:`\vec{r}` is the vector pointing from one particle to the other in the bond.
Coefficients:
- :math:`\alpha` - Coulomb scaling factor (defaults to 1.0)
- :math:`q_{a}` - charge of particle a (in hoomd charge units)
- :math:`q_{b}` - charge of particle b (in hoomd charge units)
- :math:`r_{\mathrm{cut}}` - *r_cut* (in distance units)
Example::
coul = special_pair.coulomb(name="myOPLS_style")
coul.pair_coeff.set('pairtype_1', alpha=0.5, r_cut=1.1)
Note:
The energy of special pair interactions is reported in a log quantity **special_pair_coul_energy**, which
is separate from those of other non-bonded interactions. Therefore, the total energy of non-bonded interactions
is obtained by adding that of standard and special interactions.
.. versionadded:: 2.2
.. versionchanged:: 2.2
"""
def __init__(self, name=None):
hoomd.util.print_status_line();
# initialize the base class
_special_pair.__init__(self);
# check that some bonds are defined
if hoomd.context.current.system_definition.getPairData().getNGlobal() == 0:
hoomd.context.msg.error("No pairs are defined.\n");
raise RuntimeError("Error creating special pair forces");
# create the c++ mirror class
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PotentialSpecialPairCoulomb(hoomd.context.current.system_definition,self.name);
else:
self.cpp_force = _md.PotentialSpecialPairCoulombGPU(hoomd.context.current.system_definition,self.name);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# setup the coefficient options
self.required_coeffs = ['alpha', 'r_cut'];
self.pair_coeff.set_default_coeff('alpha', 1.0);
def process_coeff(self, coeff):
r_cut = coeff['r_cut'];
alpha = coeff['alpha'];
r_cut_squared = r_cut * r_cut;
return _hoomd.make_scalar2(alpha, r_cut_squared);
|
the-stack_0_834 | #!/usr/bin/python3
"""
fasttextRun.py: run fasttext via python interface
usage: fasttextRun.py -f file [-n N]
note: default number of N is 10 (10-fold cross validation)
20180105 erikt(at)xs4all.nl
"""
import fasttext
import os
import random
import splitFile
import sys
COMMAND = sys.argv.pop(0)
DIM = 300
LARGENUMBER = 100000
MINCOUNT = 5
random.seed()
TMPFILENAME = "fasttextRun."+str(os.getpid())+"."+str(random.randint(0,LARGENUMBER))
def makeTrainFile(inFileName,i,n):
outFileName = TMPFILENAME+".train"
outFile = open(outFileName,"w")
for j in range(0,n):
if j != i:
inFile = open(inFileName+"."+str(j),"r")
for line in inFile: outFile.write(line)
inFile.close()
outFile.close()
return(outFileName)
def fasttextRun(inFileName,i,n):
trainFileName = makeTrainFile(inFileName,i,n)
modelFileName = TMPFILENAME+".model"
testFileName = inFileName+"."+str(i)
classifier = fasttext.supervised(trainFileName,modelFileName,dim=DIM,min_count=MINCOUNT)
# ,pretrained_vectors="/home/erikt/software/fastText/wiki.nl.vec")
result = classifier.test(testFileName)
os.unlink(trainFileName)
os.unlink(modelFileName+".bin")
return(result.precision)
def main(argv):
inFileName, n = splitFile.processOpts(list(argv))
data = splitFile.readData(inFileName)
splitFile.writeData(inFileName,data,n)
accuracyTotal = 0.0
for i in range(0,n):
accuracy = fasttextRun(inFileName,i,n)
accuracyTotal += accuracy
print("Fold: {0:0d}; Accuracy: {1:0.3f}".format(i,accuracy))
print("Average accuracy {0:0.3f}".format(accuracyTotal/float(n)))
return(0)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
the-stack_0_836 | # coding: utf8
from .tsv_utils import complementary_list, find_label, baseline_df, chi2
from clinicaaddl.tools.deep_learning.iotools import return_logger
from scipy.stats import ttest_ind
import shutil
import pandas as pd
from os import path
import numpy as np
import os
import logging
sex_dict = {'M': 0, 'F': 1}
def create_split(diagnosis, diagnosis_df, n_test,
pval_threshold_ttest=0.80, t_val_chi2_threshold=0.0642,
ignore_demographics=False, logger=None):
"""
Split data at the subject-level in training and test set with equivalent age and sex distributions
:param diagnosis: (str) diagnosis on which the split is done
:param diagnosis_df: DataFrame with columns including ['participant_id', 'session_id', 'diagnosis']
:param n_test: (float)
If >= 1 number of subjects to put in the test set.
If < 1 proportion of subjects to put in the test set.
:param pval_threshold_ttest: (float) threshold for the t-test on age
:param t_val_chi2_threshold: (float) threshold for the chi2 test on sex
:param ignore_demographics: (bool): If True the diagnoses are split without taking into account the demographics
distributions (age, sex).
:param logger: Logger object from logging library
:return:
train_df (DataFrame) subjects in the train set
test_df (DataFrame) subjects in the test set
"""
if logger is None:
logger = logging
logger.basicConfig(level=logging.DEBUG)
diagnosis_baseline_df = baseline_df(diagnosis_df)
if n_test >= 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(diagnosis_baseline_df))
if not ignore_demographics:
try:
sex_label = find_label(diagnosis_baseline_df.columns.values, "sex")
age_label = find_label(diagnosis_baseline_df.columns.values, "age")
except ValueError:
raise ValueError("This dataset do not have age or sex values. "
"Please add the flag --ignore_demographics to split "
"without trying to balance age or sex distributions.")
sex = list(diagnosis_baseline_df[sex_label].values)
age = list(diagnosis_baseline_df[age_label].values)
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
# Find similarity of distribution for the age variable
if len(set(age)) != 1:
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train]
t_age, p_age = ttest_ind(age_test, age_train)
else:
p_age = 1
            # Find a similar distribution for the sex variable
if len(set(sex)) != 1:
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train]
T_sex = chi2(sex_test, sex_train)
else:
T_sex = 0
logger.debug("p=%.2f, T=%.4f" % (p_age, T_sex))
if T_sex < t_val_chi2_threshold and p_age > pval_threshold_ttest:
flag_selection = False
test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
n_try += 1
logger.info("Split for diagnosis %s was found after %i trials" % (diagnosis, n_try))
else:
idx = np.arange(len(diagnosis_baseline_df))
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
return train_df, test_df
def split_diagnoses(formatted_data_path,
n_test=100, subset_name="test", MCI_sub_categories=True,
t_val_threshold=0.0642, p_val_threshold=0.80,
ignore_demographics=False, verbose=0):
"""
Performs a single split for each label independently on the subject level.
The train folder will contain two lists per diagnosis (baseline and longitudinal),
whereas the test folder will only include the list of baseline sessions.
The age and sex distributions between the two sets must be non-significant (according to T-test and chi-square).
Args:
formatted_data_path (str): Path to the folder containing data extracted by clinicaaddl tsvtool getlabels.
n_test (float):
If > 1, number of subjects to put in set with name 'subset_name'.
If < 1, proportion of subjects to put in set with name 'subset_name'.
If 0, no training set is created and the whole dataset is considered as one set with name 'subset_name'.
subset_name (str): Name of the subset that is complementary to train.
MCI_sub_categories (bool): If True, manages MCI sub-categories to avoid data leakage.
t_val_threshold (float): The threshold used for the chi2 test on sex distributions.
p_val_threshold (float): The threshold used for the T-test on age distributions.
ignore_demographics (bool): If True the diagnoses are split without taking into account the demographics
distributions (age, sex).
verbose (int): level of verbosity.
Returns:
writes three files per <label>.tsv file present in formatted_data_path:
- formatted_data_path/train/<label>.tsv
- formatted_data_path/train/<label>_baseline.tsv
- formatted_data_path/<subset_name>/<label>_baseline.tsv
"""
logger = return_logger(verbose, "split")
# Read files
results_path = formatted_data_path
train_path = path.join(results_path, 'train')
if path.exists(train_path):
shutil.rmtree(train_path)
if n_test > 0:
os.makedirs(train_path)
test_path = path.join(results_path, subset_name)
if path.exists(test_path):
shutil.rmtree(test_path)
os.makedirs(test_path)
diagnosis_df_paths = os.listdir(results_path)
diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith('.tsv')]
diagnosis_df_paths = [x for x in diagnosis_df_paths if not x.endswith('_baseline.tsv')]
MCI_special_treatment = False
if 'MCI.tsv' in diagnosis_df_paths and n_test > 0:
if MCI_sub_categories:
diagnosis_df_paths.remove('MCI.tsv')
MCI_special_treatment = True
elif 'sMCI.tsv' in diagnosis_df_paths or 'pMCI.tsv' in diagnosis_df_paths:
logger.warning("MCI special treatment was deactivated though MCI subgroups were found."
"Be aware that it may cause data leakage in transfer learning tasks.")
# The baseline session must be kept before or we are taking all the sessions to mix them
for diagnosis_df_path in diagnosis_df_paths:
diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path),
sep='\t')
interest_columns = diagnosis_df.columns.values
diagnosis = diagnosis_df_path.split('.')[0]
logger.info("Running split for diagnosis %s" % diagnosis)
if n_test > 0:
train_df, test_df = create_split(diagnosis, diagnosis_df, n_test=n_test,
t_val_chi2_threshold=t_val_threshold,
pval_threshold_ttest=p_val_threshold,
ignore_demographics=ignore_demographics,
logger=logger)
# Save baseline splits
train_df = train_df[interest_columns]
train_df.to_csv(path.join(train_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
test_df = test_df[interest_columns]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
complete_train_df = pd.DataFrame()
for idx in train_df.index.values:
subject = train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
complete_train_df = pd.concat([complete_train_df, subject_df])
complete_train_df.to_csv(path.join(train_path, str(diagnosis) + '.tsv'), sep='\t', index=False)
else:
diagnosis_baseline_df = baseline_df(diagnosis_df)
test_df = diagnosis_baseline_df[interest_columns]
test_df.to_csv(path.join(test_path, str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
if MCI_special_treatment:
# Extraction of MCI subjects without intersection with the sMCI / pMCI train
diagnosis_df = pd.read_csv(path.join(results_path, 'MCI.tsv'), sep='\t')
MCI_df = diagnosis_df.set_index(['participant_id', 'session_id'])
baseline_MCI_df = baseline_df(MCI_df, set_index=False)
supplementary_diagnoses = []
if n_test > 1:
n_test = int(n_test)
else:
n_test = int(n_test * len(baseline_MCI_df))
logger.debug('Before subjects removal for MCI special treatment')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(diagnosis_df)))
if 'sMCI.tsv' in diagnosis_df_paths:
sMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'sMCI_baseline.tsv'), sep='\t')
sMCI_baseline_df = pd.concat([sMCI_baseline_train_df, sMCI_baseline_test_df])
sMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in sMCI_baseline_df.index.values:
subject = sMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('sMCI')
logger.debug('Removed %i subjects based on sMCI label' % len(sMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if 'pMCI.tsv' in diagnosis_df_paths:
pMCI_baseline_train_df = pd.read_csv(path.join(train_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_test_df = pd.read_csv(path.join(test_path, 'pMCI_baseline.tsv'), sep='\t')
pMCI_baseline_df = pd.concat([pMCI_baseline_train_df, pMCI_baseline_test_df])
pMCI_baseline_df.reset_index(drop=True, inplace=True)
for idx in pMCI_baseline_df.index.values:
subject = pMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True)
supplementary_diagnoses.append('pMCI')
logger.debug('Removed %i subjects based on pMCI label' % len(pMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if len(supplementary_diagnoses) == 0:
raise ValueError('The MCI_sub_categories flag is not needed as there are no intersections with'
'MCI subcategories.')
# Construction of supplementary train
supplementary_train_df = pd.DataFrame()
for diagnosis in supplementary_diagnoses:
sup_baseline_train_df = pd.read_csv(path.join(train_path, diagnosis + '_baseline.tsv'), sep='\t')
supplementary_train_df = pd.concat([supplementary_train_df, sup_baseline_train_df])
sub_df = supplementary_train_df.reset_index().groupby('participant_id')['session_id'].nunique()
logger.debug('supplementary_train_df %i subjects, %i scans' % (len(sub_df), len(supplementary_train_df)))
supplementary_train_df.reset_index(drop=True, inplace=True)
# MCI selection
MCI_df.reset_index(inplace=True)
diagnosis_baseline_df = baseline_df(MCI_df)
if not ignore_demographics:
sex_label = find_label(diagnosis_baseline_df.columns.values, "sex")
age_label = find_label(diagnosis_baseline_df.columns.values, "age")
sex = list(diagnosis_baseline_df[sex_label].values)
age = list(diagnosis_baseline_df[age_label].values)
sup_train_sex = list(supplementary_train_df[sex_label].values)
sup_train_age = list(supplementary_train_df[age_label].values)
sup_train_sex = [sex_dict[x] for x in sup_train_sex]
sup_train_age = [float(x) for x in sup_train_age]
idx = np.arange(len(diagnosis_baseline_df))
flag_selection = True
n_try = 0
while flag_selection:
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
# Find similarity of distribution for the age variable
if len(set(age)) != 1:
age_test = [float(age[idx]) for idx in idx_test]
age_train = [float(age[idx]) for idx in idx_train]
t_age, p_age = ttest_ind(age_test, age_train)
else:
p_age = 1
# Find similarity of distribution for the sex variable
if len(set(sex)) != 1:
sex_test = [sex_dict[sex[idx]] for idx in idx_test]
sex_train = [sex_dict[sex[idx]] for idx in idx_train]
T_sex = chi2(sex_test, sex_train)
else:
T_sex = 0
logger.debug("p=%.2f, T=%.4f" % (p_age, T_sex))
if T_sex < t_val_threshold and p_age > p_val_threshold:
flag_selection = False
MCI_baseline_test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])
logger.debug('Supplementary train df %i' % len(supplementary_train_df))
MCI_baseline_train_df.reset_index(drop=True, inplace=True)
n_try += 1
logger.info('Split for diagnosis MCI was found after %i trials' % n_try)
else:
idx = np.arange(len(diagnosis_baseline_df))
idx_test = np.random.choice(idx, size=n_test, replace=False)
idx_test.sort()
idx_train = complementary_list(idx, idx_test)
MCI_baseline_test_df = diagnosis_baseline_df.loc[idx_test]
train_df = diagnosis_baseline_df.loc[idx_train]
MCI_baseline_train_df = pd.concat([train_df, supplementary_train_df])
MCI_baseline_train_df.reset_index(drop=True, inplace=True)
# Write selection of MCI
MCI_baseline_train_df = MCI_baseline_train_df[interest_columns]
MCI_baseline_train_df.to_csv(path.join(train_path, 'MCI_baseline.tsv'), sep='\t', index=False)
MCI_baseline_test_df = MCI_baseline_test_df[interest_columns]
MCI_baseline_test_df.to_csv(path.join(test_path, 'MCI_baseline.tsv'), sep='\t', index=False)
# Retrieve all sessions for the training set
MCI_complete_train_df = pd.DataFrame()
for idx in MCI_baseline_train_df.index.values:
subject = MCI_baseline_train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
MCI_complete_train_df = pd.concat([MCI_complete_train_df, subject_df])
MCI_complete_train_df.to_csv(path.join(train_path, 'MCI.tsv'), sep='\t', index=False)
|
the-stack_0_837 | # -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun
class ConstantOfShape(OpRun):
atts = {'value': numpy.array([0], dtype=numpy.float32)}
def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
expected_attributes=ConstantOfShape.atts,
**options)
self.cst = (self.value[0]
if isinstance(self.value, numpy.ndarray)
else self.value)
if not isinstance(self.cst, (float, numpy.float32, numpy.float64)):
raise TypeError("cst must be a real not {}".format(type(self.cst)))
def _run(self, data): # pylint: disable=W0221
res = numpy.full(tuple(data), self.cst)
return (res, )
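

# Illustrative behaviour (added note): with the default value (0.0) and an input
# shape tensor data = [2, 3], _run returns a single 2x3 array filled with 0.0.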
|
the-stack_0_838 | #!/usr/bin/env python3
import argparse
import json
import os
from patrace import (
InputFile,
OutputFile,
Call,
CreateInt32Value,
)
class Arg:
def __init__(self, type, name, value):
self.type = type
self.name = name
self.value = value
def get(self):
arg = self.type(self.value)
if self.name:
arg.mName = self.name
return arg
class Function:
def __init__(self, name, args):
self.name = name
self.args = args
def write(self, output, tid):
call = Call(self.name)
call.thread_id = tid
for arg in self.args[1:]:
call.args.push_back(arg.get())
call.return_value = self.args[0].get()
output.WriteCall(call)
class Remapper:
def __init__(self):
self.num_calls_remapped = 0
def run(self, input, output):
# Modify header, if we are remaping the default tid
header = json.loads(input.jsonHeader)
default_tid = header['defaultTid']
output.jsonHeader = json.dumps(header)
print('Searching for relevant calls...')
call_lists = {
'eglMakeCurrent': [],
'eglCreateContext': [],
'eglDestroyContext': [],
}
context_calls = []
highest_thread_id = -1
for call in input.Calls():
highest_thread_id = max(call.thread_id, highest_thread_id)
# call_list = call_lists.get(call.name, None)
if call.name in list(call_lists.keys()):
context_calls.append({
'name': call.name,
'tid': call.thread_id,
'params': call.GetArgumentsDict().copy(),
'retval': call.GetReturnValue(),
'number': call.number,
})
# if call_list is not None:
# call_list.append({
# 'call_name': call.name,
# 'tid': call.thread_id,
# 'params': call.GetArgumentsDict(),
# 'retval': call.GetReturnValue(),
# 'number': call.number,
# })
num_threads = highest_thread_id + 1
print("Renumbering context ids...")
# Sometimes, contexts can get the same pointer values
# Hence, the contexts pointers will not be unique. Therefor,
# we create an unique, sequential id.
context_sequential_id = 1
# Maps original context id with sequential context id.
contexts_idmap = {0: 0}
for call in context_calls:
if call['name'] == 'eglCreateContext':
contexts_idmap[call['retval']] = context_sequential_id
call['retval'] = context_sequential_id
context_sequential_id += 1
elif call['name'] == 'eglDestroyContext':
old_id = call['params']['ctx']
seq_id = contexts_idmap[old_id]
del contexts_idmap[old_id]
call['params']['ctx'] = seq_id
elif call['name'] == 'eglMakeCurrent':
# Change ctx parameter to our new sequential id
call['params']['ctx'] = contexts_idmap[call['params']['ctx']]
print("Finding relevant context and surfaces...")
make_current_args = [
(call['params']['draw'], call['params']['ctx'])
for call in context_calls
if (
call['name'] == 'eglMakeCurrent'
# Excluding the following test made things work for GunJack
# call['tid'] in [default_tid, 0]
)
]
import pprint
pprint.pprint(make_current_args)
surfaces = []
contexts = []
for draw, ctx in make_current_args:
if draw:
surfaces.append(draw)
if ctx:
contexts.append(ctx)
# Find all relevant shared contexts
shared_contexts = []
for context in contexts:
for context_call in context_calls:
if context_call['name'] != 'eglCreateContext':
continue
if context_call['retval'] == context:
shared_contexts.append(context_call['params']['share_context'])
for share_context in shared_contexts:
contexts.append(share_context)
contexts = set(contexts)
surfaces = set(surfaces)
print("Surfaces {}".format(surfaces))
print("Contexts: {}".format(contexts))
class Thread:
def __init__(self):
self.current_ctx_seq = 0
self.current_ctx_old = 0
self.remap = 0
threads = [Thread() for i in range(num_threads)]
# Used to indicate if inside a relevant "eglMakeCurrent-block"
print("Remap calls...")
contextid_to_use = None
contexts_idmap = {0: 0}
context_sequential_id = 1
active_thread = -1
for call in input.Calls():
current_thread = call.thread_id
thread_switch = False
if active_thread != current_thread:
thread_switch = True
active_thread = current_thread
if call.name == 'eglCreateContext':
oldid = call.GetReturnValue()
contexts_idmap[oldid] = context_sequential_id
if context_sequential_id in contexts:
contextid_to_use = oldid
print("We will map all calls of the context:", contextid_to_use)
self.remap(call, default_tid)
context_sequential_id += 1
elif call.name == 'eglDestroyContext':
ad = call.GetArgumentsDict()
oldid = ad['ctx']
# seqid = contexts_idmap[oldid]
del contexts_idmap[oldid]
elif (
call.name.startswith('eglCreateWindowSurface') or
call.name == 'eglCreatePbufferSurface'
):
if call.GetReturnValue() in surfaces:
self.remap(call, default_tid)
elif call.name == 'eglDestroySurface':
ad = call.GetArgumentsDict()
if ad['surface'] in surfaces:
self.remap(call, default_tid)
elif call.name == 'eglMakeCurrent':
t = threads[call.thread_id]
ad = call.GetArgumentsDict()
t.current_dpy = ad['dpy']
t.current_draw = ad['draw']
t.current_read = ad['read']
t.current_ctx_old = ad['ctx']
t.current_ctx_seq = contexts_idmap[ad['ctx']]
if t.current_ctx_seq in contexts:
# call.SetArgument(3, contextid_to_use)
t.remap = True
if ad['ctx'] == 0:
t.remap = False
if threads[call.thread_id].remap:
# If a context is already active on the default thread
# We need to inject an eglMakeCurrent the first time
if thread_switch and call.name != 'eglMakeCurrent':
t = threads[call.thread_id]
Function(
'eglMakeCurrent', [
Arg(CreateInt32Value, '', 1),
Arg(CreateInt32Value, 'dpy', t.current_dpy),
Arg(CreateInt32Value, 'draw', t.current_draw),
Arg(CreateInt32Value, 'read', t.current_read),
Arg(CreateInt32Value, 'ctx', t.current_ctx_old),
]
).write(output, default_tid)
self.remap(call, default_tid)
output.WriteCall(call)
def remap(self, call, newtid):
call.thread_id = newtid
self.num_calls_remapped += 1
def remap(oldfile, newfile):
remapper = Remapper()
if not os.path.exists(oldfile):
print("File does not exists: {}".format(oldfile))
return
with InputFile(oldfile) as input:
with OutputFile(newfile) as output:
remapper.run(input, output)
return remapper.num_calls_remapped
def main():
parser = argparse.ArgumentParser(description='Automatically remap thread ids in a .pat trace. This should be used if an eglContext is used by more threads than the default thread.')
parser.add_argument('oldfile', help='Path to the .pat trace file')
parser.add_argument('newfile', help='New .pat file to create')
args = parser.parse_args()
num = remap(args.oldfile, args.newfile)
print("Number of calls remapped {num}".format(num=num))
if __name__ == '__main__':
main()
|
the-stack_0_841 | import sympy as sym
# Computing with Dirichlet conditions: -u''=2 and sines
x, L = sym.symbols('x L')
e_Galerkin = x*(L-x) - 8*L**2*sym.pi**(-3)*sym.sin(sym.pi*x/L)
e_colloc = x*(L-x) - 2*L**2*sym.pi**(-2)*sym.sin(sym.pi*x/L)
# Verify max error for x=L/2
dedx_Galerkin = sym.diff(e_Galerkin, x)
print((dedx_Galerkin.subs(x, L/2)))
dedx_colloc = sym.diff(e_colloc, x)
print((dedx_colloc.subs(x, L/2)))
# Compute max error: x=L/2, evaluate numerical, and simplify
print(('Max error Galerkin/least.sq.:', \
sym.simplify(e_Galerkin.subs(x, L/2).evalf(n=3))))
print(('Max error colloc.:', \
sym.simplify(e_colloc.subs(x, L/2).evalf(n=3))))
import sys
#sys.exit(0)
# Computing with Neumann and Dirichlet conditions: -u''=2
x, C, D = sym.symbols('x C D')
i, j = sym.symbols('i j', integer=True)
integrand = (i+1)*(j+1)*(1-x)**(i+j)
A_ij = sym.integrate(integrand, (x, 0, 1))
A_ij = sym.simplify(A_ij)
print(A_ij)
psi_i = (1-x)**(i+1)
integrand = 2*psi_i - D*(i+1)*(1-x)**i
b_i = sym.integrate(integrand, (x, 0, 1)) - C*psi_i.subs(x, 0)
b_i = sym.factor(sym.simplify(b_i))
print(b_i)
print((sym.expand(2 - (2+i)*(D+C))))
# Solving model2 problem with f(x) and fe1D.py
from u_xx_f_sympy import model2, x, C, D, L
m = 2
u = model2(x**m, L, C, D)
print(u)
#u_exact = lambda x: D + C*(x-L) + (1./6)*(L**3 - x**3)
u_exact = sym.lambdify([x, C, D, L], u)
import numpy as np
from fe1D import finite_element1D_naive, mesh_uniform
# Override C, D and L with numeric values
C = 5
D = 2
L = 4
d = 1
vertices, cells, dof_map = mesh_uniform(
N_e=2, d=d, Omega=[0,L], symbolic=False)
vertices[1] = 3
essbc = {}
essbc[dof_map[-1][-1]] = D
c, A, b, timing = finite_element1D_naive(
vertices, cells, dof_map,
essbc,
ilhs=lambda e, phi, r, s, X, x, h:
phi[1][r](X, h)*phi[1][s](X, h),
irhs=lambda e, phi, r, X, x, h:
x**m*phi[0][r](X),
blhs=lambda e, phi, r, s, X, x, h: 0,
brhs=lambda e, phi, r, X, x, h:
-C*phi[0][r](-1) if e == 0 else 0,
intrule='GaussLegendre',
verbose=False,
)
# Visualize
from fe1D import u_glob
x, u, nodes = u_glob(c, cells, vertices, dof_map)
u_e = u_exact(x, C, D, L)
print((u_exact(nodes, C, D, L) - c)) # difference at the nodes
import matplotlib.pyplot as plt
plt.plot(x, u, 'b-', x, u_e, 'r--')
plt.legend(['finite elements, d=%d' %d, 'exact'], loc='upper left')
plt.savefig('tmp.png'); plt.savefig('tmp.pdf')
plt.show()
|
the-stack_0_842 | # -*- coding: utf-8 -*-
"""
This module exports functions to initialize the Flask application.
"""
import random
from typing import Callable, Dict
import flask
import flask_babel
import orchard.errors
import orchard.extensions
import orchard.system_status
def create_app(config: str = 'Development') -> flask.Flask:
"""
Create and initialize the Flask application.
:param config: The name of the configuration class, valid values are ``Development``
(default), ``Production``, and ``Testing``.
:return: The initialized Flask application.
"""
configuration_values = {'Development', 'Production', 'Testing'}
if config in configuration_values:
config = 'orchard.configuration.{config}'.format(config = config)
else: # pragma: no cover.
config = 'orchard.configuration.Development'
name = __name__.split('.')[0]
app = flask.Flask(name, instance_relative_config = True)
app.config.from_object(config)
app.config.from_object('instance.Configuration')
# Always use English as default language during testing.
if app.testing: # pragma: no branch.
app.config['BABEL_DEFAULT_LOCALE'] = 'en'
_configure_blueprints(app)
_configure_context_processor(app)
_configure_extensions(app)
_configure_logging(app)
_configure_request_handlers(app)
return app
def _configure_blueprints(app: flask.Flask):
"""
Register the blueprints.
:param app: The application instance.
"""
app.register_blueprint(orchard.errors.blueprint)
app.register_blueprint(orchard.system_status.blueprint)
def _configure_context_processor(app: flask.Flask):
"""
Set up the global context processors.
:param app: The application instance.
"""
@app.context_processor
def inject_jinja2() -> Dict[str, Callable]:
"""
Inject more functions into the scope of Jinja2 templates.
        :return: A dictionary of additional functions made available to Jinja2 templates.
"""
jinja2_functions = {
'hasattr': hasattr,
'random_int': random.randint
}
return jinja2_functions
def _configure_extensions(app: flask.Flask):
"""
Register the extensions with the app and configure them as needed.
:param app: The application instance.
"""
orchard.extensions.babel.init_app(app)
orchard.extensions.cache.init_app(app)
def _configure_logging(app: flask.Flask): # pragma: no cover.
"""
Set up a file and a mail logger, unless the app is being debugged or tested.
:param app: The application instance.
"""
if app.debug or app.testing:
return
# noinspection PyUnresolvedReferences
import logging
import logging.handlers
import os
# Set up the file logger.
log_path = app.config['LOG_PATH']
if not os.path.isdir(log_path):
os.makedirs(log_path)
log_file = os.path.join(log_path, '{file_name}.log'.format(file_name = app.name))
log_format = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
file_handler = logging.handlers.RotatingFileHandler(log_file, 'a', 1 * 1024 * 1024, 10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter(log_format))
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('{name} Startup'.format(name = app.config['PROJECT_NAME']))
# Set up the mail logger.
if app.config.get('MAIL_SERVER', '') == '':
return
credentials = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
server = (app.config['MAIL_SERVER'], app.config['MAIL_PORT'])
sender = app.config['MAIL_FROM']
receivers = app.config['ADMINS']
subject = '{name} Failure'.format(name = app.config['PROJECT_NAME'])
secure = None
if app.config['MAIL_SSL']:
secure = ()
mail_handler = logging.handlers.SMTPHandler(server, sender, receivers, subject, credentials,
secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
def _configure_request_handlers(app: flask.Flask):
"""
Set up the global before and after request handlers.
:param app: The application instance.
"""
@app.before_request
def before_request():
"""
Set up a few things before handling the actual request.
"""
flask.g.locale = flask_babel.get_locale()
flask.g.project_name = app.config['PROJECT_NAME']
# Set a default title.
flask.g.title = app.config['PROJECT_NAME']
@app.after_request
def after_request(response: flask.Response) -> flask.Response:
"""
Modify the response after the request has been handled.
:return: The modified response.
"""
# http://www.gnuterrypratchett.com/
response.headers.add("X-Clacks-Overhead", "GNU Terry Pratchett")
return response
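

# Illustrative entry point (added sketch, not part of the original module); it
# assumes an ``instance.Configuration`` object exists, as create_app() requires.
if __name__ == '__main__':  # pragma: no cover.
    create_app('Development').run()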
|
the-stack_0_843 | import functools
import re
from typing import Any, Dict, Optional, Tuple, Union
from urllib.parse import urlsplit
from django.apps import apps
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest, JsonResponse
from django.utils import timezone
from rest_framework import authentication
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.request import Request
class PersonalAPIKeyAuthentication(authentication.BaseAuthentication):
"""A way of authenticating with personal API keys.
Only the first key candidate found in the request is tried, and the order is:
1. Request Authorization header of type Bearer.
2. Request body.
3. Request query string.
"""
keyword = "Bearer"
@classmethod
def find_key_with_source(
cls,
request: Union[HttpRequest, Request],
request_data: Optional[Dict[str, Any]] = None,
extra_data: Optional[Dict[str, Any]] = None,
) -> Optional[Tuple[str, str]]:
"""Try to find personal API key in request and return it along with where it was found."""
if "HTTP_AUTHORIZATION" in request.META:
authorization_match = re.match(fr"^{cls.keyword}\s+(\S.+)$", request.META["HTTP_AUTHORIZATION"])
if authorization_match:
return authorization_match.group(1).strip(), "Authorization header"
data = request.data if request_data is None and isinstance(request, Request) else request_data
if data and "personal_api_key" in data:
return data["personal_api_key"], "body"
if "personal_api_key" in request.GET:
return request.GET["personal_api_key"], "query string"
if extra_data and "personal_api_key" in extra_data:
# compatibility with /capture endpoint
return extra_data["personal_api_key"], "query string data"
return None
@classmethod
def find_key(
cls,
request: Union[HttpRequest, Request],
request_data: Optional[Dict[str, Any]] = None,
extra_data: Optional[Dict[str, Any]] = None,
) -> Optional[str]:
"""Try to find personal API key in request and return it."""
key_with_source = cls.find_key_with_source(request, request_data, extra_data)
return key_with_source[0] if key_with_source is not None else None
@classmethod
def authenticate(cls, request: Union[HttpRequest, Request]) -> Optional[Tuple[Any, None]]:
personal_api_key_with_source = cls.find_key_with_source(request)
if not personal_api_key_with_source:
return None
personal_api_key, source = personal_api_key_with_source
PersonalAPIKey = apps.get_model(app_label="posthog", model_name="PersonalAPIKey")
try:
personal_api_key_object = (
PersonalAPIKey.objects.select_related("user").filter(user__is_active=True).get(value=personal_api_key)
)
except PersonalAPIKey.DoesNotExist:
raise AuthenticationFailed(detail=f"Personal API key found in request {source} is invalid.")
personal_api_key_object.last_used_at = timezone.now()
personal_api_key_object.save()
assert personal_api_key_object.user is not None
return personal_api_key_object.user, None
@classmethod
def authenticate_header(cls, request) -> str:
return cls.keyword
class TemporaryTokenAuthentication(authentication.BaseAuthentication):
def authenticate(self, request: Request):
# if the Origin is different, the only authentication method should be temporary_token
# This happens when someone is trying to create actions from the editor on their own website
if (
request.headers.get("Origin")
and urlsplit(request.headers["Origin"]).netloc not in urlsplit(request.build_absolute_uri("/")).netloc
):
if not request.GET.get("temporary_token"):
raise AuthenticationFailed(
detail="No temporary_token set. "
+ "That means you're either trying to access this API from a different site, "
+ "or it means your proxy isn't sending the correct headers. "
+ "See https://posthog.com/docs/deployment/running-behind-proxy for more information."
)
if request.GET.get("temporary_token"):
User = apps.get_model(app_label="posthog", model_name="User")
user = User.objects.filter(temporary_token=request.GET.get("temporary_token"))
if not user.exists():
raise AuthenticationFailed(detail="User doesn't exist")
return (user.first(), None)
return None
class PublicTokenAuthentication(authentication.BaseAuthentication):
def authenticate(self, request: Request):
if request.GET.get("share_token") and request.parser_context and request.parser_context.get("kwargs"):
Dashboard = apps.get_model(app_label="posthog", model_name="Dashboard")
dashboard = Dashboard.objects.filter(
share_token=request.GET.get("share_token"), pk=request.parser_context["kwargs"].get("pk"),
).first()
if dashboard is None:
raise AuthenticationFailed(detail="Dashboard doesn't exist")
if dashboard.team.organization.for_internal_metrics:
return None
return (AnonymousUser(), None)
return None
def authenticate_secondarily(endpoint):
"""
DEPRECATED: Used for supporting legacy endpoints not on DRF.
Authentication for function views.
"""
@functools.wraps(endpoint)
def wrapper(request: HttpRequest):
if not request.user.is_authenticated:
try:
auth_result = PersonalAPIKeyAuthentication.authenticate(request)
if isinstance(auth_result, tuple) and auth_result[0].__class__.__name__ == "User":
request.user = auth_result[0]
else:
raise AuthenticationFailed("Authentication credentials were not provided.")
except AuthenticationFailed as e:
return JsonResponse({"detail": e.detail}, status=401)
return endpoint(request)
return wrapper
|
the-stack_0_844 | import attr
import logging
import os
from datetime import datetime
from feedparser import parse as parse_feed
from typing import List, Optional
from telegram_rss.config import FeedConfig
from telegram_rss.utils import save_as, get_default_directory, load_dict
from . import Entry, Channel, Feed
class FeedUpdater:
def __init__(self, feed_config: FeedConfig, ext: str = ".json"):
self.feed_config = feed_config
self._feed: Optional[Feed] = None
self._local_feed: Optional[Feed] = None
self.local_file = os.path.join(
get_default_directory(),
"data",
f"{self.feed_config.name}" + ext,
)
self.logger = logging.getLogger(feed_config.name)
def __call__(self, save: bool = True) -> List[Entry]:
return self.get_new_entries(save=save)
def get_new_entries(self, save: bool = True) -> List[Entry]:
entries: List[Entry] = list()
if not self.feed or self.feed == self.local_feed:
self.logger.info("No new feeds found")
return entries
for feed in self.feed:
if feed in entries:
continue
if feed not in self.local_feed:
entries.append(feed)
if not entries:
self.logger.debug("All feeds aleady in local_feeds")
return entries
        if self.feed_config.only_today:
            self.logger.debug("Filter feeds published only today")
            today = datetime.now().date()
            # Rebuild the list instead of deleting items while iterating by
            # index, which skips entries and can raise IndexError.
            entries = [
                entry
                for entry in entries
                if not entry.time or entry.time.date() == today
            ]
self.logger.info(f"Found new {len(entries)} feeds")
if entries and save:
self.save_feed(self.feed)
self.logger.debug(f"Saved {len(entries)} as {self.local_file}")
return entries
@property
def feed(self) -> Feed:
if self._feed:
return self._feed
if self.feed_config.save_bandwith:
raw_feed = parse_feed(
self.feed_config.source,
etag=self.feed_config.etag,
modified=self.feed_config.modified,
)
else:
raw_feed = parse_feed(self.feed_config.source)
if raw_feed.status == 304:
return Feed()
self.feed_config.etag = raw_feed.etag
self.feed_config.modified = raw_feed.modified
self._feed = Feed.from_feedparser(raw_feed)
return self._feed
@property
def local_feed(self) -> Feed:
if self._local_feed:
return self._local_feed
if not os.path.isfile(self.local_file):
return Feed()
feed_data = load_dict(self.local_file)
self._local_feed = Feed(**feed_data)
return self._local_feed
def save_feed(self, feed: Feed):
feed_data = attr.asdict(feed, recurse=True)
save_as(feed_data, self.local_file)
@property
def channel(self) -> Optional[Channel]:
return self.feed.channel or self.local_feed.channel
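

# Illustrative usage (added for clarity; names follow the imports above):
#   updater = FeedUpdater(feed_config)
#   new_entries = updater()  # parse the source, diff against the local cache,
#                            # persist the new state, and return only new entries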
|
the-stack_0_845 | #Imports library
import socket
#Creates instance of 'Socket'
s = socket.socket()
hostname = 'tutorialspi' #Server IP/Hostname
port = 8000 #Server Port
s.connect((hostname,port)) #Connects to server
while True:
x = raw_input("Enter message: ") #Gets the message to be sent
s.send(x.encode()) #Encodes and sends message (x)
|
the-stack_0_846 | from cs50 import get_string
import re
def letters_counter(t, a):
c = 0
for i in t:
if i in a or i in [j.upper() for j in a]:
c += 1
return c
def words_counter(t):
match = re.split(" ", t)
return len(match)
def sentences_counter(t):
match = re.split("[.!?]", t)
return len(match) - 1
def calculate(lc, wc, sc):
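    # Coleman-Liau index: 0.0588 * L - 0.296 * S - 15.8, where L is the average
    # number of letters per 100 words and S the average number of sentences per
    # 100 words.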
l = (lc / wc) * 100
s = (sc / wc) * 100
index = 0.0588 * l - 0.296 * s - 15.8
return round(index)
alphabet = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
text = get_string("Text: ")
letter_count = letters_counter(text, alphabet)
word_count = words_counter(text)
sentece_count = sentences_counter(text)
calc = calculate(letter_count, word_count, sentece_count)
if calc < 1:
print("Before Grade 1")
elif calc >= 16:
print("Grade 16+")
else:
print(f"Grade {calc}") |
the-stack_0_847 | import pytest
from .common import JSON, Cookies, Headers, Query, Resp, get_paths
from .test_plugin_falcon import api as falcon_api
from .test_plugin_flask import api as flask_api
from .test_plugin_flask_blueprint import api as flask_bp_api
from .test_plugin_flask_view import api as flask_view_api
from .test_plugin_starlette import api as starlette_api
@pytest.mark.parametrize(
"api", [flask_api, flask_bp_api, flask_view_api, falcon_api, starlette_api]
)
def test_plugin_spec(api):
models = {
m.__name__: m.schema(ref_template="#/components/schemas/{model}")
for m in (Query, JSON, Resp, Cookies, Headers)
}
for name, schema in models.items():
assert api.spec["components"]["schemas"][name] == schema
assert api.spec["tags"] == [{"name": tag} for tag in ("test", "health", "api")]
assert get_paths(api.spec) == [
"/api/user/{name}",
"/api/user_annotated/{name}",
"/ping",
]
ping = api.spec["paths"]["/ping"]["get"]
assert ping["tags"] == ["test", "health"]
assert ping["parameters"][0]["in"] == "header"
assert ping["summary"] == "summary"
assert ping["description"] == "description"
assert ping["operationId"] == "get_/ping"
user = api.spec["paths"]["/api/user/{name}"]["post"]
assert user["tags"] == ["api", "test"]
assert (
user["requestBody"]["content"]["application/json"]["schema"]["$ref"]
== "#/components/schemas/JSON"
)
assert len(user["responses"]) == 3
params = user["parameters"]
for param in params:
if param["in"] == "path":
assert param["name"] == "name"
elif param["in"] == "query":
assert param["name"] == "order"
|
the-stack_0_852 | from collections import OrderedDict
from collections.abc import Iterable
from cached_property import cached_property
import numpy as np
import sympy
from devito.finite_differences.finite_difference import (generic_derivative,
first_derivative,
cross_derivative)
from devito.finite_differences.differentiable import Differentiable
from devito.finite_differences.tools import direct, transpose
from devito.tools import as_mapper, as_tuple, filter_ordered, frozendict
from devito.types.array import Array
from devito.types.dimension import StencilDimension
from devito.types.utils import DimensionTuple
__all__ = ['Derivative', 'Weights']
class Derivative(sympy.Derivative, Differentiable):
"""
An unevaluated Derivative, which carries metadata (Dimensions,
derivative order, etc) describing how the derivative will be expanded
upon evaluation.
Parameters
----------
expr : expr-like
Expression for which the Derivative is produced.
dims : Dimension or tuple of Dimension
        Dimensions w.r.t. which to differentiate.
fd_order : int or tuple of int, optional
Coefficient discretization order. Note: this impacts the width of
the resulting stencil. Defaults to 1.
deriv_order: int or tuple of int, optional
Derivative order. Defaults to 1.
side : Side or tuple of Side, optional
Side of the finite difference location, centered (at x), left (at x - 1)
        or right (at x + 1). Defaults to ``centered``.
transpose : Transpose, optional
Forward (matvec=direct) or transpose (matvec=transpose) mode of the
finite difference. Defaults to ``direct``.
subs : dict, optional
Substitutions to apply to the finite-difference expression after evaluation.
x0 : dict, optional
Origin (where the finite-difference is evaluated at) for the finite-difference
scheme, e.g. {x: x, y: y + h_y/2}.
Examples
--------
Creation
>>> from devito import Function, Derivative, Grid
>>> grid = Grid((10, 10))
>>> x, y = grid.dimensions
>>> u = Function(name="u", grid=grid, space_order=2)
>>> Derivative(u, x)
Derivative(u(x, y), x)
This can also be obtained via the differential shortcut
>>> u.dx
Derivative(u(x, y), x)
You can also specify the order as a keyword argument
>>> Derivative(u, x, deriv_order=2)
Derivative(u(x, y), (x, 2))
Or as a tuple
>>> Derivative(u, (x, 2))
Derivative(u(x, y), (x, 2))
Once again, this can be obtained via shortcut notation
>>> u.dx2
Derivative(u(x, y), (x, 2))
    Derivative objects are also callable to change the default setup:
>>> u.dx2(x0=x + x.spacing)
Derivative(u(x, y), (x, 2))
will create the second derivative at x=x + x.spacing. Accepted arguments for dynamic
evaluation are `x0`, `fd_order` and `side`.
"""
_state = ('expr', 'dims', 'side', 'fd_order', 'transpose', '_ppsubs', 'x0')
_fd_priority = 3
def __new__(cls, expr, *dims, **kwargs):
if type(expr) == sympy.Derivative:
raise ValueError("Cannot nest sympy.Derivative with devito.Derivative")
if not isinstance(expr, Differentiable):
raise ValueError("`expr` must be a Differentiable object")
new_dims, orders, fd_o, var_count = cls._process_kwargs(expr, *dims, **kwargs)
# Construct the actual Derivative object
obj = Differentiable.__new__(cls, expr, *var_count)
obj._dims = tuple(OrderedDict.fromkeys(new_dims))
skip = kwargs.get('preprocessed', False) or obj.ndims == 1
obj._fd_order = fd_o if skip else DimensionTuple(*fd_o, getters=obj._dims)
obj._deriv_order = orders if skip else DimensionTuple(*orders, getters=obj._dims)
obj._side = kwargs.get("side")
obj._transpose = kwargs.get("transpose", direct)
obj._ppsubs = as_tuple(frozendict(i) for i in kwargs.get("subs", []))
obj._x0 = frozendict(kwargs.get('x0', {}))
return obj
@classmethod
def _process_kwargs(cls, expr, *dims, **kwargs):
"""
Process arguments for the construction of a Derivative
"""
        # Skip costly processing if constructing from a preprocessed Derivative
if kwargs.get('preprocessed', False):
fd_orders = kwargs.get('fd_order')
deriv_orders = kwargs.get('deriv_order')
if len(dims) == 1:
dims = tuple([dims[0]]*deriv_orders)
variable_count = [sympy.Tuple(s, dims.count(s))
for s in filter_ordered(dims)]
return dims, deriv_orders, fd_orders, variable_count
# Check `dims`. It can be a single Dimension, an iterable of Dimensions, or even
# an iterable of 2-tuple (Dimension, deriv_order)
if len(dims) == 0:
raise ValueError("Expected Dimension w.r.t. which to differentiate")
elif len(dims) == 1:
if isinstance(dims[0], Iterable):
# Iterable of Dimensions
if len(dims[0]) != 2:
raise ValueError("Expected `(dim, deriv_order)`, got %s" % dims[0])
orders = kwargs.get('deriv_order', dims[0][1])
if dims[0][1] != orders:
raise ValueError("Two different values of `deriv_order`")
new_dims = tuple([dims[0][0]]*dims[0][1])
else:
# Single Dimension
orders = kwargs.get('deriv_order', 1)
if isinstance(orders, Iterable):
orders = orders[0]
new_dims = tuple([dims[0]]*orders)
else:
# Iterable of 2-tuple, e.g. ((x, 2), (y, 3))
new_dims = []
orders = []
d_ord = kwargs.get('deriv_order', tuple([1]*len(dims)))
for d, o in zip(dims, d_ord):
if isinstance(d, Iterable):
new_dims.extend([d[0] for _ in range(d[1])])
orders.append(d[1])
else:
new_dims.extend([d for _ in range(o)])
orders.append(o)
new_dims = as_tuple(new_dims)
orders = as_tuple(orders)
# Finite difference orders depending on input dimension (.dt or .dx)
fd_orders = kwargs.get('fd_order', tuple([expr.time_order if
getattr(d, 'is_Time', False) else
expr.space_order for d in dims]))
if len(dims) == 1 and isinstance(fd_orders, Iterable):
fd_orders = fd_orders[0]
        # SymPy expects the list of variables w.r.t. which we differentiate to be a
        # list of 2-tuples `(s, count)`, where `s` is the entity to differentiate
        # w.r.t. and `count` is the order of the derivative
variable_count = [sympy.Tuple(s, new_dims.count(s))
for s in filter_ordered(new_dims)]
return new_dims, orders, fd_orders, variable_count
def __call__(self, x0=None, fd_order=None, side=None):
if self.ndims == 1:
_fd_order = fd_order or self._fd_order
_side = side or self._side
new_x0 = {self.dims[0]: x0} if x0 is not None else self.x0
return self._new_from_self(fd_order=_fd_order, side=_side, x0=new_x0)
if side is not None:
raise TypeError("Side only supported for first order single"
"Dimension derivative such as `.dxl` or .dx(side=left)")
# Cross derivative
_x0 = dict(self._x0)
_fd_order = dict(self.fd_order._getters)
try:
_fd_order.update(**(fd_order or {}))
_fd_order = tuple(_fd_order.values())
_fd_order = DimensionTuple(*_fd_order, getters=self.dims)
_x0.update(x0)
except AttributeError:
raise TypeError("Multi-dimensional Derivative, input expected as a dict")
return self._new_from_self(fd_order=_fd_order, x0=_x0)
def _new_from_self(self, **kwargs):
expr = kwargs.pop('expr', self.expr)
_kwargs = {'deriv_order': self.deriv_order, 'fd_order': self.fd_order,
'side': self.side, 'transpose': self.transpose, 'subs': self._ppsubs,
'x0': self.x0, 'preprocessed': True}
_kwargs.update(**kwargs)
return Derivative(expr, *self.dims, **_kwargs)
@property
def func(self):
return lambda *a, **kw: self._new_from_self(expr=a[0], **kw)
def subs(self, *args, **kwargs):
"""
Bypass sympy.Subs as Devito has its own lazy evaluation mechanism.
"""
try:
rules = dict(*args)
except TypeError:
rules = dict((args,))
kwargs.pop('simultaneous', None)
return self.xreplace(rules, **kwargs)
def _xreplace(self, subs):
"""
This is a helper method used internally by SymPy. We exploit it to postpone
substitutions until evaluation.
"""
subs = self._ppsubs + (subs,) # Postponed substitutions
return self._new_from_self(subs=subs), True
@cached_property
def _metadata(self):
state = list(self._state)
state.remove('expr')
ret = [getattr(self, i) for i in state]
ret.append(self.expr.staggered or (None,))
return tuple(ret)
@property
def dims(self):
return self._dims
@property
def ndims(self):
return len(self._dims)
@property
def x0(self):
return self._x0
@property
def fd_order(self):
return self._fd_order
@property
def deriv_order(self):
return self._deriv_order
@property
def side(self):
return self._side
@property
def transpose(self):
return self._transpose
@property
def is_TimeDependent(self):
return self.expr.is_TimeDependent
@property
def T(self):
"""Transpose of the Derivative.
FD derivatives can be represented as matrices and have adjoint/transpose.
This is really useful for more advanced FD definitions. For example
the conventional Laplacian is `.dxl.T * .dxl`
"""
if self._transpose == direct:
adjoint = transpose
else:
adjoint = direct
return self._new_from_self(transpose=adjoint)
def _eval_at(self, func):
"""
        Evaluates the derivative at the location of `func`. It is necessary for
        staggered setups where one could have Eq(u(x + h_x/2), v(x).dx), in which
        case v(x).dx has to be computed at x=x + h_x/2.
"""
# If an x0 already exists do not overwrite it
x0 = self.x0 or dict(func.indices_ref._getters)
if self.expr.is_Add:
# If `expr` has both staggered and non-staggered terms such as
# `(u(x + h_x/2) + v(x)).dx` then we exploit linearity of FD to split
# it into `u(x + h_x/2).dx` and `v(x).dx`, since they require
# different FD indices
mapper = as_mapper(self.expr._args_diff, lambda i: i.staggered)
args = [self.expr.func(*v) for v in mapper.values()]
args.extend([a for a in self.expr.args if a not in self.expr._args_diff])
args = [self._new_from_self(expr=a, x0=x0) for a in args]
return self.expr.func(*args)
elif self.expr.is_Mul:
            # For Mul, we treat the basic case `u(x + h_x/2) * v(x)`, which is what
            # appears in most equations with div(a * u), for example. The expression
            # is re-centered at the highest priority index (see _gather_for_diff) to
            # compute the derivative at x0.
return self._new_from_self(x0=x0, expr=self.expr._gather_for_diff)
else:
            # For every other case, with more functions or more complex arithmetic,
            # there is no actual way to decide what to do, so it is safest to use
            # the expression as is.
return self._new_from_self(x0=x0)
@property
def evaluate(self):
# Evaluate finite-difference.
# NOTE: `evaluate` and `_eval_fd` split for potential future different
# types of discretizations
return self._eval_fd(self.expr)
@property
def _eval_deriv(self):
return self._eval_fd(self.expr)
def _eval_fd(self, expr):
"""
Evaluate the finite-difference approximation of the Derivative.
Evaluation is carried out via the following three steps:
- 1: Evaluate derivatives within the expression. For example given
`f.dx * g`, `f.dx` will be evaluated first.
- 2: Evaluate the finite difference for the (new) expression.
                 This in turn is a two-step procedure, for Functions that may
                 need to be evaluated at a different point due to e.g. a
                 shifted derivative.
- 3: Apply substitutions.
"""
# Step 1: Evaluate derivatives within expression
try:
expr = expr._eval_deriv
except AttributeError:
pass
# Step 2: Evaluate FD of the new expression
if self.side is not None and self.deriv_order == 1:
res = first_derivative(expr, self.dims[0], self.fd_order,
side=self.side, matvec=self.transpose,
x0=self.x0)
elif len(self.dims) > 1:
res = cross_derivative(expr, self.dims, self.fd_order, self.deriv_order,
matvec=self.transpose, x0=self.x0)
else:
res = generic_derivative(expr, *self.dims, self.fd_order, self.deriv_order,
matvec=self.transpose, x0=self.x0)
# Step 3: Apply substitutions
for e in self._ppsubs:
res = res.xreplace(e)
return res
class Weights(Array):
"""
The weights (or coefficients) of a finite-difference expansion.
"""
def __init_finalize__(self, *args, **kwargs):
dimensions = as_tuple(kwargs.get('dimensions'))
weights = kwargs.get('initvalue')
assert len(dimensions) == 1
d = dimensions[0]
assert isinstance(d, StencilDimension) and d.symbolic_size == len(weights)
assert isinstance(weights, (list, tuple, np.ndarray))
kwargs['scope'] = 'static'
super().__init_finalize__(*args, **kwargs)
@property
def dimension(self):
return self.dimensions[0]
weights = Array.initvalue
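
# Hypothetical construction sketch (keyword names inferred from __init_finalize__ above;
# the exact Array/StencilDimension signatures are not shown in this file, so treat the
# arguments below as assumptions):
#
#     i = StencilDimension('i', 0, 2)   # assumed 3-point stencil dimension
#     w = Weights(name='w', dimensions=(i,), initvalue=[1.0, -2.0, 1.0])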
|
the-stack_0_853 | #!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Olivier Huin on 2010-02-20.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys
import os
# Each entry appears to be (short id, uuid, code, label, tag list) placeholders.
# A list is used instead of a set literal: the trailing tag lists are unhashable,
# so putting these tuples in a set would raise TypeError at import time.
activitykinds = [
    ('shortid', 'uuid', 'visiting', 'visiting', ['visiting']),
    ('shortid', 'uuid', 'booking', 'booking', ['booking']),
    ('shortid', 'uuid', 'learning', 'learning', ['learning']),
    ('shortid', 'uuid', 'eating', 'eating', ['eating']),
    ('shortid', 'uuid', 'drinking', 'drinking', ['drinking']),
    ('shortid', 'uuid', 'volunteering', 'volunteering', ['volunteering']),
    ('shortid', 'uuid', 'fundraising', 'fundraising', ['fundraising']),
]
|