hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a3c67e44f8f0cc9608424b5d8a9f9722cd23ab28 | 5,764 | py | Python | rcnn/PY_OP/rpn_fpn_ohem.py | templeblock/mxnet-SSHA | 4ef18f7ea7eca2178fffc70d6aaea4fd18a56de1 | ["MIT"] | 6 | 2018-12-06T09:39:31.000Z | 2019-03-08T02:50:22.000Z | rcnn/PY_OP/rpn_fpn_ohem.py | templeblock/mxnet-SSHA | 4ef18f7ea7eca2178fffc70d6aaea4fd18a56de1 | ["MIT"] | null | null | null | rcnn/PY_OP/rpn_fpn_ohem.py | templeblock/mxnet-SSHA | 4ef18f7ea7eca2178fffc70d6aaea4fd18a56de1 | ["MIT"] | null | null | null |
from __future__ import print_function
import sys
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from ..config import config
CALLING_COUNT=0
COUNT_THRESH_FG=0
COUNT_THRESH_BG=0
STAT = {0:0, 8:0, 16:0, 32:0}
ACC = {0:0, 8:0, 16:0, 32:0}
class RPNFPNOHEMOperator(mx.operator.CustomOp):
def __init__(self, stride=0):
super(RPNFPNOHEMOperator, self).__init__()
self.stride = int(stride)
def forward(self, is_train, req, in_data, out_data, aux):
global STAT
global ACC
cls_score = in_data[0].asnumpy() #BS, 2, ANCHORS
bbox_weight = in_data[1].asnumpy() #BS, 4*SCALES, featuremapsize
kpoint_weight = in_data[2].asnumpy()
labels_raw = in_data[3].asnumpy() # BS, ANCHORS
A = config.NUM_ANCHORS
#assert labels.shape[0]==1
#assert cls_score.shape[0]==1
#assert bbox_weight.shape[0]==1
#print('shape', cls_score.shape, labels.shape, file=sys.stderr)
#print('bbox_weight 0', bbox_weight.shape, file=sys.stderr)
#bbox_weight = np.zeros( (labels_raw.shape[0], labels_raw.shape[1], 4), dtype=np.float32)
for ibatch in xrange(labels_raw.shape[0]):
_bbox_weight = np.zeros( (labels_raw.shape[1], 4), dtype=np.float32)
_kpoint_weight = np.zeros( (labels_raw.shape[1], 10), dtype=np.float32)
labels = labels_raw[ibatch]
fg_score = cls_score[ibatch,1,:] - cls_score[ibatch,0,:]
num_fg = int(config.TRAIN.RPN_FG_FRACTION * config.TRAIN.RPN_BATCH_SIZE)
fg_inds = np.where(labels == 1)[0]
origin_num_fg = len(fg_inds)
if len(fg_inds) > num_fg:
if CALLING_COUNT<COUNT_THRESH_FG:
disable_inds = np.random.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
else:
pos_ohem_scores = fg_score[fg_inds]
order_pos_ohem_scores = pos_ohem_scores.ravel().argsort()
sampled_inds = fg_inds[order_pos_ohem_scores[:num_fg]]
labels[fg_inds] = -1
labels[sampled_inds] = 1
n_fg = np.sum(labels == 1)
fg_inds = np.where(labels == 1)[0]
STAT[0]+=1
STAT[self.stride] += n_fg
ACC[self.stride] += np.sum(fg_score[fg_inds]>=0)
if STAT[0]%9600==0:
S = {0: STAT[0]}
for k in STAT:
if k==0:
continue
acc = float(ACC[k])/STAT[k]
S[k] = (STAT[k], ACC[k], acc)
print('STAT ', S, file=sys.stderr)
for k in STAT:
STAT[k]=0
ACC[k] = 0
#print('ohem_calling_count', CALLING_COUNT, STAT, file=sys.stderr)
num_bg = config.TRAIN.RPN_BATCH_SIZE - n_fg
bg_inds = np.where(labels == 0)[0]
origin_num_bg = len(bg_inds)
if num_bg==0:
labels[bg_inds] = -1
elif len(bg_inds) > num_bg:
# sort ohem scores
if CALLING_COUNT<COUNT_THRESH_BG:
disable_inds = np.random.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
else:
neg_ohem_scores = fg_score[bg_inds]
order_neg_ohem_scores = neg_ohem_scores.ravel().argsort()[::-1]
sampled_inds = bg_inds[order_neg_ohem_scores[:num_bg]]
#print('sampled_inds_bg', sampled_inds, file=sys.stderr)
labels[bg_inds] = -1
labels[sampled_inds] = 0
if n_fg>0:
order0_labels = labels.reshape( (1, A, -1) ).transpose( (0, 2, 1) ).reshape( (-1,) )
bbox_fg_inds = np.where(order0_labels == 1)[0]
#print('bbox_fg_inds, order0 ', bbox_fg_inds, file=sys.stderr)
_bbox_weight[bbox_fg_inds,:] = np.array(config.TRAIN.RPN_BBOX_WEIGHTS)
_kpoint_weight[bbox_fg_inds,:] = np.array(config.TRAIN.RPN_KPOINT_WEIGHTS) #TODO
_bbox_weight = _bbox_weight.reshape((1, -1, A * 4)).transpose((0,2,1))
_kpoint_weight = _kpoint_weight.reshape((1, -1, A*10)).transpose((0,2,1))
bbox_weight[ibatch] = _bbox_weight
# kpoint_weight[ibatch] = _kpoint_weight
# kpoint_weight[:] = 0
#labels = labels[np.newaxis,:]
labels_ohem = mx.nd.array(labels_raw)
bbox_weights_ohem = mx.nd.array(bbox_weight)
kpoint_weights_ohem = mx.nd.array(kpoint_weight)
for ind, val in enumerate([labels_ohem, kpoint_weights_ohem, bbox_weights_ohem]):
self.assign(out_data[ind], req[ind], val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
for i in range(len(in_grad)):
self.assign(in_grad[i], req[i], 0)
@mx.operator.register('rpn_fpn_ohem')
class RPNFPNOHEMProp(mx.operator.CustomOpProp):
def __init__(self, stride=0):
super(RPNFPNOHEMProp, self).__init__(need_top_grad=False)
self.stride = stride
def list_arguments(self):
return ['cls_score', 'bbox_weight', 'kpoint_weight', 'labels']
def list_outputs(self):
return ['labels_ohem', 'kpoint_weights_ohem', 'bbox_weights_ohem']
def infer_shape(self, in_shape):
labels_shape = in_shape[3]
kpoint_weight_shape = in_shape[2]
bbox_weights_shape = in_shape[1]
#print('in_rpn_ohem', in_shape[0], in_shape[1], in_shape[2], file=sys.stderr)
return in_shape, \
[labels_shape, kpoint_weight_shape, bbox_weights_shape]
def create_operator(self, ctx, shapes, dtypes):
return RPNFPNOHEMOperator(self.stride)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
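# Usage sketch (not part of the original file; symbol names are illustrative): once
# registered, the op would typically be wired into each FPN stride's head like
#
#   out = mx.sym.Custom(cls_score, bbox_weight, kpoint_weight, label,
#                       op_type='rpn_fpn_ohem', stride=8)
#   labels_ohem, kpoint_weights_ohem, bbox_weights_ohem = out[0], out[1], out[2]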
| 39.210884 | 99 | 0.606176 |
33322ed44e3ee11606ed245cc85b15e63e1686c3 | 1,952 | py | Python | release/stubs.min/Rhino/DocObjects/__init___parts/MaterialRefCreateParams.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | ["MIT"] | 182 | 2017-06-27T02:26:15.000Z | 2022-03-30T18:53:43.000Z | release/stubs.min/Rhino/DocObjects/__init___parts/MaterialRefCreateParams.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | ["MIT"] | 28 | 2017-06-27T13:38:23.000Z | 2022-03-15T11:19:44.000Z | release/stubs.min/Rhino/DocObjects/__init___parts/MaterialRefCreateParams.py | htlcnn/ironpython-stubs | 780d829e2104b2789d5f4d6f32b0ec9f2930ca03 | ["MIT"] | 67 | 2017-06-28T09:43:59.000Z | 2022-03-20T21:17:10.000Z |
class MaterialRefCreateParams(object):
"""
Options passed to MaterialRefs.Create
MaterialRefCreateParams()
"""
BackFaceMaterialId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The Id of the Material used to render the back of an object.
Get: BackFaceMaterialId(self: MaterialRefCreateParams) -> Guid
Set: BackFaceMaterialId(self: MaterialRefCreateParams)=value
"""
BackFaceMaterialIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The index of the material used to render the back of an object
Get: BackFaceMaterialIndex(self: MaterialRefCreateParams) -> int
Set: BackFaceMaterialIndex(self: MaterialRefCreateParams)=value
"""
FrontFaceMaterialId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The Id of the Material used to render the front of an object.
Get: FrontFaceMaterialId(self: MaterialRefCreateParams) -> Guid
Set: FrontFaceMaterialId(self: MaterialRefCreateParams)=value
"""
FrontFaceMaterialIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The index of the material used to render the front of an object
Get: FrontFaceMaterialIndex(self: MaterialRefCreateParams) -> int
Set: FrontFaceMaterialIndex(self: MaterialRefCreateParams)=value
"""
MaterialSource=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Determines if the simple material should come from the object or from
its layer.
Get: MaterialSource(self: MaterialRefCreateParams) -> ObjectMaterialSource
Set: MaterialSource(self: MaterialRefCreateParams)=value
"""
PlugInId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Identifies a rendering plug-in
Get: PlugInId(self: MaterialRefCreateParams) -> Guid
Set: PlugInId(self: MaterialRefCreateParams)=value
"""
| 21.688889 | 94 | 0.732582 |
e9478b42614d60f98360014f8a5888723ff4c145 | 23,254 | py | Python | setup.py | paulboot/PythonMiniProbe | 30b43096919c726116446fd9846d7d60038ef03f | ["BSD-3-Clause"] | null | null | null | setup.py | paulboot/PythonMiniProbe | 30b43096919c726116446fd9846d7d60038ef03f | ["BSD-3-Clause"] | null | null | null | setup.py | paulboot/PythonMiniProbe | 30b43096919c726116446fd9846d7d60038ef03f | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014, Paessler AG <[email protected]>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import subprocess
import uuid
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
raw_input = input
def read(path):
with open(path, 'r') as file:
return file.read()
class Bcolor:
GREEN = '\033[92m'
RED = '\033[91m'
YELLOW = '\033[93m'
CYAN = '\033[96m'
END = '\033[0m'
class Configure(_install):
probe_conf = {}
conf_avail = False
config_init = {
'name': "Python MiniProbe",
'gid': str(uuid.uuid4()),
'server': "",
'port': "443",
'baseinterval': "60",
'key': "",
'cleanmem': "",
'announced': "0",
'protocol': "1",
'debug': "",
'subprocs': "10"
}
path = './miniprobe/probe.conf'
def run(self):
conf_avail = False
if not os.getuid() == 0:
print(Bcolor.RED + "You must run me as root user!" + Bcolor.END)
print(Bcolor.RED + "Rerun me with sudo " + __file__ + Bcolor.END)
sys.exit(2)
_install.do_egg_install(self)
print("")
print(Bcolor.CYAN + "Welcome to the Miniprobe (Python) for PRTG installer" + Bcolor.END)
if self.file_check(self.path):
print("")
probe_config_exists = "%s" % str(raw_input(Bcolor.YELLOW + "A config file was already found. "
"Do you want to reconfigure [y/N]: "
+ Bcolor.END)).rstrip().lstrip()
if probe_config_exists.lower() == "y":
config_old = self.read_config(self.path)
self.get_config(config_old)
else:
print("")
uninstall = "%s" % str(raw_input(Bcolor.YELLOW + "Do you want to Uninstall or Restart the "
"service [u/R]: " + Bcolor.END)).rstrip().lstrip()
if uninstall.lower() == "u":
self.remove_config()
conf_avail = False
else:
conf_avail = True
else:
conf_avail = self.get_config(self.config_init)
if conf_avail:
print(subprocess.call("update-rc.d prtgprobe defaults", shell=True))
print(Bcolor.GREEN + "Starting Mini Probe" + Bcolor.END)
print(subprocess.call("/etc/init.d/prtgprobe start", shell=True))
print(Bcolor.GREEN + "Done. You now can start/stop the Mini Probe using '/etc/init.d/prtgprobe start' "
"or '/etc/init.d/prtgprobe stop'" + Bcolor.END)
else:
print("Exiting!")
sys.exit()
pass
def file_check(self, check_path):
# Check if a give file exists
return os.path.exists(check_path)
def file_create(self, create_path):
# Creates a given file and writes some startup information to it
with open(create_path, 'w') as file_create:
file_create.write("###Mini Probe Config File\n")
file_create.close()
def write_config(self, config):
conf = ""
with open(self.path, 'a') as config_file:
for key in config:
conf += "%s:%s\n" % (key, config[key])
config_file.write(conf)
config_file.close()
print(Bcolor.GREEN + "Config file successfully written!" + Bcolor.END)
def write_file(self, write_path, content):
with open(write_path, 'w') as file_write:
file_write.write(content)
file_write.close()
def logrotation(self, rotation_path):
rotate_tpl = open("./miniprobe/scripts/rotate.tpl")
return rotate_tpl.read() % rotation_path
def read_config(self, path):
"""
read configuration file and write data to dict
"""
config = {}
try:
conf_file = open(path)
for line in conf_file:
if not (line == '\n'):
if not (line.startswith('#')):
config[line.split(':')[0]] = line.split(':')[1].rstrip()
conf_file.close()
return config
except Exception as read_error:
print(Bcolor.RED + "No config found! Error Message: %s Exiting!" + Bcolor.END % read_error)
sys.exit()
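    # For reference (sketch, not part of the original file): write_config() and
    # read_config() round-trip a plain "key:value" file, one entry per line, e.g.
    #
    #   ###Mini Probe Config File
    #   name:Python MiniProbe
    #   server:prtg.example.com   <- example value only
    #   port:443
    #   baseinterval:60
    #   key:<probe access key>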
def init_script(self, script_path, user):
init_script_tpl = open("./miniprobe/scripts/probe.tpl")
return init_script_tpl.read() % (script_path, user)
def write_load_list(self, ds18b20_sensors, other_sensors):
default_sensors = "Ping,HTTP,Port,SNMPCustom,CPULoad,Memory,Diskspace,SNMPTraffic,CPUTemp,Probehealth,ExternalIP,ADNS,APT,NMAP,MDADM"
if not (other_sensors == ""):
default_sensors = default_sensors + "," + other_sensors
file_sensor_init = open("./miniprobe/sensors/__init__.py", "a")
file_sensor_init.write("# Copyright (c) 2014, Paessler AG <[email protected]>\n")
file_sensor_init.write("# All rights reserved.\n")
file_sensor_init.write("# Redistribution and use in source and binary forms, with or without modification,"
" are permitted provided that the\n")
file_sensor_init.write("# following conditions are met:\n")
file_sensor_init.write("# 1. Redistributions of source code must retain the above copyright notice, "
"this list of conditions\n")
file_sensor_init.write("# and the following disclaimer.\n")
file_sensor_init.write("# 2. Redistributions in binary form must reproduce the above copyright notice, "
"this list of conditions\n")
file_sensor_init.write("# and the following disclaimer in the documentation and/or other materials provided "
"with the distribution.\n")
file_sensor_init.write("# 3. Neither the name of the copyright holder nor the names of its contributors may be"
" used to endorse\n")
file_sensor_init.write("# or promote products derived from this software without specific prior written "
"permission.\n")
file_sensor_init.write("\n")
file_sensor_init.write("# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" "
"AND ANY EXPRESS OR IMPLIED WARRANTIES,\n")
file_sensor_init.write("# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND "
"FITNESS FOR\n")
file_sensor_init.write("# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR "
"CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n")
file_sensor_init.write("# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES "
"(INCLUDING, BUT NOT LIMITED TO,\n")
file_sensor_init.write("# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; "
"OR BUSINESS INTERRUPTION)\n")
file_sensor_init.write("# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, "
"STRICT LIABILITY,\n")
file_sensor_init.write("# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF "
"THE USE OF THIS SOFTWARE,\n")
file_sensor_init.write("# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n")
file_sensor_init.write("\n")
file_sensor_init.write("# Announce modules available in this package\n")
file_sensor_init.write("# Just extend this list for your modules and they will be automatically imported "
"during runtime and\n")
file_sensor_init.write("# are announced to the PRTG Core\n")
file_sensor_init.write("__all__ = " + str(default_sensors.split(",")) + "\n")
print("__all__ = " + str(default_sensors.split(",")) + "\n")
if not (ds18b20_sensors == ""):
file_sensor_init.write("DS18B20_sensors = " + str(ds18b20_sensors.split(",")) + "\n")
file_sensor_init.close()
def install_w1_module(self):
print(Bcolor.YELLOW + "Checking the hardware for Raspberry Pi." + Bcolor.END)
if os.uname()[4][:3] == 'arm':
print(Bcolor.GREEN + "Found hardware matching " + os.uname()[4][:3] + Bcolor.END)
tmp_use_raspberry = "%s" % str(raw_input(Bcolor.GREEN + "Do you want to enable the Raspberry Pi "
"temperature sensor [y/N]: "
+ Bcolor.END)).rstrip().lstrip()
if tmp_use_raspberry.lower() == "y":
try:
self.install_kernel_module()
return True
except Exception as e:
print("%s.Please install the same" % e)
print("Exiting")
sys.exit(1)
else:
return False
else:
print(Bcolor.RED + "Found hardware matching " + os.uname()[4][:3] + Bcolor.END)
return False
def install_kernel_module(self):
print(Bcolor.GREEN + "Checking for w1-gpio line in /boot/config.txt" + Bcolor.END)
found = False
file_boot_config = open('/boot/config.txt', 'r')
for line in file_boot_config.readlines():
if line.strip() == 'dtoverlay=w1-gpio':
print(Bcolor.GREEN + "Found dtoverlay line. Skipping install of w1-gpio" + Bcolor.END)
found = True
file_boot_config.close()
if not found:
print(Bcolor.GREEN + "Line not found. Now adding the dtoverlay line to /boot/config.txt" + Bcolor.END)
file_boot_config = open('/boot/config.txt', 'a')
file_boot_config.write('\n#w1-gpio added by PRTG MiniProbe install script\n')
file_boot_config.write('dtoverlay=w1-gpio')
file_boot_config.close()
print(Bcolor.GREEN + "Please restart the installscript after the Raspberry Pi has been rebooted!"
+ Bcolor.END)
print(Bcolor.GREEN + "Now rebooting..." + Bcolor.END)
print(subprocess.call("reboot", shell=True))
sys.exit(2)
def get_w1_sensors(self):
sensors = ""
print(Bcolor.GREEN + "Finding all W1 sensors" + Bcolor.END)
w1_file = open('/sys/devices/w1_bus_master1/w1_master_slaves', 'r')
for line in w1_file.readlines():
print(Bcolor.GREEN + "Found: " + Bcolor.YELLOW + line[3:].strip() + Bcolor.END)
sensors = sensors + "," + line[3:].strip()
w1_file.close()
sens = "%s" % str(raw_input(Bcolor.GREEN + "Please enter the id's of the temperature sensors you want to use "
"from the list above, separated with a , [" + sensors[1:] + "]: "
+ Bcolor.END)).rstrip().lstrip()
if not sens == "":
return sens
else:
return sensors[1:]
def get_config_user(self, default="root"):
tmp_user = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the username the script should run under ["
+ default + "]: " + Bcolor.END)).rstrip().lstrip()
if not tmp_user == "":
return tmp_user
else:
return default
def get_config_name(self, default):
tmp_name = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the desired name of your Mini Probe ["
+ default + "]: " + Bcolor.END)).rstrip().lstrip()
if not tmp_name == "":
return tmp_name
else:
return default
def get_config_gid(self, default):
tmp_gid = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the Probe GID [" + default + "]: "
+ Bcolor.END)).rstrip().lstrip()
if not tmp_gid == "":
return tmp_gid
else:
return default
def get_config_ip(self, default=None):
tmp_ip = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the IP/DNS name of the PRTG Core Server ["
+ default + "]: " + Bcolor.END)).rstrip().lstrip()
if not (tmp_ip == "") or not (default == ""):
if (tmp_ip == "") and not (default == ""):
tmp_ip = default
response = os.system("ping -c 1 " + tmp_ip + " > /dev/null")
if not response == 0:
print(Bcolor.YELLOW + "PRTG Server can not be reached. Please make sure the server is reachable."
+ Bcolor.END)
go_on = "%s" % str(raw_input(Bcolor.YELLOW + "Do you still want to continue using this server [y/N]: "
+ Bcolor.END)).rstrip().lstrip()
if not go_on.lower() == "y":
return self.get_config_ip()
else:
print(Bcolor.GREEN + "PRTG Server can be reached. Continuing..." + Bcolor.END)
return tmp_ip
else:
print(Bcolor.YELLOW + "You have not provided an IP/DNS name of the PRTG Core Server." + Bcolor.END)
return self.get_config_ip()
def get_config_port(self, default):
tmp_port = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the port the PRTG web server is listening to "
"(IMPORTANT: Only SSL is supported)[" + default + "]: "
+ Bcolor.END)).rstrip().lstrip()
if not tmp_port == "":
return tmp_port
else:
return default
def get_config_base_interval(self, default):
tmp_interval = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the base interval for your sensors ["
+ default + "]: " + Bcolor.END)).rstrip().lstrip()
if not tmp_interval == "":
return tmp_interval
else:
return default
def get_config_access_key(self, default):
tmp_accesskey = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the Probe Access Key as defined on the "
"PRTG Core [" + default + "]: "
+ Bcolor.END)).rstrip().lstrip()
if (tmp_accesskey == "") and not (default == ""):
tmp_accesskey = default
else:
if tmp_accesskey == "":
print(Bcolor.YELLOW + "You have not provided the Probe Access Key as defined on the PRTG Core."
+ Bcolor.END)
return self.get_config_access_key(default)
else:
return tmp_accesskey
def get_config_path(self, default=os.path.dirname(os.path.abspath(__file__))):
default += "/miniprobe"
tmp_path = "%s" % str(raw_input(Bcolor.GREEN + "Please provide the path where the probe files are located ["
+ default + "]: " + Bcolor.END)).rstrip().lstrip()
if not tmp_path == "":
return tmp_path
else:
return default
def get_config_clean_memory(self, default=None):
tmp_cleanmem = "%s" % str(raw_input(Bcolor.GREEN + "Do you want the mini probe flushing buffered and cached "
"memory [y/N]: " + Bcolor.END)).rstrip().lstrip()
if tmp_cleanmem.lower() == "y":
return "True"
else:
return "False"
def get_config_subprocs(self, default="10"):
tmp_subprocs = "%s" % str(raw_input(Bcolor.GREEN + "How much subprocesses should be spawned for scanning ["
+ default + "]: " + Bcolor.END)).rstrip().lstrip()
if not tmp_subprocs == "":
return tmp_subprocs
else:
return default
# For future use
def get_config_announced(self, default):
return default
# For future use
def get_config_protocol(self, default):
return default
def get_config_debug(self, default):
tmp_debug = "%s" % str(raw_input(Bcolor.GREEN + "Do you want to enable debug logging (" + Bcolor.YELLOW +
"can create massive logfiles!" + Bcolor.GREEN + ") [y/N]: "
+ Bcolor.END)).rstrip().lstrip()
if tmp_debug.lower() == "y":
tmp_debug1 = "%s" % str(raw_input(Bcolor.YELLOW + "Are you sure you want to enable debug logging? "
"This will create massive logfiles [y/N]: "
+ Bcolor.END)).rstrip().lstrip()
if tmp_debug1.lower() == "y":
return "True"
else:
return "False"
else:
return "False"
def get_config(self, config_old):
print("")
print(Bcolor.YELLOW + "Checking for necessary modules and Python Version" + Bcolor.END)
try:
import hashlib
import string
import json
import socket
import importlib
import requests
import pyasn1
import pysnmp
except Exception as e:
print("%s.Please install the same" % e)
print("Exiting")
sys.exit(1)
print(Bcolor.GREEN + "Successfully imported modules." + Bcolor.END)
print("")
if self.install_w1_module():
sensors = self.get_w1_sensors()
if not sensors == "":
print(Bcolor.GREEN + "Adding DS18B20.py and selected sensors to /miniprobe/sensors/__init__.py"
+ Bcolor.END)
self.write_load_list(sensors, "DS18B20")
else:
self.write_load_list("", "")
else:
self.write_load_list("", "")
print("")
try:
probe_user = self.get_config_user()
self.probe_conf['name'] = self.get_config_name(config_old['name'])
self.probe_conf['gid'] = self.get_config_gid(config_old['gid'])
self.probe_conf['server'] = self.get_config_ip(config_old['server'])
self.probe_conf['port'] = self.get_config_port(config_old['port'])
self.probe_conf['baseinterval'] = self.get_config_base_interval(config_old['baseinterval'])
self.probe_conf['key'] = self.get_config_access_key(config_old['key'])
probe_path = self.get_config_path()
self.probe_conf['cleanmem'] = self.get_config_clean_memory(config_old['cleanmem'])
self.probe_conf['announced'] = self.get_config_announced(config_old['announced'])
self.probe_conf['protocol'] = self.get_config_protocol(config_old['protocol'])
self.probe_conf['debug'] = self.get_config_debug(config_old['debug'])
self.probe_conf['subprocs'] = self.get_config_subprocs(config_old['subprocs'])
print("")
print(self.path)
self.file_create(self.path)
self.write_config(self.probe_conf)
logpath = "%s/logs" % probe_path
if not (self.file_check(logpath)):
os.makedirs(logpath)
path_rotate = "/etc/logrotate.d/prtgprobe"
path_init = "/etc/init.d/prtgprobe"
print(Bcolor.GREEN + "Creating Logrotation Config" + Bcolor.END)
self.write_file(path_rotate, self.logrotation(probe_path))
print(Bcolor.GREEN + "Setting up runlevel" + Bcolor.END)
self.write_file(path_init, self.init_script(probe_path, probe_user))
print(Bcolor.GREEN + "Changing File Permissions" + Bcolor.END)
os.chmod('%s/probe.py' % probe_path, 0o0755)
os.chmod(path_init, 0o0755)
return True
except Exception as e:
print(Bcolor.RED + "%s. Exiting!" % e + Bcolor.END)
return False
def remove_config(self):
try:
print(subprocess.call("/etc/init.d/prtgprobe stop", shell=True))
os.remove('/etc/init.d/prtgprobe')
os.remove('/etc/logrotate.d/prtgprobe')
os.remove('./miniprobe/probe.conf')
except Exception as e:
print("%s. Exiting!" % e)
return False
py_requires = 'requirements3.txt'
with open(py_requires) as f:
requires = f.read().splitlines()
packages = [
"miniprobe"
]
setup(
name='Python Mini Probe',
version=read('VERSION.txt'),
author='Paessler AG',
author_email='[email protected]',
license='BSD 3.0',
description='Python MiniProbe for PRTG',
long_description=read('README.md'),
packages=find_packages(),
install_requires=requires,
url='https://github.com/PaesslerAG/PythonMiniProbe',
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
],
cmdclass={'configure': Configure}
)
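# With the cmdclass mapping above, the interactive installer is reached with something
# like `sudo python3 setup.py configure` (sketch; the Configure step itself insists on root).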
| 48.045455 | 141 | 0.569321 |
fd9fac58a973c92571a71f0c49c21e2f5135f822 | 65,776 | py | Python | pandas/tseries/index.py | olgabot/pandas | 286811a2775c9b5ca1c30086b1950dd1060ecf68 | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | 2 | 2017-03-10T04:16:05.000Z | 2019-04-19T23:02:48.000Z | pandas/tseries/index.py | Gwillink/pandas | ceec8bf305fbd0258edb66c648b87219a435bc32 | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null | pandas/tseries/index.py | Gwillink/pandas | ceec8bf305fbd0258edb66c648b87219a435bc32 | ["PSF-2.0", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause"] | null | null | null |
# pylint: disable=E1101
import operator
from datetime import time, datetime
from datetime import timedelta
import numpy as np
from pandas.core.common import (isnull, _NS_DTYPE, _INT64_DTYPE,
is_list_like,_values_from_object, _maybe_box,
notnull)
from pandas.core.index import Index, Int64Index, _Identity
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import (
infer_freq, to_offset, get_period_alias,
Resolution, get_reso_string)
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.index as _index
def _utc():
import pytz
return pytz.utc
# -------- some conversion wrapper functions
def _field_accessor(name, field):
def f(self):
values = self.asi8
if self.tz is not None:
utc = _utc()
if self.tz is not utc:
values = self._local_timestamps()
return tslib.get_date_field(values, field)
f.__name__ = name
return property(f)
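# Illustrative note (not in this excerpt): further down the module this factory is used
# to expose datetime fields as properties, along the lines of
#   year = _field_accessor('year', 'Y')
#   month = _field_accessor('month', 'M')
# where the second argument is the field code understood by tslib.get_date_field.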
def _join_i8_wrapper(joinf, with_indexers=True):
@staticmethod
def wrapper(left, right):
if isinstance(left, np.ndarray):
left = left.view('i8', type=np.ndarray)
if isinstance(right, np.ndarray):
right = right.view('i8', type=np.ndarray)
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view('M8[ns]')
return join_index, left_indexer, right_indexer
return results
return wrapper
def _dt_index_cmp(opname):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
func = getattr(super(DatetimeIndex, self), opname)
if isinstance(other, datetime):
other = _to_m8(other, tz=self.tz)
elif isinstance(other, list):
other = DatetimeIndex(other)
elif isinstance(other, compat.string_types):
other = _to_m8(other, tz=self.tz)
elif not isinstance(other, np.ndarray):
other = _ensure_datetime64(other)
result = func(other)
return result.view(np.ndarray)
return wrapper
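# Effect sketch (illustrative): with these wrappers, comparing an index against a single
# datetime-like broadcasts elementwise, e.g.
#   DatetimeIndex(['2013-01-01', '2013-01-02']) > '2013-01-01'
# yields a boolean ndarray instead of failing on the string operand.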
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
_midnight = time(0, 0)
class DatetimeIndex(Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
"""
_join_precedence = 10
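    # Construction sketch (illustrative, not part of the original source):
    #   DatetimeIndex(start='2013-01-01', periods=3, freq='D')    # generated range
    #   DatetimeIndex(['2013-01-01', '2013-01-02'], tz='UTC')     # from datetime-likes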
_inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
_algos.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
__eq__ = _dt_index_cmp('__eq__')
__ne__ = _dt_index_cmp('__ne__')
__lt__ = _dt_index_cmp('__lt__')
__gt__ = _dt_index_cmp('__gt__')
__le__ = _dt_index_cmp('__le__')
__ge__ = _dt_index_cmp('__ge__')
# structured array cache for datetime fields
_sarr_cache = None
_engine_type = _index.DatetimeEngine
offset = None
_comparables = ['name','freqstr','tz']
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None, tz=None,
verify_integrity=True, normalize=False,
closed=None, **kwds):
dayfirst = kwds.pop('dayfirst', None)
yearfirst = kwds.pop('yearfirst', None)
infer_dst = kwds.pop('infer_dst', False)
warn = False
if 'offset' in kwds and kwds['offset']:
freq = kwds['offset']
warn = True
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = to_offset(freq)
else:
freq_infer = True
freq = None
if warn:
import warnings
warnings.warn("parameter 'offset' is deprecated, "
"please use 'freq' instead",
FutureWarning)
offset = freq
if periods is not None:
if com.is_float(periods):
periods = int(periods)
elif not com.is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None and offset is None:
raise ValueError("Must provide freq argument if no data is "
"supplied")
if data is None:
return cls._generate(start, end, periods, name, offset,
tz=tz, normalize=normalize, closed=closed,
infer_dst=infer_dst)
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('DatetimeIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
data = np.asarray(data, dtype='O')
# try a few ways to make it datetime64
if lib.is_string_array(data):
data = _str_to_dt_array(data, offset, dayfirst=dayfirst,
yearfirst=yearfirst)
else:
data = tools.to_datetime(data, errors='raise')
data.offset = offset
if isinstance(data, DatetimeIndex):
if name is not None:
data.name = name
if tz is not None:
return data.tz_localize(tz, infer_dst=infer_dst)
return data
if issubclass(data.dtype.type, compat.string_types):
data = _str_to_dt_array(data, offset, dayfirst=dayfirst,
yearfirst=yearfirst)
if issubclass(data.dtype.type, np.datetime64):
if isinstance(data, DatetimeIndex):
if tz is None:
tz = data.tz
subarr = data.values
if offset is None:
offset = data.offset
verify_integrity = False
else:
if data.dtype != _NS_DTYPE:
subarr = tslib.cast_to_nanoseconds(data)
else:
subarr = data
elif data.dtype == _INT64_DTYPE:
if isinstance(data, Int64Index):
raise TypeError('cannot convert Int64Index->DatetimeIndex')
if copy:
subarr = np.asarray(data, dtype=_NS_DTYPE)
else:
subarr = data.view(_NS_DTYPE)
else:
try:
subarr = tools.to_datetime(data, box=False)
except ValueError:
# tz aware
subarr = tools.to_datetime(data, box=False, utc=True)
if not np.issubdtype(subarr.dtype, np.datetime64):
raise ValueError('Unable to convert %s to datetime dtype'
% str(data))
if isinstance(subarr, DatetimeIndex):
if tz is None:
tz = subarr.tz
else:
if tz is not None:
tz = tools._maybe_get_tz(tz)
if (not isinstance(data, DatetimeIndex) or
getattr(data, 'tz', None) is None):
# Convert tz-naive to UTC
ints = subarr.view('i8')
subarr = tslib.tz_localize_to_utc(ints, tz,
infer_dst=infer_dst)
subarr = subarr.view(_NS_DTYPE)
subarr = subarr.view(cls)
subarr.name = name
subarr.offset = offset
subarr.tz = tz
if verify_integrity and len(subarr) > 0:
if offset is not None and not freq_infer:
inferred = subarr.inferred_freq
if inferred != offset.freqstr:
raise ValueError('Dates do not conform to passed '
'frequency')
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.offset = to_offset(inferred)
return subarr
@classmethod
def _generate(cls, start, end, periods, name, offset,
tz=None, normalize=False, infer_dst=False, closed=None):
if com._count_not_none(start, end, periods) != 2:
raise ValueError('Must specify two of start, end, or periods')
_normalized = True
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
left_closed = False
right_closed = False
if start is None and end is None:
if closed is not None:
raise ValueError("Closed has to be None if not both of start"
"and end are defined")
if closed is None:
left_closed = True
right_closed = True
elif closed == "left":
left_closed = True
elif closed == "right":
right_closed = True
else:
raise ValueError("Closed has to be either 'left', 'right' or None")
try:
inferred_tz = tools._infer_tzinfo(start, end)
except:
raise ValueError('Start and end cannot both be tz-aware with '
'different timezones')
inferred_tz = tools._maybe_get_tz(inferred_tz)
tz = tools._maybe_get_tz(tz)
if tz is not None and inferred_tz is not None:
if not inferred_tz == tz:
raise AssertionError("Inferred time zone not equal to passed "
"time zone")
elif inferred_tz is not None:
tz = inferred_tz
if start is not None:
if normalize:
start = normalize_date(start)
_normalized = True
else:
_normalized = _normalized and start.time() == _midnight
if end is not None:
if normalize:
end = normalize_date(end)
_normalized = True
else:
_normalized = _normalized and end.time() == _midnight
if hasattr(offset, 'delta') and offset != offsets.Day():
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is None:
start = start.tz_localize(tz)
if end is not None and end.tz is None:
end = end.tz_localize(tz)
if start and end:
if start.tz is None and end.tz is not None:
start = start.tz_localize(end.tz)
if end.tz is None and start.tz is not None:
end = end.tz_localize(start.tz)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
else:
if inferred_tz is None and tz is not None:
# naive dates
if start is not None and start.tz is not None:
start = start.replace(tzinfo=None)
if end is not None and end.tz is not None:
end = end.replace(tzinfo=None)
if start and end:
if start.tz is None and end.tz is not None:
end = end.replace(tzinfo=None)
if end.tz is None and start.tz is not None:
start = start.replace(tzinfo=None)
if _use_cached_range(offset, _normalized, start, end):
index = cls._cached_range(start, end, periods=periods,
offset=offset, name=name)
else:
index = _generate_regular_range(start, end, periods, offset)
if tz is not None and getattr(index, 'tz', None) is None:
index = tslib.tz_localize_to_utc(com._ensure_int64(index), tz,
infer_dst=infer_dst)
index = index.view(_NS_DTYPE)
index = index.view(cls)
index.name = name
index.offset = offset
index.tz = tz
if not left_closed:
index = index[1:]
if not right_closed:
index = index[:-1]
return index
def _box_values(self, values):
return lib.map_infer(values, lib.Timestamp)
def _local_timestamps(self):
utc = _utc()
if self.is_monotonic:
return tslib.tz_convert(self.asi8, utc, self.tz)
else:
values = self.asi8
indexer = values.argsort()
result = tslib.tz_convert(values.take(indexer), utc, self.tz)
n = len(indexer)
reverse = np.empty(n, dtype=np.int_)
reverse.put(indexer, np.arange(n))
return result.take(reverse)
@classmethod
def _simple_new(cls, values, name, freq=None, tz=None):
if values.dtype != _NS_DTYPE:
values = com._ensure_int64(values).view(_NS_DTYPE)
result = values.view(cls)
result.name = name
result.offset = freq
result.tz = tools._maybe_get_tz(tz)
return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
if start is None and end is None:
# I somewhat believe this should never be raised externally and therefore
# should be a `PandasError` but whatever...
raise TypeError('Must specify either start or end.')
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if (start is None or end is None) and periods is None:
raise TypeError('Must either specify period or provide both start and end.')
if offset is None:
# This can't happen with external-facing code, therefore PandasError
raise TypeError('Must provide offset.')
drc = _daterange_cache
if offset not in _daterange_cache:
xdr = generate_range(offset=offset, start=_CACHE_START,
end=_CACHE_END)
arr = tools.to_datetime(list(xdr), box=False)
cachedRange = arr.view(DatetimeIndex)
cachedRange.offset = offset
cachedRange.tz = None
cachedRange.name = None
drc[offset] = cachedRange
else:
cachedRange = drc[offset]
if start is None:
if not isinstance(end, Timestamp):
raise AssertionError('end must be an instance of Timestamp')
end = offset.rollback(end)
endLoc = cachedRange.get_loc(end) + 1
startLoc = endLoc - periods
elif end is None:
if not isinstance(start, Timestamp):
raise AssertionError('start must be an instance of Timestamp')
start = offset.rollforward(start)
startLoc = cachedRange.get_loc(start)
endLoc = startLoc + periods
else:
if not offset.onOffset(start):
start = offset.rollforward(start)
if not offset.onOffset(end):
end = offset.rollback(end)
startLoc = cachedRange.get_loc(start)
endLoc = cachedRange.get_loc(end) + 1
indexSlice = cachedRange[startLoc:endLoc]
indexSlice.name = name
indexSlice.offset = offset
return indexSlice
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return tslib.ints_to_pydatetime(self.asi8, self.tz)
_na_value = tslib.NaT
"""The expected NA value to use with this index."""
def __unicode__(self):
from pandas.core.format import _format_datetime64
values = self.values
freq = None
if self.offset is not None:
freq = self.offset.freqstr
summary = str(self.__class__)
if len(self) == 1:
first = _format_datetime64(values[0], tz=self.tz)
summary += '\n[%s]' % first
elif len(self) == 2:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, %s]' % (first, last)
elif len(self) > 2:
first = _format_datetime64(values[0], tz=self.tz)
last = _format_datetime64(values[-1], tz=self.tz)
summary += '\n[%s, ..., %s]' % (first, last)
tagline = '\nLength: %d, Freq: %s, Timezone: %s'
summary += tagline % (len(self), freq, self.tz)
return summary
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.name, self.offset, self.tz
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if len(state) == 2:
nd_state, own_state = state
self.name = own_state[0]
self.offset = own_state[1]
self.tz = own_state[2]
np.ndarray.__setstate__(self, nd_state)
# provide numpy < 1.7 compat
if nd_state[2] == 'M8[us]':
new_state = np.ndarray.__reduce__(self.values.astype('M8[ns]'))
np.ndarray.__setstate__(self, new_state[2])
else: # pragma: no cover
np.ndarray.__setstate__(self, state)
def __add__(self, other):
if isinstance(other, Index):
return self.union(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(other)
elif isinstance(other, np.timedelta64):
return self._add_delta(other)
elif com.is_integer(other):
return self.shift(other)
else: # pragma: no cover
raise TypeError(other)
def __sub__(self, other):
if isinstance(other, Index):
return self.diff(other)
elif isinstance(other, (DateOffset, timedelta)):
return self._add_delta(-other)
elif isinstance(other, np.timedelta64):
return self._add_delta(-other)
elif com.is_integer(other):
return self.shift(-other)
else: # pragma: no cover
raise TypeError(other)
def _add_delta(self, delta):
if isinstance(delta, (Tick, timedelta)):
inc = offsets._delta_to_nanoseconds(delta)
new_values = (self.asi8 + inc).view(_NS_DTYPE)
tz = 'UTC' if self.tz is not None else None
result = DatetimeIndex(new_values, tz=tz, freq='infer')
utc = _utc()
if self.tz is not None and self.tz is not utc:
result = result.tz_convert(self.tz)
elif isinstance(delta, np.timedelta64):
new_values = self.to_series() + delta
result = DatetimeIndex(new_values, tz=self.tz, freq='infer')
else:
new_values = self.astype('O') + delta
result = DatetimeIndex(new_values, tz=self.tz, freq='infer')
return result
def __contains__(self, key):
try:
res = self.get_loc(key)
return np.isscalar(res) or type(res) == slice
except (KeyError, TypeError):
return False
def _format_with_header(self, header, **kwargs):
return header + self._format_native_types(**kwargs)
def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs):
data = list(self)
# tz formatter or time formatter
zero_time = time(0, 0)
if date_format is None:
for d in data:
if d.time() != zero_time or d.tzinfo is not None:
return [u('%s') % x for x in data]
values = np.array(data, dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = -mask
if date_format is None:
date_formatter = lambda x: u('%d-%.2d-%.2d' % (x.year, x.month, x.day))
else:
date_formatter = lambda x: u(x.strftime(date_format))
values[imask] = np.array([date_formatter(dt) for dt in values[imask]])
return values.tolist()
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if not isinstance(values, DatetimeIndex):
try:
values = DatetimeIndex(values)
except ValueError:
return self.asobject.isin(values)
value_set = set(values.asi8)
return lib.ismember(self.asi8, value_set)
def to_datetime(self, dayfirst=False):
return self.copy()
def groupby(self, f):
objs = self.asobject
return _algos.groupby_object(objs, f)
def summary(self, name=None):
if len(self) > 0:
index_summary = ', %s to %s' % (com.pprint_thing(self[0]),
com.pprint_thing(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (com.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
return result
def get_duplicates(self):
values = Index.get_duplicates(self)
return DatetimeIndex(values)
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return self.asobject
elif dtype == _INT64_DTYPE:
return self.asi8.copy()
else: # pragma: no cover
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
def _get_time_micros(self):
utc = _utc()
values = self.asi8
if self.tz is not None and self.tz is not utc:
values = self._local_timestamps()
return tslib.get_time_micros(values)
@property
def asobject(self):
"""
Convert to Index of datetime objects
"""
if isnull(self).any():
msg = 'DatetimeIndex with NaT cannot be converted to object'
raise ValueError(msg)
return self._get_object_index()
def tolist(self):
"""
See ndarray.tolist
"""
return list(self.asobject)
def _get_object_index(self):
boxfunc = lambda x: Timestamp(x, offset=self.offset, tz=self.tz)
boxed_values = lib.map_infer(self.asi8, boxfunc)
return Index(boxed_values, dtype=object)
def to_pydatetime(self):
"""
Return DatetimeIndex as object ndarray of datetime.datetime objects
Returns
-------
datetimes : ndarray
"""
return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
def to_period(self, freq=None):
"""
Cast to PeriodIndex at a particular frequency
"""
from pandas.tseries.period import PeriodIndex
if self.freq is None and freq is None:
msg = "You must pass a freq argument as current index has none."
raise ValueError(msg)
if freq is None:
freq = get_period_alias(self.freqstr)
return PeriodIndex(self.values, freq=freq, tz=self.tz)
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self.values)
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, self.name, None,
self.tz)
def snap(self, freq='S'):
"""
Snap time stamps to nearest occurring frequency
"""
# Superdumb, punting on any optimizing
freq = to_offset(freq)
snapped = np.empty(len(self), dtype=_NS_DTYPE)
for i, v in enumerate(self):
s = v
if not freq.onOffset(s):
t0 = freq.rollback(s)
t1 = freq.rollforward(s)
if abs(s - t0) < abs(t1 - s):
s = t0
else:
s = t1
snapped[i] = s
# we know it conforms; skip check
return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
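    # e.g. (sketch): DatetimeIndex(['2013-01-01 09:00', '2013-01-02 16:00']).snap('D')
    # moves each stamp to whichever midnight (the 'D' offset) is nearer.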
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.offset:
if isinstance(freq, compat.string_types):
freq = to_offset(freq)
result = Index.shift(self, n, freq)
result.tz = self.tz
return result
if n == 0:
# immutable so OK
return self
if self.offset is None:
raise ValueError("Cannot shift with no offset")
start = self[0] + n * self.offset
end = self[-1] + n * self.offset
return DatetimeIndex(start=start, end=end, freq=self.offset,
name=self.name, tz=self.tz)
def repeat(self, repeats, axis=None):
"""
Analogous to ndarray.repeat
"""
return DatetimeIndex(self.values.repeat(repeats),
name=self.name)
def take(self, indices, axis=0):
"""
Analogous to ndarray.take
"""
maybe_slice = lib.maybe_indices_to_slice(com._ensure_int64(indices))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return self._simple_new(taken, self.name, None, self.tz)
def unique(self):
"""
Index.unique with handling for DatetimeIndex metadata
Returns
-------
result : DatetimeIndex
"""
result = Int64Index.unique(self)
return DatetimeIndex._simple_new(result, tz=self.tz,
name=self.name)
def union(self, other):
"""
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, this will be much
        faster than Index.union
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = self._maybe_utc_convert(other)
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, DatetimeIndex):
result.tz = this.tz
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
def union_many(self, others):
"""
A bit of a hack to accelerate unioning a collection of indexes
"""
this = self
for other in others:
if not isinstance(this, DatetimeIndex):
this = Index.union(this, other)
continue
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except TypeError:
pass
this, other = this._maybe_utc_convert(other)
if this._can_fast_union(other):
this = this._fast_union(other)
else:
tz = this.tz
this = Index.union(this, other)
if isinstance(this, DatetimeIndex):
this.tz = tz
if this.freq is None:
this.offset = to_offset(this.inferred_freq)
return this
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
to_concat, factory = _process_concat_data(to_concat, name)
return factory(to_concat)
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer',
'mixed-integer-float', 'mixed')):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
this, other = self._maybe_utc_convert(other)
return Index.join(this, other, how=how, level=level,
return_indexers=return_indexers)
def _maybe_utc_convert(self, other):
this = self
if isinstance(other, DatetimeIndex):
if self.tz is not None:
if other.tz is None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
elif other.tz is not None:
raise TypeError('Cannot join tz-naive with tz-aware '
'DatetimeIndex')
if self.tz != other.tz:
this = self.tz_convert('UTC')
other = other.tz_convert('UTC')
return this, other
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, DatetimeIndex)
and self.offset == other.offset
and self._can_fast_union(other)):
joined = self._view_like(joined)
joined.name = name
return joined
else:
tz = getattr(other, 'tz', None)
return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
if not isinstance(other, DatetimeIndex):
return False
offset = self.offset
if offset is None or offset != other.offset:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + offset) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_start, left_end = left[0], left[-1]
right_end = right[-1]
if not self.offset._should_cache():
# concatenate dates
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = com._concat_compat((left.values, right_chunk))
return self._view_like(dates)
else:
return left
else:
return type(self)(start=left_start,
end=max(left_end, right_end),
freq=left.offset)
def __array_finalize__(self, obj):
if self.ndim == 0: # pragma: no cover
return self.item()
self.offset = getattr(obj, 'offset', None)
self.tz = getattr(obj, 'tz', None)
self.name = getattr(obj, 'name', None)
self._reset_identity()
def intersection(self, other):
"""
Specialized intersection for DatetimeIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : DatetimeIndex or array-like
Returns
-------
y : Index or DatetimeIndex
"""
if not isinstance(other, DatetimeIndex):
try:
other = DatetimeIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
elif (other.offset is None or self.offset is None or
other.offset != self.offset or
not other.offset.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other)
if isinstance(result, DatetimeIndex):
if result.freq is None:
result.offset = to_offset(result.inferred_freq)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._view_like(left_chunk)
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
is_monotonic = self.is_monotonic
if reso == 'year':
t1 = Timestamp(datetime(parsed.year, 1, 1), tz=self.tz)
t2 = Timestamp(datetime(parsed.year, 12, 31, 23, 59, 59, 999999), tz=self.tz)
elif reso == 'month':
d = tslib.monthrange(parsed.year, parsed.month)[1]
t1 = Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz)
t2 = Timestamp(datetime(parsed.year, parsed.month, d, 23, 59, 59, 999999), tz=self.tz)
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = tslib.monthrange(parsed.year, qe)[1] # at end of month
t1 = Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz)
t2 = Timestamp(datetime(parsed.year, qe, d, 23, 59, 59, 999999), tz=self.tz)
elif (reso == 'day' and (self._resolution < Resolution.RESO_DAY or not is_monotonic)):
st = datetime(parsed.year, parsed.month, parsed.day)
t1 = Timestamp(st, tz=self.tz)
t2 = st + offsets.Day()
t2 = Timestamp(Timestamp(t2, tz=self.tz).value - 1)
elif (reso == 'hour' and (
self._resolution < Resolution.RESO_HR or not is_monotonic)):
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
t1 = Timestamp(st, tz=self.tz)
t2 = Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1)
elif (reso == 'minute' and (
self._resolution < Resolution.RESO_MIN or not is_monotonic)):
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
t1 = Timestamp(st, tz=self.tz)
t2 = Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1)
elif (reso == 'second' and (
self._resolution == Resolution.RESO_SEC or not is_monotonic)):
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second)
t1 = Timestamp(st, tz=self.tz)
t2 = Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1)
else:
raise KeyError
stamps = self.asi8
if is_monotonic:
# we are out of range
if len(stamps) and (
(use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
(use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
raise KeyError
# a monotonic (sorted) series can be sliced
left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
return slice(left, right)
lhs_mask = (stamps >= t1.value) if use_lhs else True
rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _possibly_promote(self, other):
if other.inferred_type == 'date':
other = DatetimeIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
timestamp = None
#if isinstance(key, Timestamp):
# timestamp = key
#el
if isinstance(key, datetime):
# needed to localize naive datetimes
timestamp = Timestamp(key, tz=self.tz)
if timestamp:
return self.get_value_maybe_box(series, timestamp)
try:
return _maybe_box(self, Index.get_value(self, series, key), series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
if isinstance(key, time):
locs = self.indexer_at_time(key)
return series.take(locs)
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
# needed to localize naive datetimes
if self.tz is not None:
key = Timestamp(key, tz=self.tz)
elif not isinstance(key, Timestamp):
key = Timestamp(key)
values = self._engine.get_value(_values_from_object(series), key)
return _maybe_box(self, values, series, key)
def get_loc(self, key):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if isinstance(key, datetime):
# needed to localize naive datetimes
stamp = Timestamp(key, tz=self.tz)
return self._engine.get_loc(stamp)
try:
return Index.get_loc(self, key)
except (KeyError, ValueError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
if isinstance(key, time):
return self.indexer_at_time(key)
try:
stamp = Timestamp(key, tz=self.tz)
return self._engine.get_loc(stamp)
except (KeyError, ValueError):
raise KeyError(key)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
_, parsed, reso = parse_time_string(key, freq)
loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def slice_indexer(self, start=None, end=None, step=None):
"""
Index.slice_indexer, customized to handle time slicing
"""
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError('Must have step size of 1 with time slices')
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot mix time and non-time slice keys')
if isinstance(start, float) or isinstance(end, float):
raise TypeError('Cannot index datetime64 with float keys')
return Index.slice_indexer(self, start, end, step)
def slice_locs(self, start=None, end=None):
"""
Index.slice_locs, customized to handle partial ISO-8601 string slicing
"""
if isinstance(start, compat.string_types) or isinstance(end, compat.string_types):
if self.is_monotonic:
try:
if start:
start_loc = self._get_string_slice(start).start
else:
start_loc = 0
if end:
end_loc = self._get_string_slice(end).stop
else:
end_loc = len(self)
return start_loc, end_loc
except KeyError:
pass
else:
# can't use a slice indexer because we are not sorted!
# so create an indexer directly
try:
if start:
start_loc = self._get_string_slice(start,
use_rhs=False)
else:
start_loc = np.arange(len(self))
if end:
end_loc = self._get_string_slice(end, use_lhs=False)
else:
end_loc = np.arange(len(self))
return start_loc, end_loc
except KeyError:
pass
if isinstance(start, time) or isinstance(end, time):
raise KeyError('Cannot use slice_locs with time slice keys')
return Index.slice_locs(self, start, end)
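    # Illustrative sketch (added; not in the original source): partial ISO-8601 string
    # slicing resolved through slice_locs/_partial_date_slice, assuming `ts` is a Series
    # indexed by a monotonic DatetimeIndex.
    # >>> ts['2011-01':'2011-03']   # month-level partial strings
    # >>> ts['2011']                # a whole year as a partial string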
def __getitem__(self, key):
"""Override numpy.ndarray's __getitem__ method to work as desired"""
arr_idx = self.view(np.ndarray)
if np.isscalar(key):
val = arr_idx[key]
return Timestamp(val, offset=self.offset, tz=self.tz)
else:
if com._is_bool_indexer(key):
key = np.asarray(key)
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
new_offset = None
if isinstance(key, slice):
if self.offset is not None and key.step is not None:
new_offset = key.step * self.offset
else:
new_offset = self.offset
result = arr_idx[key]
if result.ndim > 1:
return result
return self._simple_new(result, self.name, new_offset, self.tz)
_getitem_slice = __getitem__
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
result = f(self)
if not isinstance(result, np.ndarray):
raise TypeError
return result
except Exception:
return _algos.arrmap_object(self.asobject, f)
# alias to offset
@property
def freq(self):
return self.offset
@cache_readonly
def inferred_freq(self):
try:
return infer_freq(self)
except ValueError:
return None
@property
def freqstr(self):
return self.offset.freqstr
year = _field_accessor('year', 'Y')
month = _field_accessor('month', 'M')
day = _field_accessor('day', 'D')
hour = _field_accessor('hour', 'h')
minute = _field_accessor('minute', 'm')
second = _field_accessor('second', 's')
microsecond = _field_accessor('microsecond', 'us')
nanosecond = _field_accessor('nanosecond', 'ns')
weekofyear = _field_accessor('weekofyear', 'woy')
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow')
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy')
quarter = _field_accessor('quarter', 'q')
@property
def time(self):
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# can't call self.map() which tries to treat func as ufunc
# and causes recursion warnings on python 2.6
return _algos.arrmap_object(self.asobject, lambda x: x.time())
@property
def date(self):
"""
Returns numpy array of datetime.date. The date part of the Timestamps.
"""
return _algos.arrmap_object(self.asobject, lambda x: x.date())
def normalize(self):
"""
Return DatetimeIndex with times to midnight. Length is unaltered
Returns
-------
normalized : DatetimeIndex
"""
new_values = tslib.date_normalize(self.asi8, self.tz)
return DatetimeIndex(new_values, freq='infer', name=self.name,
tz=self.tz)
def __iter__(self):
return iter(self._get_object_index())
def searchsorted(self, key, side='left'):
if isinstance(key, np.ndarray):
key = np.array(key, dtype=_NS_DTYPE, copy=False)
else:
key = _to_m8(key, tz=self.tz)
return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
def argmin(self):
# hack to workaround argmin failure
try:
return self.values.argmin()
except Exception: # pragma: no cover
return self.asi8.argmin()
@property
def inferred_type(self):
# b/c datetime is represented as microseconds since the epoch, make
# sure we can't have ambiguous indexing
return 'datetime64'
@property
def dtype(self):
return _NS_DTYPE
@property
def is_all_dates(self):
return True
@cache_readonly
def is_normalized(self):
"""
Returns True if all of the dates are at midnight ("no time")
"""
return tslib.dates_normalized(self.asi8, self.tz)
@cache_readonly
def resolution(self):
"""
Returns day, hour, minute, second, or microsecond
"""
reso = self._resolution
return get_reso_string(reso)
@cache_readonly
def _resolution(self):
return tslib.resolution(self.asi8, self.tz)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self.tz is not None:
if other.tz is None:
return False
same_zone = tslib.get_timezone(
self.tz) == tslib.get_timezone(other.tz)
else:
if other.tz is not None:
return False
same_zone = True
return same_zone and np.array_equal(self.asi8, other.asi8)
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
if isinstance(item, datetime):
item = _to_m8(item, tz=self.tz)
new_index = np.concatenate((self[:loc].asi8,
[item.view(np.int64)],
self[loc:].asi8))
return DatetimeIndex(new_index, freq='infer')
def delete(self, loc):
"""
Make new DatetimeIndex with passed location deleted
Returns
-------
new_index : DatetimeIndex
"""
arr = np.delete(self.values, loc)
return DatetimeIndex(arr, tz=self.tz)
def _view_like(self, ndarray):
result = ndarray.view(type(self))
result.offset = self.offset
result.tz = self.tz
result.name = self.name
return result
def tz_convert(self, tz):
"""
Convert DatetimeIndex from one time zone to another (using pytz)
Returns
-------
normalized : DatetimeIndex
"""
tz = tools._maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError('Cannot convert tz-naive timestamps, use '
'tz_localize to localize')
# No conversion since timestamps are all UTC to begin with
return self._simple_new(self.values, self.name, self.offset, tz)
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz)
Parameters
----------
tz : string or pytz.timezone
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
"""
if self.tz is not None:
raise TypeError("Already tz-aware, use tz_convert to convert.")
tz = tools._maybe_get_tz(tz)
# Convert to UTC
new_dates = tslib.tz_localize_to_utc(self.asi8, tz, infer_dst=infer_dst)
new_dates = new_dates.view(_NS_DTYPE)
return self._simple_new(new_dates, self.name, self.offset, tz)
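    # Illustrative sketch (added; not part of the original source): localize a tz-naive
    # index and then convert it, assuming `idx` is a tz-naive DatetimeIndex and pytz is
    # available.
    # >>> idx_utc = idx.tz_localize('UTC')
    # >>> idx_ny = idx_utc.tz_convert('America/New_York')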
def indexer_at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM)
Parameters
----------
time : datetime.time or string
tz : string or pytz.timezone
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries
Returns
-------
values_at_time : TimeSeries
"""
from dateutil.parser import parse
if asof:
raise NotImplementedError
if isinstance(time, compat.string_types):
time = parse(time).time()
if time.tzinfo:
# TODO
raise NotImplementedError
time_micros = self._get_time_micros()
micros = _time_to_micros(time)
return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of day (e.g., 9:00-9:30AM)
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
tz : string or pytz.timezone, default None
Returns
-------
values_between_time : TimeSeries
"""
from dateutil.parser import parse
if isinstance(start_time, compat.string_types):
start_time = parse(start_time).time()
if isinstance(end_time, compat.string_types):
end_time = parse(end_time).time()
if start_time.tzinfo or end_time.tzinfo:
raise NotImplementedError
time_micros = self._get_time_micros()
start_micros = _time_to_micros(start_time)
end_micros = _time_to_micros(end_time)
if include_start and include_end:
lop = rop = operator.le
elif include_start:
lop = operator.le
rop = operator.lt
elif include_end:
lop = operator.lt
rop = operator.le
else:
lop = rop = operator.lt
if start_time <= end_time:
join_op = operator.and_
else:
join_op = operator.or_
mask = join_op(lop(start_micros, time_micros),
rop(time_micros, end_micros))
return mask.nonzero()[0]
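    # Illustrative sketch (added): select a time-of-day window from a Series `ts` with a
    # DatetimeIndex; the positional indexer comes from indexer_between_time.
    # >>> locs = ts.index.indexer_between_time('09:00', '09:30')
    # >>> ts.take(locs)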
def min(self, axis=None):
"""
Overridden ndarray.min to return a Timestamp
"""
if self.is_monotonic:
return self[0]
else:
min_stamp = self.asi8.min()
return Timestamp(min_stamp, tz=self.tz)
def max(self, axis=None):
"""
Overridden ndarray.max to return a Timestamp
"""
if self.is_monotonic:
return self[-1]
else:
max_stamp = self.asi8.max()
return Timestamp(max_stamp, tz=self.tz)
def _generate_regular_range(start, end, periods, offset):
if isinstance(offset, Tick):
stride = offset.nanos
if periods is None:
b = Timestamp(start).value
e = Timestamp(end).value
e += stride - e % stride
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = b + periods * stride
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = e - periods * stride
tz = end.tz
else:
raise NotImplementedError
data = np.arange(b, e, stride, dtype=np.int64)
data = DatetimeIndex._simple_new(data, None, tz=tz)
else:
if isinstance(start, Timestamp):
start = start.to_pydatetime()
if isinstance(end, Timestamp):
end = end.to_pydatetime()
xdr = generate_range(start=start, end=end,
periods=periods, offset=offset)
dates = list(xdr)
# utc = len(dates) > 0 and dates[0].tzinfo is not None
data = tools.to_datetime(dates)
return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
normalize=False, name=None, closed=None):
"""
Return a fixed frequency datetime index, with day (calendar) as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'D' (calendar daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Hong_Kong
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
name : str, default None
Name of the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed)
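# Illustrative usage sketch appended for clarity; it is not part of the original module
# and is never called by library code. The dates and frequency strings are arbitrary.
def _example_date_range_usage():  # hypothetical helper for documentation purposes only
    hourly = date_range(start='2012-01-01', end='2012-01-02', freq='H')
    weekly = date_range(start='2012-01-01', periods=4, freq='W')
    return hourly, weekly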
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
normalize=True, name=None, closed=None):
"""
Return a fixed frequency datetime index, with business day as the default
frequency
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'B' (business daily)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
    normalize : bool, default True
Normalize start/end dates to midnight before generating date range
name : str, default None
Name for the resulting index
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
Returns
-------
rng : DatetimeIndex
"""
return DatetimeIndex(start=start, end=end, periods=periods,
freq=freq, tz=tz, normalize=normalize, name=name,
closed=closed)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
normalize=True, name=None, closed=None, **kwargs):
"""
**EXPERIMENTAL** Return a fixed frequency datetime index, with
CustomBusinessDay as the default frequency
.. warning:: EXPERIMENTAL
The CustomBusinessDay class is not officially supported and the API is
likely to change in future versions. Use this at your own risk.
Parameters
----------
start : string or datetime-like, default None
Left bound for generating dates
end : string or datetime-like, default None
Right bound for generating dates
periods : integer or None, default None
If None, must specify start and end
freq : string or DateOffset, default 'C' (CustomBusinessDay)
Frequency strings can have multiples, e.g. '5H'
tz : string or None
Time zone name for returning localized DatetimeIndex, for example
Asia/Beijing
    normalize : bool, default True
Normalize start/end dates to midnight before generating date range
name : str, default None
Name for the resulting index
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Notes
-----
2 of start, end, or periods must be specified
Returns
-------
rng : DatetimeIndex
"""
if freq=='C':
holidays = kwargs.pop('holidays', [])
weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
freq = CDay(holidays=holidays, weekmask=weekmask)
return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
tz=tz, normalize=normalize, name=name,
closed=closed, **kwargs)
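# Illustrative sketch appended for clarity; not part of the original module and never
# called by library code. It shows cdate_range with a custom weekmask and a holiday
# list; the dates are arbitrary examples.
def _example_cdate_range_usage():  # hypothetical helper for documentation purposes only
    return cdate_range(start='2013-05-01', periods=8,
                       weekmask='Mon Wed Fri', holidays=['2013-05-06'])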
def _to_m8(key, tz=None):
'''
Timestamp-like => dt64
'''
if not isinstance(key, Timestamp):
# this also converts strings
key = Timestamp(key, tz=tz)
return np.int64(tslib.pydt_to_i8(key)).view(_NS_DTYPE)
def _str_to_dt_array(arr, offset=None, dayfirst=None, yearfirst=None):
def parser(x):
result = parse_time_string(x, offset, dayfirst=dayfirst,
yearfirst=yearfirst)
return result[0]
arr = np.asarray(arr, dtype=object)
data = _algos.arrmap_object(arr, parser)
return tools.to_datetime(data)
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
_daterange_cache = {}
def _naive_in_cache_range(start, end):
if start is None or end is None:
return False
else:
if start.tzinfo is not None or end.tzinfo is not None:
return False
return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
return (offset._should_cache() and
not (offset._normalize_cache and not _normalized) and
_naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
def _process_concat_data(to_concat, name):
klass = Index
kwargs = {}
concat = np.concatenate
all_dti = True
need_utc_convert = False
has_naive = False
tz = None
for x in to_concat:
if not isinstance(x, DatetimeIndex):
all_dti = False
else:
if tz is None:
tz = x.tz
if x.tz is None:
has_naive = True
if x.tz != tz:
need_utc_convert = True
tz = 'UTC'
if all_dti:
need_obj_convert = False
if has_naive and tz is not None:
need_obj_convert = True
if need_obj_convert:
to_concat = [x.asobject.values for x in to_concat]
else:
if need_utc_convert:
to_concat = [x.tz_convert('UTC').values for x in to_concat]
else:
to_concat = [x.values for x in to_concat]
# well, technically not a "class" anymore...oh well
klass = DatetimeIndex._simple_new
kwargs = {'tz': tz}
concat = com._concat_compat
else:
for i, x in enumerate(to_concat):
if isinstance(x, DatetimeIndex):
to_concat[i] = x.asobject.values
elif isinstance(x, Index):
to_concat[i] = x.values
factory_func = lambda x: klass(concat(x), name=name, **kwargs)
return to_concat, factory_func
| 33.086519 | 98 | 0.561816 |
a07aa1a8b655bc9788c5d9e1bc3bc21141f51962 | 4,311 | py | Python | cookiecutter/config.py | PLSV/cookiecutter | 2bd62c67ec3e52b8e537d5346fd96ebd82803efe | [
"BSD-3-Clause"
]
| 8 | 2020-06-15T18:49:24.000Z | 2021-04-15T10:34:24.000Z | cookiecutter/config.py | PLSV/cookiecutter | 2bd62c67ec3e52b8e537d5346fd96ebd82803efe | [
"BSD-3-Clause"
]
| 19 | 2020-06-28T16:03:56.000Z | 2020-10-07T15:52:06.000Z | cookiecutter/config.py | PLSV/cookiecutter | 2bd62c67ec3e52b8e537d5346fd96ebd82803efe | [
"BSD-3-Clause"
]
| 1 | 2020-12-17T13:08:23.000Z | 2020-12-17T13:08:23.000Z | """Global configuration handling."""
import collections
import copy
import logging
import os
import poyo
from cookiecutter.exceptions import ConfigDoesNotExistException, InvalidConfiguration
logger = logging.getLogger(__name__)
USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
BUILTIN_ABBREVIATIONS = {
'gh': 'https://github.com/{0}.git',
'gl': 'https://gitlab.com/{0}.git',
'bb': 'https://bitbucket.org/{0}',
}
DEFAULT_CONFIG = {
'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
'default_context': collections.OrderedDict([]),
'abbreviations': BUILTIN_ABBREVIATIONS,
}
def _expand_path(path):
"""Expand both environment variables and user home in the given path."""
path = os.path.expandvars(path)
path = os.path.expanduser(path)
return path
def merge_configs(default, overwrite):
"""Recursively update a dict with the key/value pair of another.
Dict values that are dictionaries themselves will be updated, whilst
preserving existing keys.
"""
new_config = copy.deepcopy(default)
for k, v in overwrite.items():
# Make sure to preserve existing items in
# nested dicts, for example `abbreviations`
if isinstance(v, dict):
new_config[k] = merge_configs(default[k], v)
else:
new_config[k] = v
return new_config
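# Illustrative sketch appended for clarity; it is not part of cookiecutter and is never
# called by the library. It shows that merging a user override into DEFAULT_CONFIG keeps
# untouched nested keys such as the built-in abbreviations.
def _example_merge_configs():  # hypothetical helper for documentation purposes only
    overwrite = {'default_context': {'full_name': 'Jane Doe'},
                 'abbreviations': {'mytpl': 'https://example.com/{0}.git'}}
    merged = merge_configs(DEFAULT_CONFIG, overwrite)
    # 'gh', 'gl' and 'bb' abbreviations are preserved alongside the new 'mytpl' entry
    return merged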
def get_config(config_path):
"""Retrieve the config from the specified path, returning a config dict."""
if not os.path.exists(config_path):
raise ConfigDoesNotExistException(
'Config file {} does not exist.'.format(config_path)
)
logger.debug('config_path is %s', config_path)
with open(config_path, encoding='utf-8') as file_handle:
try:
yaml_dict = poyo.parse_string(file_handle.read())
except poyo.exceptions.PoyoException as e:
raise InvalidConfiguration(
'Unable to parse YAML file {}. Error: {}'.format(config_path, e)
)
config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)
raw_replay_dir = config_dict['replay_dir']
config_dict['replay_dir'] = _expand_path(raw_replay_dir)
raw_cookies_dir = config_dict['cookiecutters_dir']
config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)
return config_dict
def get_user_config(config_file=None, default_config=False):
"""Return the user config as a dict.
If ``default_config`` is True, ignore ``config_file`` and return default
values for the config parameters.
If a path to a ``config_file`` is given, that is different from the default
location, load the user config from that.
Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
environment variable. If set, load the config from this path. This will
raise an error if the specified path is not valid.
If the environment variable is not set, try the default config file path
before falling back to the default config values.
"""
# Do NOT load a config. Return defaults instead.
if default_config:
logger.debug("Force ignoring user config with default_config switch.")
return copy.copy(DEFAULT_CONFIG)
# Load the given config file
if config_file and config_file is not USER_CONFIG_PATH:
logger.debug("Loading custom config from %s.", config_file)
return get_config(config_file)
try:
# Does the user set up a config environment variable?
env_config_file = os.environ['COOKIECUTTER_CONFIG']
except KeyError:
# Load an optional user config if it exists
# otherwise return the defaults
if os.path.exists(USER_CONFIG_PATH):
logger.debug("Loading config from %s.", USER_CONFIG_PATH)
return get_config(USER_CONFIG_PATH)
else:
logger.debug("User config not found. Loading default config.")
return copy.copy(DEFAULT_CONFIG)
else:
# There is a config environment variable. Try to load it.
# Do not check for existence, so invalid file paths raise an error.
logger.debug("User config not found or not specified. Loading default config.")
return get_config(env_config_file)
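# Illustrative sketch (appended; comments only): the lookup order exercised by
# get_user_config. The path and environment value below are hypothetical.
# >>> os.environ['COOKIECUTTER_CONFIG'] = '/tmp/custom-cookiecutterrc'  # highest priority
# >>> get_user_config()                      # loads /tmp/custom-cookiecutterrc
# >>> get_user_config(default_config=True)   # ignores every file, returns the defaults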
| 34.488 | 87 | 0.689399 |
273bafb3705a3c03dec9c86413fdbdbf9aacfa4e | 1,556 | py | Python | blog/admin.py | konerjonlar/codejam | a420c2e422366170e8217a0654d6b1254fac7442 | [
"MIT"
]
| null | null | null | blog/admin.py | konerjonlar/codejam | a420c2e422366170e8217a0654d6b1254fac7442 | [
"MIT"
]
| null | null | null | blog/admin.py | konerjonlar/codejam | a420c2e422366170e8217a0654d6b1254fac7442 | [
"MIT"
]
| 1 | 2021-12-03T17:46:48.000Z | 2021-12-03T17:46:48.000Z | from django.contrib import admin
from blog.models import Post, Project
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ("title", "description", "get_tags", "is_active")
search_fields = ("title", "tags")
readonly_fields = ("created_at", "updated_at")
fieldsets = (
(
"Post Information",
{
"fields": (
"author",
"title",
"description",
"content",
"tags",
"image",
"is_active",
),
},
),
(
"Dates",
{
"fields": (
"created_at",
"updated_at",
),
},
),
)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
list_display = ("title", "description", "tags", "is_active")
search_fields = ("title", "tags")
readonly_fields = ("created_at", "updated_at")
fieldsets = (
(
"Post Information",
{
"fields": (
"title",
"description",
"site",
"tags",
"is_active",
),
},
),
(
"Dates",
{
"fields": (
"created_at",
"updated_at",
),
},
),
)
| 22.882353 | 68 | 0.359254 |
ebcb529679dc379f570fed2f25c56939b3cdcf97 | 409 | py | Python | conanfile.py | bincrafters/conan-boost_uuid | 18c7dbe5cfa9d1954179e181a5b93bef0f4e8e53 | [
"MIT"
]
| null | null | null | conanfile.py | bincrafters/conan-boost_uuid | 18c7dbe5cfa9d1954179e181a5b93bef0f4e8e53 | [
"MIT"
]
| 1 | 2018-02-17T18:34:44.000Z | 2018-02-20T18:31:31.000Z | conanfile.py | bincrafters/conan-boost_uuid | 18c7dbe5cfa9d1954179e181a5b93bef0f4e8e53 | [
"MIT"
]
| 2 | 2018-02-15T19:18:22.000Z | 2019-10-12T16:22:57.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import python_requires
base = python_requires("boost_base/2.0.0@bincrafters/testing")
class BoostUuidConan(base.BoostBaseConan):
name = "boost_uuid"
version = "1.70.0"
def package_info(self):
super(BoostUuidConan, self).package_info()
if self.settings.os == "Windows":
self.cpp_info.libs.append("Bcrypt")
| 22.722222 | 62 | 0.667482 |
aba8b3034601aaf3683086a85e7ae701a2275d7c | 21,517 | py | Python | trimesh/collision.py | dani2112/thesis-trimesh | 99f67a7dc2e05b78e088152a9cafedaebe74de1b | [
"MIT"
]
| null | null | null | trimesh/collision.py | dani2112/thesis-trimesh | 99f67a7dc2e05b78e088152a9cafedaebe74de1b | [
"MIT"
]
| null | null | null | trimesh/collision.py | dani2112/thesis-trimesh | 99f67a7dc2e05b78e088152a9cafedaebe74de1b | [
"MIT"
]
| null | null | null | import numpy as np
import collections
from .constants import log
_fcl_exists = True
try:
import fcl # pip install python-fcl
except BaseException:
log.warning('No FCL -- collision checking will not work')
_fcl_exists = False
class ContactData(object):
"""
Data structure for holding information about a collision contact.
"""
def __init__(self, names, contact):
"""
Initialize a ContactData.
Parameters
----------
names : list of str
The names of the two objects in order.
contact : fcl.Contact
The contact in question.
"""
self.names = set(names)
self._inds = {
names[0]: contact.b1,
names[1]: contact.b2
}
self._point = contact.pos
@property
def point(self):
"""
The 3D point of intersection for this contact.
Returns
-------
point : (3,) float
The intersection point.
"""
return self._point
def index(self, name):
"""
Returns the index of the face in contact for the mesh with
the given name.
Parameters
----------
name : str
The name of the target object.
Returns
-------
index : int
            The index of the face in collision
"""
return self._inds[name]
class DistanceData(object):
"""
Data structure for holding information about a distance query.
"""
def __init__(self, names, result):
"""
Initialize a DistanceData.
Parameters
----------
names : list of str
The names of the two objects in order.
contact : fcl.DistanceResult
The distance query result.
"""
self.names = set(names)
self._inds = {
names[0]: result.b1,
names[1]: result.b2
}
self._points = {
names[0]: result.nearest_points[0],
names[1]: result.nearest_points[1]
}
self._distance = result.min_distance
@property
def distance(self):
"""
Returns the distance between the two objects.
Returns
-------
distance : float
The euclidean distance between the objects.
"""
return self._distance
def index(self, name):
"""
Returns the index of the closest face for the mesh with
the given name.
Parameters
----------
name : str
The name of the target object.
Returns
-------
index : int
            The index of the face in collision.
"""
return self._inds[name]
def point(self, name):
"""
The 3D point of closest distance on the mesh with the given name.
Parameters
----------
name : str
The name of the target object.
Returns
-------
point : (3,) float
The closest point.
"""
return self._points[name]
class CollisionManager(object):
"""
A mesh-mesh collision manager.
"""
def __init__(self):
"""
Initialize a mesh-mesh collision manager.
"""
if not _fcl_exists:
raise ValueError('No FCL Available!')
# {name: {geom:, obj}}
self._objs = {}
# {id(bvh) : str, name}
# unpopulated values will return None
self._names = collections.defaultdict(lambda: None)
# cache BVH objects
# {mesh.md5(): fcl.BVHModel object}
self._bvh = {}
self._manager = fcl.DynamicAABBTreeCollisionManager()
self._manager.setup()
def add_object(self,
name,
mesh,
transform=None):
"""
Add an object to the collision manager.
If an object with the given name is already in the manager,
replace it.
Parameters
----------
name : str
An identifier for the object
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object
"""
# if no transform passed, assume identity transform
if transform is None:
transform = np.eye(4)
transform = np.asanyarray(transform, dtype=np.float32)
if transform.shape != (4, 4):
raise ValueError('transform must be (4,4)!')
# create or recall from cache BVH
bvh = self._get_BVH(mesh)
# create the FCL transform from (4,4) matrix
t = fcl.Transform(transform[:3, :3], transform[:3, 3])
o = fcl.CollisionObject(bvh, t)
# Add collision object to set
if name in self._objs:
self._manager.unregisterObject(self._objs[name])
self._objs[name] = {'obj': o,
'geom': bvh}
# store the name of the geometry
self._names[id(bvh)] = name
self._manager.registerObject(o)
self._manager.update()
return o
def remove_object(self, name):
"""
Delete an object from the collision manager.
Parameters
----------
name : str
The identifier for the object
"""
if name in self._objs:
self._manager.unregisterObject(self._objs[name]['obj'])
self._manager.update(self._objs[name]['obj'])
# remove objects from _objs
geom_id = id(self._objs.pop(name)['geom'])
# remove names
self._names.pop(geom_id)
else:
raise ValueError('{} not in collision manager!'.format(name))
def set_transform(self, name, transform):
"""
Set the transform for one of the manager's objects.
This replaces the prior transform.
Parameters
----------
name : str
An identifier for the object already in the manager
transform : (4,4) float
A new homogenous transform matrix for the object
"""
if name in self._objs:
o = self._objs[name]['obj']
o.setRotation(transform[:3, :3])
o.setTranslation(transform[:3, 3])
self._manager.update(o)
else:
raise ValueError('{} not in collision manager!'.format(name))
def in_collision_single(self, mesh, transform=None,
return_names=False, return_data=False):
"""
Check a single object for collisions against all objects in the
manager.
Parameters
----------
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix
return_names : bool
If true, a set is returned containing the names
of all objects in collision with the object
return_data : bool
If true, a list of ContactData is returned as well
Returns
------------
is_collision : bool
True if a collision occurs and False otherwise
names : set of str
The set of names of objects that collided with the
provided one
contacts : list of ContactData
All contacts detected
"""
if transform is None:
transform = np.eye(4)
# Create FCL data
b = self._get_BVH(mesh)
t = fcl.Transform(transform[:3, :3], transform[:3, 3])
o = fcl.CollisionObject(b, t)
# Collide with manager's objects
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(request=fcl.CollisionRequest(
num_max_contacts=100000,
enable_contact=True))
self._manager.collide(o, cdata, fcl.defaultCollisionCallback)
result = cdata.result.is_collision
        # If we want to return the objects that were in collision, collect them.
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
cg = contact.o1
if cg == b:
cg = contact.o2
name = self._extract_name(cg)
names = (name, '__external')
if cg == contact.o2:
names = reversed(names)
if return_names:
objs_in_collision.add(name)
if return_data:
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result
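    # Illustrative sketch (added; not part of trimesh): a typical single-object query,
    # assuming `m` is a trimesh.Trimesh and `T` is a 4x4 homogenous pose matrix.
    # >>> cm = CollisionManager()
    # >>> cm.add_object('scene_mesh', m)
    # >>> hit, names = cm.in_collision_single(m, transform=T, return_names=True)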
def in_collision_internal(self, return_names=False, return_data=False):
"""
Check if any pair of objects in the manager collide with one another.
Parameters
----------
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names in alphabetical order indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(request=fcl.CollisionRequest(
num_max_contacts=100000, enable_contact=True))
self._manager.collide(cdata, fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
names = (self._extract_name(contact.o1),
self._extract_name(contact.o2))
if return_names:
objs_in_collision.add(tuple(sorted(names)))
if return_data:
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result
def in_collision_other(self, other_manager,
return_names=False, return_data=False):
"""
Check if any object from this manager collides with any object
from another manager.
Parameters
-------------------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a set is returned containing the names
of all pairs of objects in collision.
return_data : bool
If true, a list of ContactData is returned as well
Returns
-------------
is_collision : bool
True if a collision occurred between any pair of objects
and False otherwise
names : set of 2-tup
The set of pairwise collisions. Each tuple
contains two names (first from this manager,
second from the other_manager) indicating
that the two corresponding objects are in collision.
contacts : list of ContactData
All contacts detected
"""
cdata = fcl.CollisionData()
if return_names or return_data:
cdata = fcl.CollisionData(
request=fcl.CollisionRequest(
num_max_contacts=100000,
enable_contact=True))
self._manager.collide(other_manager._manager,
cdata,
fcl.defaultCollisionCallback)
result = cdata.result.is_collision
objs_in_collision = set()
contact_data = []
if return_names or return_data:
for contact in cdata.result.contacts:
reverse = False
names = (self._extract_name(contact.o1),
other_manager._extract_name(contact.o2))
if names[0] is None:
names = (self._extract_name(contact.o2),
other_manager._extract_name(contact.o1))
reverse = True
if return_names:
objs_in_collision.add(names)
if return_data:
if reverse:
names = reversed(names)
contact_data.append(ContactData(names, contact))
if return_names and return_data:
return result, objs_in_collision, contact_data
elif return_names:
return result, objs_in_collision
elif return_data:
return result, contact_data
else:
return result
def min_distance_single(self,
mesh,
transform=None,
return_name=False,
return_data=False):
"""
Get the minimum distance between a single object and any
object in the manager.
Parameters
---------------
mesh : Trimesh object
The geometry of the collision object
transform : (4,4) float
Homogenous transform matrix for the object
return_names : bool
If true, return name of the closest object
return_data : bool
If true, a DistanceData object is returned as well
Returns
-------------
distance : float
Min distance between mesh and any object in the manager
name : str
The name of the object in the manager that was closest
data : DistanceData
Extra data about the distance query
"""
if transform is None:
transform = np.eye(4)
# Create FCL data
b = self._get_BVH(mesh)
t = fcl.Transform(transform[:3, :3], transform[:3, 3])
o = fcl.CollisionObject(b, t)
# Collide with manager's objects
ddata = fcl.DistanceData()
if return_data:
ddata = fcl.DistanceData(
fcl.DistanceRequest(enable_nearest_points=True),
fcl.DistanceResult()
)
self._manager.distance(o, ddata, fcl.defaultDistanceCallback)
distance = ddata.result.min_distance
        # If we want to return the objects that were in collision, collect them.
name, data = None, None
if return_name or return_data:
cg = ddata.result.o1
if cg == b:
cg = ddata.result.o2
name = self._extract_name(cg)
names = (name, '__external')
if cg == ddata.result.o2:
names = reversed(names)
data = DistanceData(names, ddata.result)
if return_name and return_data:
return distance, name, data
elif return_name:
return distance, name
elif return_data:
return distance, data
else:
return distance
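    # Illustrative sketch (added): a distance query against the managed objects,
    # assuming `probe` is a trimesh.Trimesh already positioned in the world frame and
    # `manager` is a populated CollisionManager.
    # >>> d, closest = manager.min_distance_single(probe, return_name=True)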
def min_distance_internal(self, return_names=False, return_data=False):
"""
Get the minimum distance between any pair of objects in the manager.
Parameters
-------------
return_names : bool
If true, a 2-tuple is returned containing the names
of the closest objects.
return_data : bool
If true, a DistanceData object is returned as well
Returns
-----------
distance : float
Min distance between any two managed objects
names : (2,) str
The names of the closest objects
data : DistanceData
Extra data about the distance query
"""
ddata = fcl.DistanceData()
if return_data:
ddata = fcl.DistanceData(
fcl.DistanceRequest(enable_nearest_points=True),
fcl.DistanceResult()
)
self._manager.distance(ddata, fcl.defaultDistanceCallback)
distance = ddata.result.min_distance
names, data = None, None
if return_names or return_data:
names = (self._extract_name(ddata.result.o1),
self._extract_name(ddata.result.o2))
data = DistanceData(names, ddata.result)
names = tuple(sorted(names))
if return_names and return_data:
return distance, names, data
elif return_names:
return distance, names
elif return_data:
return distance, data
else:
return distance
def min_distance_other(self, other_manager,
return_names=False, return_data=False):
"""
Get the minimum distance between any pair of objects,
one in each manager.
Parameters
----------
other_manager : CollisionManager
Another collision manager object
return_names : bool
If true, a 2-tuple is returned containing
the names of the closest objects.
return_data : bool
If true, a DistanceData object is returned as well
Returns
-----------
distance : float
The min distance between a pair of objects,
one from each manager.
names : 2-tup of str
A 2-tuple containing two names (first from this manager,
second from the other_manager) indicating
the two closest objects.
data : DistanceData
Extra data about the distance query
"""
ddata = fcl.DistanceData()
if return_data:
ddata = fcl.DistanceData(
fcl.DistanceRequest(enable_nearest_points=True),
fcl.DistanceResult()
)
self._manager.distance(other_manager._manager,
ddata,
fcl.defaultDistanceCallback)
distance = ddata.result.min_distance
names, data = None, None
if return_names or return_data:
reverse = False
names = (self._extract_name(ddata.result.o1),
other_manager._extract_name(ddata.result.o2))
if names[0] is None:
reverse = True
names = (self._extract_name(ddata.result.o2),
other_manager._extract_name(ddata.result.o1))
dnames = tuple(names)
if reverse:
dnames = reversed(dnames)
data = DistanceData(dnames, ddata.result)
if return_names and return_data:
return distance, names, data
elif return_names:
return distance, names
elif return_data:
return distance, data
else:
return distance
def _get_BVH(self, mesh):
"""
Get a BVH for a mesh.
Parameters
-------------
mesh : Trimesh
Mesh to create BVH for
Returns
--------------
bvh : fcl.BVHModel
            BVH object of source mesh
"""
bvh = mesh_to_BVH(mesh)
return bvh
def _extract_name(self, geom):
"""
Retrieve the name of an object from the manager by its
CollisionObject, or return None if not found.
Parameters
-----------
geom : CollisionObject or BVHModel
Input model
Returns
------------
names : hashable
Name of input geometry
"""
return self._names[id(geom)]
def mesh_to_BVH(mesh):
"""
Create a BVHModel object from a Trimesh object
Parameters
-----------
mesh : Trimesh
Input geometry
Returns
------------
bvh : fcl.BVHModel
BVH of input geometry
"""
bvh = fcl.BVHModel()
bvh.beginModel(num_tris_=len(mesh.faces),
num_vertices_=len(mesh.vertices))
bvh.addSubModel(verts=mesh.vertices,
triangles=mesh.faces)
bvh.endModel()
return bvh
def scene_to_collision(scene):
"""
Create collision objects from a trimesh.Scene object.
Parameters
------------
scene : trimesh.Scene
Scene to create collision objects for
Returns
------------
manager : CollisionManager
CollisionManager for objects in scene
objects: {node name: CollisionObject}
Collision objects for nodes in scene
"""
manager = CollisionManager()
objects = {}
for node in scene.graph.nodes_geometry:
T, geometry = scene.graph[node]
objects[node] = manager.add_object(name=node,
mesh=scene.geometry[geometry],
transform=T)
return manager, objects
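# Illustrative sketch appended for clarity; it is not part of trimesh and is never
# called by the library. It builds collision objects for a scene and runs a
# self-collision check; `scene` is assumed to be a trimesh.Scene.
def _example_scene_collision(scene):  # hypothetical helper for documentation purposes only
    manager, objects = scene_to_collision(scene)
    colliding, pairs = manager.in_collision_internal(return_names=True)
    return colliding, pairs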
| 30.220506 | 77 | 0.551936 |
ff0e465a023ae21485d8f8630f5a9b2bf4de0ed6 | 175 | py | Python | tests/example/reflexes/example_reflex.py | jefftriplett/django-sockpuppet | eedc7f0d4b95646991d8db569e5fca61d56149e2 | [
"MIT"
]
| null | null | null | tests/example/reflexes/example_reflex.py | jefftriplett/django-sockpuppet | eedc7f0d4b95646991d8db569e5fca61d56149e2 | [
"MIT"
]
| null | null | null | tests/example/reflexes/example_reflex.py | jefftriplett/django-sockpuppet | eedc7f0d4b95646991d8db569e5fca61d56149e2 | [
"MIT"
]
| null | null | null | from sockpuppet.reflex import Reflex
class ExampleReflex(Reflex):
def increment(self, step=1):
self.session['count'] = int(self.element.dataset['count']) + step
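# Illustrative note (added; not part of the example app): a reflex like this is
# typically wired up from a template element such as
#   <a data-reflex="click->ExampleReflex#increment" data-count="{{ count }}">+</a>
# so `self.element.dataset['count']` reads the data-count attribute of the clicked tag.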
| 25 | 73 | 0.702857 |
b40ecf2b9ecbf85e04341d9bc16eb50500aa55bd | 27,642 | py | Python | suncasa/pygsfit/gsutils.py | binchensolar/suncasa | 55339715e791be9fc2fc555c4f4f6adf1b350b37 | [
"BSD-2-Clause"
]
| 2 | 2018-02-12T09:34:23.000Z | 2019-07-16T18:25:12.000Z | suncasa/pygsfit/gsutils.py | wulinhui1/suncasa-src | 1f94aaabaf6a3911fa532648ec6676a221553436 | [
"BSD-2-Clause"
]
| 26 | 2016-11-09T17:11:45.000Z | 2021-08-20T13:41:50.000Z | suncasa/pygsfit/gsutils.py | wulinhui1/suncasa-src | 1f94aaabaf6a3911fa532648ec6676a221553436 | [
"BSD-2-Clause"
]
| 17 | 2016-10-27T18:35:46.000Z | 2021-08-03T05:33:57.000Z | import numpy as np
# import sys
import math
import os, sys, platform
import astropy.units as u
from sunpy import map as smap
from astropy.coordinates import SkyCoord
from suncasa.io import ndfits
from . import gstools # initialization library - located either in the current directory or in the system path
from suncasa.utils import mstools
import lmfit
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from suncasa.utils import mstools
from suncasa.utils import qlookplot as ql
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from astropy.io import fits
import numpy.ma as ma
# name of the fast gyrosynchrotron codes shared library
if platform.system() == 'Linux' or platform.system() == 'Darwin':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr.so')
if platform.system() == 'Windows':
libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'binaries/MWTransferArr64.dll')
def kev2k(eng):
return 11604525.00617 * eng
def ff_emission(em, T=1.e7, Z=1., mu=1.e10):
from astropy import constants as const
import astropy.units as u
    T = T * u.K  # temperature in Kelvin
mu = mu * u.Hz
esu = const.e.esu
k_B = const.k_B.cgs
m_e = const.m_e.cgs
c = const.c.cgs
    bmax = (3 * k_B * T / m_e) ** 0.5 / 2.0 / np.pi / mu  # T and mu already carry units
bmin = Z * esu ** 2 / 3. / k_B / T
lnbb = np.log((bmax / bmin).value)
ka_mu = 1. / mu ** 2 / T ** 1.5 * (
Z ** 2 * esu ** 6 / c / np.sqrt(2. * np.pi * (m_e * k_B) ** 3)) * np.pi ** 2 / 4.0 * lnbb
# print(ka_mu, em)
opc = ka_mu * em
return T.value * (1 - np.exp(-opc.value))
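# Illustrative sketch appended for clarity; not part of the original module and never
# called by it. The emission measure, temperature and frequencies are assumed,
# placeholder values, not measurements.
def _example_ff_emission():  # hypothetical helper for documentation purposes only
    freqs_hz = np.array([3e9, 6e9, 12e9])
    return np.array([ff_emission(1e28, T=1e7, Z=1., mu=f) for f in freqs_hz])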
def sfu2tb(freq, flux, area):
# frequency in Hz
# flux in sfu
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
Tb = flux * sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr)
return Tb
def tb2sfu(freq, tb, area):
# frequency in Hz
# brightness temperature in K
# area: area of the radio source in arcsec^2
sfu2cgs = 1e-19
vc = 2.998e10
kb = 1.38065e-16
# sr = np.pi * (size[0] / 206265. / 2.) * (size[1] / 206265. / 2.)
sr = area / 206265. ** 2
flux = tb / (sfu2cgs * vc ** 2. / (2. * kb * freq ** 2. * sr))
return flux
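# Illustrative sketch appended for clarity; never called by the module. sfu2tb and
# tb2sfu are inverses for a fixed frequency and source area, so the round trip should
# recover the input flux. The numbers below are arbitrary placeholders.
def _example_flux_tb_roundtrip():  # hypothetical helper for documentation purposes only
    freq_hz, flux_sfu, area_arcsec2 = 6e9, 10.0, 30.0 * 30.0
    tb = sfu2tb(freq_hz, flux_sfu, area_arcsec2)
    return tb2sfu(freq_hz, tb, area_arcsec2)  # ~10.0 sfu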
def initspecplot(axes, cplts):
errobjs = []
for cpltidx, cplt in enumerate(cplts):
errobjs.append(axes.errorbar([], [], yerr=[], linestyle='', marker='o', mfc='none', mec=cplt, alpha=1.0))
axes.set_yscale("log")
axes.set_xscale("log")
axes.set_xlim([1, 20])
axes.set_ylim([0.1, 1000])
axes.set_xticks([1, 5, 10, 20])
axes.set_xticklabels([1, 5, 10, 20])
axes.set_xticks([1, 5, 10, 20])
axes.set_yticks([])
axes.set_yticks([0.01, 0.1, 1, 10, 100, 1000])
axes.set_ylabel('T$_b$ [MK]')
axes.set_xlabel('Frequency [GHz]')
x = np.linspace(1, 20, 10)
for ll in [-1, 0, 1, 2, 3, 4]:
y = 10. ** (-2 * np.log10(x) + ll)
axes.plot(x, y, 'k--', alpha=0.1)
# y2 = 10. ** (-4 * np.log10(x) + ll)
# y3 = 10. ** (-8 * np.log10(x) + ll)
# ax_eospec.plot(x, y, 'k--', x, y2, 'k:', x, y3, 'k-.', alpha=0.1)
return errobjs
def set_errorobj(xout, yout, errobj, yerr=None):
eospec, dummy, (errbar_eospec,) = errobj
eospec.set_data(xout, yout)
if yerr is not None:
yerr_top = yout + yerr
yerr_bot = yout - yerr
new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
errbar_eospec.set_segments(new_segments_y)
def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False):
    # params are defined by lmfit.Parameters()
    '''
    params: parameters defined by lmfit.Parameters()
freqghz: frequencies in GHz
    ssz: source area in arcsec^2
tb: reference brightness temperature in K
tb_err: uncertainties of reference brightness temperature in K
'''
from scipy import interpolate
GET_MW = gstools.initGET_MW(libname) # load the library
ssz = float(params['ssz'].value) # # source area in arcsec^2
depth = float(params['depth'].value) # total source depth in arcsec
Bmag = float(params['Bmag'].value) # magnetic field strength in G
Tth = float(params['Tth'].value) # thermal temperature in MK
nth = float(params['nth'].value) # thermal density in 1e10 cm^{-3}
nrlh = 10. ** float(params['lognrlh'].value) # total nonthermal density above 0.1 MeV
delta = float(params['delta'].value) # powerlaw index
theta = float(params['theta'].value) # viewing angle in degrees
Emin = float(params['Emin'].value) # low energy cutoff of nonthermal electrons in MeV
Emax = float(params['Emax'].value) # high energy cutoff of nonthermal electrons in MeV
E_hi = 0.1
    nrl = nrlh * (Emin ** (1. - delta) - Emax ** (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta))
Nf = 100 # number of frequencies
NSteps = 1 # number of nodes along the line-of-sight
N_E = 15 # number of energy nodes
N_mu = 15 # number of pitch-angle nodes
Lparms = np.zeros(11, dtype='int32') # array of dimensions etc.
Lparms[0] = NSteps
Lparms[1] = Nf
Lparms[2] = N_E
Lparms[3] = N_mu
Rparms = np.zeros(5, dtype='double') # array of global floating-point parameters
Rparms[0] = ssz * arcsec2cm ** 2 # Area, cm^2
# Rparms[0] = 1e20 # area, cm^2
Rparms[1] = 1e9 # starting frequency to calculate spectrum, Hz
Rparms[2] = 0.02 # logarithmic step in frequency
Rparms[3] = 12 # f^C
Rparms[4] = 12 # f^WH
ParmLocal = np.zeros(24, dtype='double') # array of voxel parameters - for a single voxel
ParmLocal[0] = depth * arcsec2cm / NSteps # voxel depth, cm
ParmLocal[1] = Tth * 1e6 # T_0, K
ParmLocal[2] = nth * 1e10 # n_0 - thermal electron density, cm^{-3}
ParmLocal[3] = Bmag # B - magnetic field, G
Parms = np.zeros((24, NSteps), dtype='double', order='F') # 2D array of input parameters - for multiple voxels
for i in range(NSteps):
Parms[:, i] = ParmLocal # most of the parameters are the same in all voxels
# if NSteps > 1:
# Parms[4, i] = 50.0 + 30.0 * i / (NSteps - 1) # the viewing angle varies from 50 to 80 degrees along the LOS
# else:
# Parms[4, i] = 50.0 # the viewing angle varies from 50 to 80 degrees along the LOS
Parms[4, i] = theta
# parameters of the electron distribution function
n_b = nrl # n_b - nonthermal electron density, cm^{-3}
mu_c = np.cos(np.pi * 70 / 180) # loss-cone boundary
dmu_c = 0.2 # Delta_mu
E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double') # energy grid (logarithmically spaced)
mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double') # pitch-angle grid
f0 = np.zeros((N_E, N_mu), dtype='double') # 2D distribution function array - for a single voxel
# computing the distribution function (equivalent to PLW & GLC)
A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta))
B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c))
for i in range(N_E):
for j in range(N_mu):
amu = abs(mu_arr[j])
f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2))
f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double',
order='F') # 3D distribution function array - for multiple voxels
for k in range(NSteps):
f_arr[:, :, k] = f0 # electron distribution function is the same in all voxels
RL = np.zeros((7, Nf), dtype='double', order='F') # input/output array
# calculating the emission for array distribution (array -> on)
res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL)
if res:
# retrieving the results
f = RL[0]
I_L = RL[5]
I_R = RL[6]
if showplt:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(f, I_L + I_R)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Total intensity (array)')
ax.set_xlabel('Frequency, GHz')
ax.set_ylabel('Intensity, sfu')
flx_model = I_L + I_R
flx_model = np.nan_to_num(flx_model) + 1e-11
logf = np.log10(f)
logflx_model = np.log10(flx_model)
logfreqghz = np.log10(freqghz)
interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear')
logmflx = interpfunc(logfreqghz)
mflx = 10. ** logmflx
mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz)
else:
print("Calculation error!")
if tb is None:
return mtb
if tb_err is None:
# return mTb - Tb
return mtb - tb
# wt = 1./flx_err
# wt = 1./(Tb_err/Tb/np.log(10.))
# residual = np.abs((logmTb - np.log10(Tb))) * wt
# residual = np.abs((mflx - flx)) * wt
residual = (mtb - tb) / tb_err
return residual
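# Illustrative sketch appended for clarity; it is not part of the original module and is
# never called by it. It shows how to build the lmfit.Parameters object that
# mwspec2min_1src expects; the numerical values are arbitrary placeholders, and actually
# evaluating the model requires the compiled MWTransferArr library referenced above.
def _example_mwspec_params():  # hypothetical helper for documentation purposes only
    params = lmfit.Parameters()
    params.add('ssz', value=100.)     # source area in arcsec^2
    params.add('depth', value=10.)    # source depth in arcsec
    params.add('Bmag', value=300.)    # magnetic field strength in G
    params.add('Tth', value=5.)       # thermal temperature in MK
    params.add('nth', value=1.)       # thermal density in 1e10 cm^-3
    params.add('lognrlh', value=7.)   # log10 nonthermal density above 0.1 MeV
    params.add('delta', value=4.)     # power-law index
    params.add('theta', value=60.)    # viewing angle in degrees
    params.add('Emin', value=0.1)     # low-energy cutoff in MeV
    params.add('Emax', value=10.)     # high-energy cutoff in MeV
    freqghz = np.linspace(1., 18., 30)
    return mwspec2min_1src(params, freqghz)  # model brightness temperatures in K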
class RegionSelector:
# def set_errorobj(self, xout, yout, errobj, yerr):
# eospec, dummy, (errbar_eospec,) = errobj
# eospec.set_data(xout, yout)
# if yerr is not None:
# yerr_top = yout + yerr
# yerr_bot = yout - yerr
# new_segments_y = [np.array([[x, yt], [x, yb]]) for x, yt, yb in zip(xout, yerr_top, yerr_bot)]
# errbar_eospec.set_segments(new_segments_y)
# return 1
def subdata(self, xs, ys, rfile):
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(rfile)
ny, nx = rmap.data.shape
tr_coord = rmap.top_right_coord
bl_coord = rmap.bottom_left_coord
x0 = bl_coord.Tx.to(u.arcsec).value
y0 = bl_coord.Ty.to(u.arcsec).value
x1 = tr_coord.Tx.to(u.arcsec).value
y1 = tr_coord.Ty.to(u.arcsec).value
dx = rmap.scale.axis1.to(u.arcsec / u.pix).value
dy = rmap.scale.axis2.to(u.arcsec / u.pix).value
mapx, mapy = np.linspace(x0, x1, nx) - dx / 2.0, np.linspace(y0, y1, ny) - dy / 2.0
xsmin = np.nanmin(xs)
xsmax = np.nanmax(xs)
ysmin = np.nanmin(ys)
ysmax = np.nanmax(ys)
if np.abs(xsmax - xsmin) < dx:
xsmax = xsmin + dx
if np.abs(ysmax - ysmin) < dy:
ysmax = ysmin + dy
xmask = np.logical_and(mapx >= xsmin, mapx <= xsmax)
nxnew = np.count_nonzero(xmask)
ymask = np.logical_and(mapy >= ysmin, mapy <= ysmax)
nynew = np.count_nonzero(ymask)
xmask = np.tile(xmask, ny).reshape(ny, nx)
ymask = np.tile(ymask, nx).reshape(nx, ny).transpose()
mask = xmask & ymask
# print(np.count_nonzero(mask))
self.npix = np.count_nonzero(mask)
self.area = self.npix * dx * dy
data = rdata[:, mask]
# print(rdata[:, :, mask])
# print(mask.shape, rdata.shape, data.shape)
data = np.squeeze(data)
# print(data.shape)
return data
def __init__(self, clkpnts, boxlines, eofiles, errobjs, cfreqs=None, rms=None, eofile_ref=None, errobj_ref=None,
wTmap=None, outspec_ff=None, scatter_gsfit=None,
get_peak=False, get_sum=False):
self.boxline = []
self.clkpnt = []
self.xs = list(clkpnts[0].get_xdata())
self.ys = list(clkpnts[0].get_ydata())
self.npix = None
self.area = None
self.xout = []
self.yout = []
self.xouterr = []
self.youterr = []
for errobj in errobjs:
eospec, dummy, (errbar_eospec,) = errobj
self.xout.append(eospec.get_xdata())
self.yout.append(eospec.get_ydata())
self.errobjs = errobjs
self.errobj_ref = errobj_ref
self.outspec_ff = outspec_ff
self.scatter_gsfit = scatter_gsfit
self.cfreqs = cfreqs
self.rms = rms
self.eofiles = eofiles
self.eofile_ref = eofile_ref
self.wTmap = wTmap
self.wT = None
self.em = None
self.get_peak = get_peak
self.get_sum = get_sum
self.tps = []
self.params = None
for idx, s in enumerate(clkpnts):
self.boxline.append(boxlines[idx])
self.clkpnt.append(s)
self.cid = s.figure.canvas.mpl_connect('button_press_event', self)
def __call__(self, event):
axes = [clkpnt.axes for clkpnt in self.clkpnt]
if self.clkpnt[0].figure.canvas.toolbar.mode == '':
if event.inaxes not in axes:
return
nxs = len(self.xs)
if event.button == 1:
if nxs < 2:
self.xs.append(event.xdata)
self.ys.append(event.ydata)
else:
self.xs = [event.xdata]
self.ys = [event.ydata]
elif event.button == 3:
if len(self.xs) > 0:
self.xs.pop()
self.ys.pop()
self.get_flux()
def get_flux(self):
if len(self.xs) > 0:
xs = np.array(self.xs, dtype=np.float64)
ys = np.array(self.ys, dtype=np.float64)
for clkpnt in self.clkpnt:
clkpnt.set_data(xs, ys)
else:
for clkpnt in self.clkpnt:
clkpnt.set_data([], [])
nxs = len(self.xs)
if nxs <= 1:
for line in self.boxline:
line.set_data([], [])
elif nxs == 2:
datas = []
# eofile = self.eofiles[0]
# rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofile)
# data = self.subdata(xs, ys, eofile)
# datas.append(data)
for tidx, eofile in enumerate(self.eofiles):
data = self.subdata(xs, ys, eofile)
datas.append(data)
if self.eofile_ref is not None:
data_ref = self.subdata(xs, ys, self.eofile_ref)
if self.wTmap is not None:
datawT = self.subdata(xs, ys, self.wTmap)
if self.get_peak:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmax(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
                if self.eofile_ref is not None:
                    if data_ref.ndim > 1:
                        youts_outspec_ref = np.nanmax(data_ref, axis=-1) / 1e6
                    else:
                        youts_outspec_ref = data_ref / 1e6
else:
youts_outspec = []
for data in datas:
if data.ndim > 1:
youts_outspec.append(np.nanmean(data, axis=-1) / 1e6)
else:
youts_outspec.append(data / 1e6)
if self.eofile_ref is not None:
if data.ndim > 1:
youts_outspec_ref = np.nanmean(data_ref, axis=-1) / 1e6
else:
youts_outspec_ref = data_ref / 1e6
self.tps = []
for data in datas:
if data.ndim > 1:
self.tps.append(np.nansum(data, axis=-1) / 1e6)
else:
self.tps.append(data / 1e6)
xout = self.cfreqs
for tidx, errobj in enumerate(self.errobjs):
set_errorobj(xout, youts_outspec[tidx], errobj, self.rms)
if self.eofile_ref is not None:
set_errorobj(xout, youts_outspec_ref, self.errobj_ref, self.rms)
if self.wTmap is not None:
print(datawT.shape)
wT = np.nanmean(datawT[..., 1]) * 1e6
em = np.nanmean(datawT[..., 0])
arcsec2cm = (self.wTmap[0].rsun_meters / self.wTmap[0].rsun_obs).to(u.cm / u.arcsec).value
# nele = 4.0e10
# depth = em / nele ** 2 / arcsec2cm
# print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, depth: {:.1f} arcsec if nele is {:.2e} cm-3'.format(wT / 1e6, em, depth, nele))
depth = 20. ## arcsec
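                # Assuming a uniform source along the line of sight, the column
                # emission measure is EM ~ ne^2 * depth, so ne = sqrt(EM / depth)
                # once the assumed depth is converted from arcsec to cm.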
nele = np.sqrt(em / (depth * arcsec2cm))
print('Temperature: {:.1f} MK, EM: {:.2e} cm-5, nele: {:.2e} cm-3 if depth is {:.1f} arcsec'.format(
wT / 1e6, em, nele, depth))
self.wT = wT
self.em = em
yout_ff = np.array([ff_emission(em, T=wT, Z=1., mu=ll) for ll in xout * 1e9]) / 1.e6
self.outspec_ff.set_data(xout, yout_ff)
self.errobjs[0][0].figure.canvas.draw_idle()
for line in self.boxline:
line.set_data([xs[0], xs[1], xs[1], xs[0], xs[0]], [ys[0], ys[0], ys[1], ys[1], ys[0]])
clkpnt.figure.canvas.draw_idle()
class GStool:
# def get_showaia(self):
# return self._showaia
#
# def set_showaia(self, value):
# self._showaia = value
#
# showaia = property(fget=get_showaia, fset=set_showaia, doc="`Boolean`-like: Display AIA image or not")
def __init__(self, eofiles, aiafile=None, xycen=None, fov=None, freqghz_bound=[-1, 100], calpha=0.5,
clevels=np.array([0.3, 1.0]), opencontour=None):
self.aiafile = aiafile
self.eofiles = eofiles
self.xycen = xycen
self.fov = fov
self.calpha = calpha
self.clevels = clevels
self.freqghz_bound = freqghz_bound
self.opencontour = opencontour
self._showaia = False
rmap, rdata, rheader, ndim, npol_fits, stokaxis, rfreqs, rdelts = ndfits.read(eofiles[0])
self.bdinfo = bdinfo = ndfits.get_bdinfo(rfreqs, rdelts)
self.cfreqs = cfreqs = bdinfo['cfreqs']
self.cfreqs_all = cfreqs_all = bdinfo['cfreqs_all']
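        # Normalise a frequency to [0, 1] across the full observed band; used
        # below to pick a contour colour from the colormap for each spectral window.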
self.freq_dist = lambda fq: (fq - cfreqs_all[0]) / (cfreqs_all[-1] - cfreqs_all[0])
self.ntim = ntim = len(eofiles)
self.xlim = xlim = xycen[0] + np.array([-1, 1]) * 0.5 * fov[0]
self.ylim = ylim = xycen[1] + np.array([-1, 1]) * 0.5 * fov[1]
nspw = len(rfreqs)
eodate = Time(rmap.date.mjd + rmap.exposure_time.value / 2. / 24 / 3600, format='mjd')
ny, nx = rmap.data.shape
x0, x1 = (np.array([1, rmap.meta['NAXIS1']]) - rmap.meta['CRPIX1']) * rmap.meta['CDELT1'] + \
rmap.meta['CRVAL1']
y0, y1 = (np.array([1, rmap.meta['NAXIS2']]) - rmap.meta['CRPIX2']) * rmap.meta['CDELT2'] + \
rmap.meta['CRVAL2']
dx = rmap.meta['CDELT1']
dy = rmap.meta['CDELT2']
mapx, mapy = np.linspace(x0, x1, nx), np.linspace(y0, y1, ny)
fig = plt.figure(figsize=(15, 6))
self.fig = fig
grids = fig.add_gridspec(ncols=3, nrows=1, width_ratios=[1, 1, 0.6])
self.grids = grids
axs = []
axs.append(fig.add_subplot(grids[0, 0]))
axs.append(fig.add_subplot(grids[0, 1], sharex=axs[-1], sharey=axs[-1]))
axs.append(fig.add_subplot(grids[0, 2]))
if aiafile:
if os.path.exists(aiafile):
try:
aiacmap = plt.get_cmap('gray_r')
aiamap = smap.Map(aiafile)
ax = axs[0]
aiamap.plot(axes=ax, cmap=aiacmap)
ax = axs[1]
aiamap.plot(axes=ax, cmap=aiacmap)
self._showaia = True
                except Exception:
self._showaia = False
if self._showaia:
if self.opencontour is None:
self.opencontour = False
else:
if self.opencontour is None:
self.opencontour = True
## Plot EOVSA images as filled contour on top of the AIA image
icmap = plt.get_cmap('RdYlBu')
cts = []
## color map for spectra from the image series
tcmap = plt.get_cmap('turbo')
for s, sp in enumerate(rfreqs):
data = rdata[s, ...]
clvls = clevels * np.nanmax(data)
rcmap = [icmap(self.freq_dist(self.cfreqs[s]))] * len(clvls)
if self.opencontour:
cts.append(ax.contour(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
else:
cts.append(ax.contourf(mapx, mapy, data, levels=clvls,
colors=rcmap,
alpha=calpha))
ax.set_xlim(self.xlim)
ax.set_ylim(self.ylim)
for ax in axs[:2]:
ax.set_xlabel('Solar-X [arcsec]')
            ax.set_ylabel('Solar-Y [arcsec]')
ax.set_title('')
ax.text(0.02, 0.01,
' '.join(['AIA {:.0f} Å'.format(aiamap.wavelength.value),
aiamap.date.datetime.strftime('%Y-%m-%dT%H:%M:%S')]),
ha='left',
va='bottom',
color='k', transform=ax.transAxes)
ax.text(0.02, 0.05, ' '.join(['EOVSA ', eodate.datetime.strftime('%Y-%m-%dT%H:%M:%S')]), ha='left',
va='bottom',
color='k', transform=ax.transAxes)
divider = make_axes_locatable(axs[0])
cax = divider.append_axes("right", size="8%", pad=0.08)
cax.set_visible(False)
divider = make_axes_locatable(axs[1])
cax = divider.append_axes("right", size="8%", pad=0.08)
ticks, bounds, vmax, vmin, freqmask = ql.get_colorbar_params(bdinfo)
cb = colorbar.ColorbarBase(cax, norm=colors.Normalize(vmin=vmin, vmax=vmax), cmap=icmap,
orientation='vertical', boundaries=bounds, spacing='proportional',
ticks=ticks, format='%4.1f', alpha=calpha)
for fbd_lo, fbd_hi in freqmask:
if fbd_hi is not None:
cax.axhspan(fbd_lo, fbd_hi, hatch='//', edgecolor='k', facecolor='#BBBBBB')
plt.text(0.5, 1.05, 'MW', ha='center', va='bottom', transform=cax.transAxes, color='k', fontweight='normal')
plt.text(0.5, 1.01, '[GHz]', ha='center', va='bottom', transform=cax.transAxes, color='k',
fontweight='normal')
cax.xaxis.set_visible(False)
cax.tick_params(axis="y", pad=-20., length=0, colors='k', labelsize=7)
cax.axhline(vmin, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.axhline(vmax, xmin=1.0, xmax=1.2, color='k', clip_on=False)
cax.text(1.25, 0.0, '{:.1f}'.format(vmin), fontsize=9, transform=cax.transAxes, va='center', ha='left')
cax.text(1.25, 1.0, '{:.1f}'.format(vmax), fontsize=9, transform=cax.transAxes, va='center', ha='left')
boxlines = []
clkpnts = []
for idx, ax in enumerate(axs[:2]):
if idx == 0:
c = 'g'
elif idx == 1:
c = 'b'
else:
c = 'k'
line, = ax.plot([], [], '-', c=c, alpha=1.0) # empty line
boxlines.append(line)
clkpnt, = ax.plot([], [], '+', c='white', alpha=0.7) # empty line
clkpnts.append(clkpnt)
if ntim < 2:
cplts = ['k']
else:
cplts = tcmap(np.linspace(0, 1, ntim))
self.cplts = cplts
self.ax_eospec = axs[-1]
errobjs = initspecplot(self.ax_eospec, cplts)
grids.tight_layout(fig)
self.region = RegionSelector(clkpnts, boxlines, eofiles, errobjs, cfreqs=cfreqs, rms=None, wTmap=None)
self.scatter_eospecs_fit = []
self.scatter_eospecs = []
def set_params(self, params):
ssz = self.region.area # source area in arcsec^2
params.add('ssz', value=ssz, vary=False) # pixel size in arcsec^2
self.params = params
def plot_components(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.0
tb_err[:] = 1.e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='',
c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
def fit(self):
ti = 0
tb = self.region.errobjs[ti][0].get_ydata() * 1e6
tb_ma = ma.masked_less_equal(tb, 0)
freqghz = self.region.errobjs[0][0].get_xdata()
# freqghz_ma = ma.masked_outside(freqghz, 1.0, 15.0)
freqghz_ma = ma.masked_outside(freqghz, self.freqghz_bound[0], self.freqghz_bound[1])
mask_fit = np.logical_or(freqghz_ma.mask, tb_ma.mask)
freqghz_ma = ma.masked_array(freqghz, mask_fit)
tb_ma = ma.masked_array(tb, mask_fit)
# scatter_eospecs_fit.append(
# ax_spec.plot(freqghz_ma, tb_ma / 1.e6, marker='o', linestyle='', c=cplts[ti]))
# flx_rms = rms
tb_err = tb * 0.1
# tb_err[:] = 0.2e6
tb_err_ma = ma.masked_array(tb_err, tb_ma.mask)
if len(self.scatter_eospecs_fit) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs_fit.append(
self.ax_eospec.errorbar(freqghz_ma, tb_ma / 1.e6, yerr=tb_err_ma / 1.e6, marker='.', ms=1,
linestyle='', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
set_errorobj(freqghz_ma, tb_ma / 1.e6, self.scatter_eospecs_fit[ti], yerr=tb_err_ma / 1.e6)
mini = lmfit.Minimizer(mwspec2min_1src, self.params, fcn_args=(freqghz_ma.compressed(),),
fcn_kws={'tb': tb_ma.compressed(), 'tb_err': tb_err_ma.compressed()},
nan_policy='omit')
method = 'nelder'
# # method = 'differential_evolution'
mi = mini.minimize(method=method)
print(method + ' minimization results')
print(lmfit.fit_report(mi.params))
tb_fit = mwspec2min_1src(mi.params, freqghz)
if len(self.scatter_eospecs) == 0:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs.append(self.ax_eospec.plot(freqghz, tb_fit / 1.e6, linestyle='-', c=cplt))
else:
for ti, cplt in enumerate(self.cplts):
self.scatter_eospecs[ti][0].set_data(freqghz, tb_fit / 1.e6)
| 40.769912 | 145 | 0.552529 |
66eed3078e25f4e55856ae2168ea3116d3ba6fee | 937 | py | Python | stu_mgt/urls.py | lennon624/stu_mgt | c0118f9adede46db23b6a9b5329004343023f1a3 | [
"MIT"
]
| null | null | null | stu_mgt/urls.py | lennon624/stu_mgt | c0118f9adede46db23b6a9b5329004343023f1a3 | [
"MIT"
]
| null | null | null | stu_mgt/urls.py | lennon624/stu_mgt | c0118f9adede46db23b6a9b5329004343023f1a3 | [
"MIT"
]
| null | null | null | """stu_mgt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
import xadmin
xadmin.autodiscover()
urlpatterns = [
# path('admin/', admin.site.urls),
path('xadmin/', xadmin.site.urls),
path('account/', include('account.urls')),
path('acheve_mgt/', include('acheve_mgt.urls')),
]
| 32.310345 | 77 | 0.700107 |
b0540c94140ab0594e675572743463d8f1547fad | 3,672 | py | Python | video_game_geek/middlewares.py | albert-marrero/video-game-web-scraper | 144ae2a4ff70d892ca6bada7d4550ad0f7ae1210 | [
"MIT"
]
| 1 | 2022-01-12T21:33:25.000Z | 2022-01-12T21:33:25.000Z | video_game_geek/middlewares.py | albert-marrero/video-game-web-scraper | 144ae2a4ff70d892ca6bada7d4550ad0f7ae1210 | [
"MIT"
]
| null | null | null | video_game_geek/middlewares.py | albert-marrero/video-game-web-scraper | 144ae2a4ff70d892ca6bada7d4550ad0f7ae1210 | [
"MIT"
]
| 1 | 2022-01-30T04:22:53.000Z | 2022-01-30T04:22:53.000Z | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class VideoGameWebScraperSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class VideoGameWebScraperDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 35.307692 | 78 | 0.676471 |
fbc45345b6d842c680f57f72f6da6d79305c1f2b | 268 | py | Python | src/api/pdi/application/connection/DeleteConnection/DeleteConnectionCommand.py | ahmetcagriakca/pythondataintegrator | 079b968d6c893008f02c88dbe34909a228ac1c7b | [
"MIT"
]
| 1 | 2020-12-18T21:37:28.000Z | 2020-12-18T21:37:28.000Z | src/api/pdi/application/connection/DeleteConnection/DeleteConnectionCommand.py | ahmetcagriakca/pythondataintegrator | 079b968d6c893008f02c88dbe34909a228ac1c7b | [
"MIT"
]
| null | null | null | src/api/pdi/application/connection/DeleteConnection/DeleteConnectionCommand.py | ahmetcagriakca/pythondataintegrator | 079b968d6c893008f02c88dbe34909a228ac1c7b | [
"MIT"
]
| 1 | 2020-12-18T21:37:31.000Z | 2020-12-18T21:37:31.000Z | from dataclasses import dataclass
from pdip.cqrs import ICommand
from pdi.application.connection.DeleteConnection.DeleteConnectionRequest import DeleteConnectionRequest
@dataclass
class DeleteConnectionCommand(ICommand):
request: DeleteConnectionRequest = None
| 26.8 | 103 | 0.86194 |
f57d0970e3fc31a0a346740afba057b17cbf5748 | 610 | py | Python | AutotestFramework/core/tools/MemcacheTool.py | yangjourney/sosotest | 2e88099a829749910ca325253c9b1a2e368d21a0 | [
"MIT"
]
| 422 | 2019-08-18T05:04:20.000Z | 2022-03-31T06:49:19.000Z | AutotestFramework/core/tools/MemcacheTool.py | LinSongJian1985/sosotest | 091863dee531b5726650bb63efd6f169267cbeb4 | [
"MIT"
]
| 10 | 2019-10-24T09:55:38.000Z | 2021-09-29T17:28:43.000Z | AutotestFramework/core/tools/MemcacheTool.py | LinSongJian1985/sosotest | 091863dee531b5726650bb63efd6f169267cbeb4 | [
"MIT"
]
| 202 | 2019-08-18T05:04:27.000Z | 2022-03-30T05:57:18.000Z | from pymemcache.client.base import Client
class MemcacheTool(object):
def __init__(self,host,port):
self.client = Client((host, port))
def set(self,key,value):
self.client.set(key, value)
def get(self,key):
return self.client.get(key)
if __name__ == '__main__':
rt = MemcacheTool("192.168.0.75",11211)
rt.set("testabc","abc")
print(rt.get("testabc"))
rt = MemcacheTool("192.168.0.75",11212)
rt.set("testabc","abc2")
print(rt.get("testabc"))
rt = MemcacheTool("192.168.0.75",11213)
rt.set("testabc","abc3")
print(rt.get("testabc"))
| 23.461538 | 43 | 0.622951 |
bbc48cdde7037c4d9a2966c475a43331c1206ea5 | 9,988 | py | Python | mlp.py | AliMakiGmail/SFD-CNN-TL | 96890a086cb170334f761a825a5fdcdc51444696 | [
"MIT"
]
| 27 | 2018-09-12T12:00:44.000Z | 2022-03-20T07:33:01.000Z | mlp.py | AliMakiGmail/SFD-CNN-TL | 96890a086cb170334f761a825a5fdcdc51444696 | [
"MIT"
]
| 2 | 2020-01-13T16:35:50.000Z | 2020-09-07T07:10:12.000Z | mlp.py | AliMakiGmail/SFD-CNN-TL | 96890a086cb170334f761a825a5fdcdc51444696 | [
"MIT"
]
| 16 | 2018-08-11T14:41:09.000Z | 2021-10-31T13:24:32.000Z | #!/usr/bin/env python
# Copyright 2019 Augusto Cunha and Axelle Pochet
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this code and
# associated documentation files, to deal in the code without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the code, and to permit persons to whom the code is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the code.
#
# THE CODE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE CODE OR THE USE OR OTHER DEALINGS IN THE CODE.
__license__ = "MIT"
__author__ = "Augusto Cunha, Axelle Pochet"
__email__ = "[email protected], [email protected]"
__credits__ = ["Augusto Cunha", "Axelle Pochet", "Helio Lopes", "Marcelo Gattass"]
################# all imports #################
from __future__ import print_function
import numpy, os, time
import pandas as pd
from tensorflow import set_random_seed
numpy.random.seed(1337)
set_random_seed(1337)
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.models import model_from_json
from keras.utils import np_utils
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import metrics
def save_model(model, filename):
model_json = model.to_json()
with open("output/" + filename + ".json", "w") as json_file: json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("output/" + filename + "_weights.h5")
def load_model(modelJsonPath, modelWeightsPath) :
################# load base model #################
jsonFile = open(modelJsonPath, 'r')
loadedModelJson = jsonFile.read()
jsonFile.close()
base_model = model_from_json(loadedModelJson)
base_model.load_weights(modelWeightsPath)
# remove last layers
    for i in range(8):
base_model.layers.pop()
base_model.outputs = [base_model.layers[-1].output]
# freeze layers
for layer in base_model.layers[:7]:
layer.trainable = False
return base_model
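# Note: with the last eight layers popped and the remaining layers frozen,
# load_model() acts as a fixed feature extractor; its (8, 8, 50) output volume
# is what the MLP heads defined below expect to flatten.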
def data(X_train, Y_train, numberOfClasses = 2):
Y_train = np_utils.to_categorical(Y_train, numberOfClasses)
x_train, x_test, y_train, y_test = train_test_split(X_train, Y_train, test_size=0.2, shuffle=True, random_state=1337)
return x_train, y_train, x_test, y_test
def dataCV(trainFaultDirectory='dataset/fault/',trainNonFaultDirectory='dataset/nonfault/', modelJsonPath = 'base_model/model.json', modelWeightsPath = 'base_model/model.h5'):
trainFaultURLList = os.listdir(trainFaultDirectory)
trainNonFaultURLList = os.listdir(trainNonFaultDirectory)
# read and save
trainImageDataList = []
trainClassesList = []
for imageURL in trainFaultURLList:
csv_file = trainFaultDirectory + imageURL
df = pd.read_csv(csv_file, delimiter=' ', header = None)
trainImageDataList.append(df.values)
trainClassesList.append(1)
for imageURL in trainNonFaultURLList:
csv_file = trainNonFaultDirectory + imageURL
df = pd.read_csv(csv_file, delimiter=' ', header = None)
trainImageDataList.append(df.values)
trainClassesList.append(0)
# sparsify labels
Y = trainClassesList
# pass input as numpy arrays
imageRows = 45
imageCollumns = 45
imageChannels = 1
trainSamplesList = numpy.array( trainImageDataList)
trainSamplesList = trainSamplesList.reshape( trainSamplesList.shape[0], imageRows, imageCollumns, imageChannels )
trainSamplesList = trainSamplesList.astype( 'float32' )
X = trainSamplesList
## extract features as new input
X = load_model(modelJsonPath, modelWeightsPath).predict(X)
x_train = X
y_train = Y
x_test = []
y_test = []
return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test, numberOfClasses=2, MLP1=100, MLP2=200, numberOfEpochs = 20, batchSize = 30, save=True, baseName='femlpModel'):
"""
Model providing function:
    Create Keras model with MLP as classifier, then compile, test and generate metrics.
"""
################# define MLP #################
# create my MLP
top_model = Sequential()
    top_model.add(Flatten(input_shape=(8, 8, 50))) # shape of the last layer of the base model; couldn't get it automatically via my_model.output_shape
top_model.add(Dense(MLP1))
top_model.add(Activation('relu', name = 'act_1')) # set name, otherwise duplicate names appear
top_model.add(Dropout(0.5))
top_model.add(Dense(MLP2))
top_model.add(Activation('relu', name = 'act_2'))
top_model.add(Dense(numberOfClasses))
top_model.add(Activation('softmax', name = 'softmax'))
# Compile
top_model.compile( loss='binary_crossentropy', optimizer= 'sgd', metrics=['accuracy'] )
# Train
top_model.fit(x_train,
y_train,
batch_size = batchSize,
epochs = numberOfEpochs,
verbose = 0,
validation_data=(x_test, y_test))
# Classify
classesPredictionList = top_model.predict_classes(x_test, verbose=0) # 0 or 1
classesProbaPredictionList = top_model.predict_proba(x_test) # probability
sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(classesPredictionList,classesProbaPredictionList,y_test,verbose=False)
# Save Model
if(save):
save_model(top_model, baseName)
print("Accuracy: {:.4f}".format(accuracy))
print("Sensitivity: {:.4f}".format(sensitivity))
print("Specificity: {:.4f}".format(specificity))
print("F1 Score: {:.4f}".format(F1_score))
print("AUC: {:.4f}".format(auc))
def create_modelCV(x_train, y_train, x_test, y_test, numFolds= 5, numberOfClasses=2, MLP1=100, MLP2=200, numberOfEpochs = 20, batchSize = 30):
"""
Model providing function:
    Create Keras model with MLP as classifier, then compile, test and generate metrics.
"""
### Cross-validation
skf = StratifiedKFold(n_splits=numFolds, shuffle=True, random_state=1337)
X = x_train
Y = y_train
sensitivitys, specificitys, accuracys, precisions, recalls, F1_scores, aucs = [[],[],[],[],[],[],[]]
#kpbar = tqdm(total=numFolds, desc="Kfold", leave=False)
y = np_utils.to_categorical(Y, 2)
Y = numpy.array(Y)
for train_index, test_index in skf.split(X, Y):
################ define MLP #################
# create my MLP
top_model = Sequential()
        top_model.add(Flatten(input_shape=(8, 8, 50))) # shape of the last layer of the base model; couldn't get it automatically via my_model.output_shape
top_model.add(Dense(MLP1))
top_model.add(Activation('relu', name = 'act_1')) # set name, otherwise duplicate names appear
top_model.add(Dropout(0.5))
top_model.add(Dense(MLP2))
top_model.add(Activation('relu', name = 'act_2'))
top_model.add(Dense(numberOfClasses))
top_model.add(Activation('softmax', name = 'softmax'))
# Compile
top_model.compile( loss='binary_crossentropy', optimizer= 'sgd', metrics=['accuracy'] )
# Train
top_model.fit(X[train_index],
y[train_index],
batch_size = batchSize,
epochs = numberOfEpochs,
verbose = 0,
validation_data=(X[test_index], y[test_index]))
# Classify
classesPredictionList = top_model.predict_classes(X[test_index], verbose=0) # 0 or 1
classesProbaPredictionList = top_model.predict_proba(X[test_index]) # probability
sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(classesPredictionList,classesProbaPredictionList,y[test_index],verbose=False)
sensitivitys.append(sensitivity)
specificitys.append(specificity)
accuracys.append(accuracy)
precisions.append(precision)
recalls.append(recall)
F1_scores.append(F1_score)
aucs.append(auc)
sensitivitys = numpy.array(sensitivitys)
specificitys = numpy.array(specificitys)
accuracys = numpy.array(accuracys)
precisions = numpy.array(precisions)
recalls = numpy.array(recalls)
F1_scores = numpy.array(F1_scores)
aucs = numpy.array(aucs)
print("Mean Accuracy: {:.4f} (+/- {:.4f})".format(accuracys.mean(), accuracys.std()))
print("Mean Sensitivity: {:.4f} (+/- {:.4f})".format(sensitivitys.mean(), sensitivitys.std()))
print("Mean Specificity: {:.4f} (+/- {:.4f})".format(specificitys.mean(), specificitys.std()))
print("Mean F1 Score: {:.4f} (+/- {:.4f})".format(F1_scores.mean(), F1_scores.std()))
print("Mean AUC: {:.4f} (+/- {:.4f})".format(aucs.mean(), aucs.std()))
if __name__ == '__main__':
start_time = time.time()
print("Loading dataset...")
X_train, Y_train, X_test, Y_test = dataCV()
x_train, y_train, x_test, y_test = data(X_train, Y_train)
print("Training...")
create_model(x_train, y_train, x_test, y_test, MLP1=100, MLP2=200, numberOfEpochs = 20, save=True, baseName='femlpModel')
print("Training with cross validation...")
create_modelCV(X_train, Y_train, X_test, Y_test, numFolds=5, MLP1=100, MLP2=200, numberOfEpochs = 20)
print("--- {:.1f} seconds ---".format(time.time() - start_time))
| 43.426087 | 181 | 0.680817 |
6a3c5c42a8619e69dc51d333098ea316d8a51da3 | 1,635 | py | Python | FlowerClassification.py | Akuma47/Iris-Classification | 659cc52bc04dc0cd7088e37d7d1e7fa057e3f603 | [
"MIT"
]
| null | null | null | FlowerClassification.py | Akuma47/Iris-Classification | 659cc52bc04dc0cd7088e37d7d1e7fa057e3f603 | [
"MIT"
]
| null | null | null | FlowerClassification.py | Akuma47/Iris-Classification | 659cc52bc04dc0cd7088e37d7d1e7fa057e3f603 | [
"MIT"
]
| null | null | null | import pandas as pd
import numpy as np
import math
dataset = pd.read_csv('iris.data')
irisData = []
labels = []
for i in range(100):
x1 = [dataset['x1'][i],dataset['x2'][i],dataset['x3'][i],dataset['x4'][i]]
irisData.append(x1)
if dataset['class'][i] == 'Iris-setosa':
labels.append(0)
if dataset['class'][i] == 'Iris-versicolor':
labels.append(0.5)
# if dataset['class'][i] == 'Iris-virginica':
# labels.append(1)
epochs = 50
lr = 0.01
w1 = 0.1
w2 = 0.1
w3 = 0.1
w4 = 0.1
loss = 0
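# Training loop for a single-neuron logistic model with a heuristic update rule.
# Forward pass: y_hat = x1*w1 + x2*w2 + x3*w3 + x4*w4, squashed by a sigmoid;
# the (log10-based) binary cross-entropy against the targets (Setosa -> 0,
# Versicolor -> 0.5) is accumulated over the samples, averaged, and then used
# to nudge the weights.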
for h in range(epochs):
for i in range(len(irisData)):
x1 = irisData[i][0]
x2 = irisData[i][1]
x3 = irisData[i][2]
x4 = irisData[i][3]
y = labels[i]
y_hat = (x1* w1) + (x2 * w2) + (x3 * w3) + (x4 * w4)
sig = 1 / (1+np.exp(-y_hat))
loss += - (y*math.log10(sig) + (1-y)*math.log10(1-sig))
loss = loss/len(irisData)
# Update weights
w1 = w1 - (loss * lr * y_hat)
w2 = w2 - (loss * lr)
w3 = w3 - (loss * lr * y_hat)
w4 = w4 - (loss * lr * y_hat)
while True:
print('')
x1 = float(input('x1> '))
x2 = float(input('x2> '))
x3 = float(input('x3> '))
x4 = float(input('x4> '))
res = (x1 * w1) + (x2 * w2) + (x3 * w3) + (x4 * w4)
sig = 1 / (1+np.exp(-res))
if sig < 0.5:
print('Iris Setosa')
if sig > 0.5:
        print('Versicolor')
# if sig > 0.6:
# print('Virginica')
print(sig)
| 15.721154 | 79 | 0.448318 |
7a58f8304e5be686b7b945a4784c4a2db50efdf2 | 2,809 | py | Python | test/20200920_fungar_cut_volume.py | CITA-cph/deep-sight | 9c60183a5bd1f3b6b930cdba8632c33f38769c0b | [
"Apache-2.0"
]
| null | null | null | test/20200920_fungar_cut_volume.py | CITA-cph/deep-sight | 9c60183a5bd1f3b6b930cdba8632c33f38769c0b | [
"Apache-2.0"
]
| null | null | null | test/20200920_fungar_cut_volume.py | CITA-cph/deep-sight | 9c60183a5bd1f3b6b930cdba8632c33f38769c0b | [
"Apache-2.0"
]
| 1 | 2021-12-05T21:59:49.000Z | 2021-12-05T21:59:49.000Z | '''
Copyright 2020 CITA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
20200920_fungar_cut_volume.py
This script shows how to load a volume from images and how to cut it with a cutter. It also shows how to implement a button.
Author: Sebastian Gatz
'''
import numpy as np
from vedo import *
from vedo.pyplot import histogram
import os
import glob
import cv2
import pyvista as pv
DEEPSIGHT_DIR = os.getenv('DEEPSIGHT_DIR')
def main():
all_loaded_images = []
x_scale = 2
y_scale = 1
z_scale = 1
res = 2 #read every nth pixel in x,y,z
#set path
all_images = "C:/Users/sgat/OneDrive - KADK/03_CITA/59_DataViz/03_data/all/"
os.chdir(all_images)
#load all images
types = ('*.gif', '*.png', '*.jpg', "*bmp", "*tif")
files = []
for f in types:
files.extend(glob.glob(f))
print (len(files))
#create image stack
img_stack = 0
for i in range(0,len(files),res):
print ("img loaded: ", i)
img = cv2.imread(files[i],0)
img = cv2.resize(img, (0,0), fx=1/res, fy=1/res)
if i == 0:
img.fill(0)
img_stack = img
else:
img_stack = np.dstack((img_stack,img))
img = cv2.imread(files[0],0)
img = cv2.resize(img, (0,0), fx=1/res, fy=1/res)
img.fill(0)
img_stack = np.dstack((img_stack,img))
print ("image stack created")
#create volume
vol = Volume(img_stack, spacing=(x_scale,y_scale,z_scale), mapper="smart", mode=1, alpha= [0.0, 0.0, 0.5, 0.7, 1], c= "jet")
vp = Plotter(N=2, axes=True, bg="black", size="fullscreen") #(1000,1000))
vp.show("Voxel Crop", axes=0, at=0, viewup="y")
def buttonfunc():
vp.close()
bu.switch()
bu = vp.addButton(
buttonfunc,
pos=(0.5, 0.05), # x,y fraction from bottom left corner
states=["done"],
c=["black"],
bc=["w"], # colors of states
font="courier", # arial, courier, times
size=25,
bold=False,
italic=False,
)
vp.show(vol, "Voxel Render", axes=1, at=1)
vp.addCutterTool(vol)
vp.show(interactive=True)
"""
print ("nice")
vp = Plotter(N=1, axes=True, bg="black", size="fullscreen")
vp.show("Voxel Crop", axes=0, at=0)
"""
if __name__ == "__main__":
main() | 24.215517 | 128 | 0.617658 |
23d69c71d80abb4b94fa9d383696e979417fa391 | 5,364 | py | Python | docker/pythonpath_dev/superset_config.py | ayuanty/superset | 132a8ef2cb55fa6692ea31d5c278f102d6c2886b | [
"Apache-2.0"
]
| null | null | null | docker/pythonpath_dev/superset_config.py | ayuanty/superset | 132a8ef2cb55fa6692ea31d5c278f102d6c2886b | [
"Apache-2.0"
]
| null | null | null | docker/pythonpath_dev/superset_config.py | ayuanty/superset | 132a8ef2cb55fa6692ea31d5c278f102d6c2886b | [
"Apache-2.0"
]
| null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file is included in the final Docker image and SHOULD be overridden when
# deploying the image to prod. Settings configured here are intended for use in local
# development environments. Also note that superset_config_docker.py is imported
# as a final step as a means to override "defaults" configured here
#
import logging
import os
from datetime import timedelta
from cachelib.file import FileSystemCache
from celery.schedules import crontab
from superset.typing import CacheConfig
logger = logging.getLogger()
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = "The environment variable {} was missing, abort...".format(
var_name
)
raise EnvironmentError(error_msg)
DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT")
DATABASE_USER = get_env_variable("DATABASE_USER")
DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD")
DATABASE_HOST = get_env_variable("DATABASE_HOST")
DATABASE_PORT = get_env_variable("DATABASE_PORT")
DATABASE_DB = get_env_variable("DATABASE_DB")
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % (
DATABASE_DIALECT,
DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_DB,
)
REDIS_HOST = get_env_variable("REDIS_HOST")
REDIS_PORT = get_env_variable("REDIS_PORT")
REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", 0)
REDIS_RESULTS_DB = get_env_variable("REDIS_RESULTS_DB", 1)
RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab")
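# Celery uses Redis both as broker and as its result backend (separate DBs by
# default), and celery beat drives two periodic jobs: the report scheduler runs
# every minute and the log-pruning task runs daily at 00:10.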
class CeleryConfig(object):
BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}"
CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks", "superset.tasks.thumbnails")
CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}"
CELERYD_PREFETCH_MULTIPLIER = 10
CELERY_ACKS_LATE = True
CELERYBEAT_SCHEDULE = {
"reports.scheduler": {
"task": "reports.scheduler",
"schedule": crontab(minute="*", hour="*"),
},
"reports.prune_log": {
"task": "reports.prune_log",
"schedule": crontab(minute=10, hour=0),
},
}
CELERY_CONFIG = CeleryConfig
SUPERSET_HOST = get_env_variable("SUPERSET_HOST")
SUPERSET_PORT = get_env_variable("SUPERSET_PORT")
ENABLE_PROXY_FIX = (get_env_variable("ENABLE_PROXY_FIX","False") == "True")
FEATURE_FLAGS = {"ALERT_REPORTS": True, "THUMBNAILS":True, "DASHBOARD_NATIVE_FILTERS":True, "DYNAMIC_PLUGINS":True}
ALERT_REPORTS_NOTIFICATION_DRY_RUN = False
WEBDRIVER_BASEURL = f"http://{SUPERSET_HOST}:{SUPERSET_PORT}/"
# The base URL for the email report hyperlinks.
WEBDRIVER_BASEURL_USER_FRIENDLY = get_env_variable("WEBDRIVER_BASEURL_USER_FRIENDLY",WEBDRIVER_BASEURL)
SQLLAB_CTAS_NO_LIMIT = True
APP_NAME = "養殖漁業管理決策系統"
APP_ICON = "/static/custom/[email protected]"
APP_ICON_WIDTH = 200
BABEL_DEFAULT_LOCALE = "zh_TW"
RECAPTCHA_PUBLIC_KEY = "6LencLYZAAAAAC9Vzg2fKqJoAMmyb2X0C7DwzOQQ"
RECAPTCHA_PRIVATE_KEY = "6LencLYZAAAAAAWrudM1hJA5MsSkm2AajFYOFPXA"
MAPBOX_API_KEY = "pk.eyJ1IjoiYXl1YW50eSIsImEiOiJjazZqNm9pamkwOGx2M2ZudXF1b2xyN20wIn0.tkuQOdU3lMbwqLeh58Z66A"
ENABLE_JAVASCRIPT_CONTROLS = True
SCREENSHOT_LOCATE_WAIT = 100
SCREENSHOT_LOAD_WAIT = 600
THUMBNAIL_CACHE_CONFIG: CacheConfig = {
"CACHE_TYPE": "redis",
"CACHE_DEFAULT_TIMEOUT": 24*60*60*7,
"CACHE_KEY_PREFIX": "thumbnail_",
"CACHE_REDIS_URL": f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}"
}
EMAIL_NOTIFICATIONS = True
SMTP_HOST = get_env_variable("SMTP_HOST")
SMTP_STARTTLS = False
SMTP_SSL = False
SMTP_USER = get_env_variable("SMTP_USER")
SMTP_PORT = 25
SMTP_PASSWORD = get_env_variable("SMTP_PASSWORD")
SMTP_MAIL_FROM = get_env_variable("SMTP_MAIL_FROM")
SUPERSET_WEBSERVER_TIMEOUT = 120
#PERMANENT_SESSION_LIFETIME = 120
ENABLE_TIME_ROTATE = True
from superset.custom_security import CustomSecurityManager
CUSTOM_SECURITY_MANAGER = CustomSecurityManager
#
# Optionally import superset_config_docker.py (which will have been included on
# the PYTHONPATH) in order to allow for local settings to be overridden
#
try:
import superset_config_docker
from superset_config_docker import * # noqa
logger.info(
f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]"
)
except ImportError:
logger.info("Using default Docker config...")
| 33.735849 | 115 | 0.75783 |
cb87c98dfbf34eed261e716e602f8a3abad998f3 | 303 | py | Python | data/multilingual/Latn.AST/Sans_16/pdf_to_json_test_Latn.AST_Sans_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
]
| 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.AST/Sans_16/pdf_to_json_test_Latn.AST_Sans_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
]
| null | null | null | data/multilingual/Latn.AST/Sans_16/pdf_to_json_test_Latn.AST_Sans_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
]
| null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.AST/Sans_16/udhr_Latn.AST_Sans_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 |
29a261e4cbabbcf991abfb59fc230e1daeea06ba | 13,489 | py | Python | lib/portal/docpreprocessor/DocParser.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
]
| null | null | null | lib/portal/docpreprocessor/DocParser.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
]
| 74 | 2015-12-28T16:17:20.000Z | 2021-09-08T12:28:59.000Z | lib/portal/docpreprocessor/DocParser.py | Jumpscale/jumpscale_portal8 | 3a4d56a1ba985b68fe9b525aed2486a54808332f | [
"Apache-2.0"
]
| null | null | null | from JumpScale import j
import re
class DocParser():
def __init__(self):
self.__jslocation__ = "j.portal.tools.docpreprocessor"
def parseDoc(self, doc):
content = doc.content
# TODO: this is badly written can be done by some easy regex, is not performing like this
content, doc.name = self._findItem(content, "@@name")
content, aliasstr = self._findItem(content, "@@alias")
doc.alias = [item.lower().strip() for item in aliasstr.split(",")]
content, authorstr = self._findItem(content, "@@author")
doc.author = [item.lower().strip() for item in authorstr.split(",")]
content, doc.type = self._findItem(content, "@@type")
content, doc.pagename = self._findItem(content, "@@pagename")
content, doc.title = self._findItem(content, "@@title", removecolon=False)
content, requiredargs = self._findItem(content, "@@requiredargs")
doc.requiredargs = requiredargs.split()
if doc.title == "":
doc.title = j.sal.fs.getBaseName(doc.path).replace(".wiki", "")
content, order = self._findItem(content, "@@order")
if order != "":
doc.order = int(order)
content, prodstr = self._findItem(content, "@@product")
doc.products = [item.lower().strip() for item in prodstr.split(",")]
content, visibility = self._findItem(content, "@@visibility")
if visibility != "":
doc.visibility = [item.lower().strip() for item in visibility.split(",")]
doc.name = doc.name.lower()
if doc.name == "":
doc.name = j.sal.fs.getBaseName(doc.shortpath).replace(".wiki", "").lower().strip()
if doc.pagename == "":
doc.pagename = doc.name
content, parent = self._findItem(content, "@@parent")
if parent.strip() != "":
doc.parent = parent
content, generate = self._findItem(content, "@@generate")
if generate == "0":
doc.generate = False
content, tags = self._findItem(content, item="@@", maxitems=100, removecolon=False)
tags2 = ""
for tag in tags:
if tag.find(":") != -1:
items = tag.split(":")
tags2 += " %s:%s" % (items[0].strip(), items[1].strip())
doc.tags = tags2.strip()
def _findItem(self, text, item="@owner", maxitems=1, removecolon=True, lower=True):
result = []
def process(arg, line):
line = re.sub(item.lower(), "", line, flags=re.IGNORECASE)
if line.find("##") == 0:
line = ""
if line.find("##") != -1:
line = line.split("##")[0]
if removecolon:
line = line.replace(":", "").strip()
else:
line = line.strip()
result.append(line)
return False
text2 = j.tools.code.regex.replaceLines(process, arg="", text=text, includes=["%s.*" % item], excludes='')
if len(result) > maxitems:
self.errorTrap("Error in text to parse, found more entities:%s than %s" % (item, maxitems))
if maxitems == 1:
if len(result) > 0:
result = result[0]
else:
result = ""
return text2, result,
def _findLine(self, text, item="@owner"):
for line in text.split("\n"):
if line.strip().lower().find(item) == 0:
return line
return ""
def _findId(self, text, path):
result = j.tools.code.regex.findAll("\(\(.*: *\d* *\)\)", text)
if len(result) > 1:
raise RuntimeError("Found 2 id's in %s" % path)
if len(result) == 1:
result = result[0].split(":")[1]
result = result.split(")")[0]
else:
result = ""
if result.strip() == "":
result = 0
else:
try:
result = int(result)
except Exception as e:
raise RuntimeError("Could not parse if, error:%s. \nPath = %s" % (e, path))
return text, result
def _parseTimeInfo(self, timestring, modelobj, defaults=[8, 16, 8, 4, 8]):
# print "timestring: %s" % timestring
timeItems = timestring.split("/")
modelobj.time_architecture = defaults[0]
modelobj.time_coding = defaults[1]
modelobj.time_integration = defaults[2]
modelobj.time_doc = defaults[3]
modelobj.time_testing = defaults[4]
modelobj.timestr = timestring
modelobj.time = 0
for item in timeItems:
if item != "":
if item.lower()[0] == "a":
modelobj.time_architecture = int(item.lower().replace("a", ""))
modelobj.time += modelobj.time_architecture
if item.lower()[0] == "c":
modelobj.time_coding = int(item.lower().replace("c", ""))
modelobj.time += modelobj.time_coding
if item.lower()[0] == "i":
modelobj.time_integration = int(item.lower().replace("i", ""))
modelobj.time += modelobj.time_integration
if item.lower()[0] == "d":
modelobj.time_doc = int(item.lower().replace("d", ""))
modelobj.time += modelobj.time_doc
if item.lower()[0] == "t":
modelobj.time_testing = int(item.lower().replace("t", ""))
modelobj.time += modelobj.time_testing
def _parseTaskInfo(self, storyTaskModelObject, info):
for item in info.split(" "):
if item != "":
if item.lower()[0] == "s":
# story
storyTaskModelObject.storyid = int(item.lower().replace("s", ""))
elif item.lower()[0] == "p":
# priority
storyTaskModelObject.priority = int(item.lower().replace("p", ""))
elif item.lower()[0] == "m":
# sprint
storyTaskModelObject.sprintid = int(item.lower().replace("m", ""))
def _parseStoryInfo(self, storyTaskModelObject, info):
for item in info.split(" "):
if item != "":
if item.lower()[0] == "s":
# story
storyTaskModelObject.id = int(item.lower().replace("s", ""))
elif item.lower()[0] == "p":
# priority
storyTaskModelObject.priority = int(item.lower().replace("p", ""))
elif item.lower()[0] == "m":
# sprint
storyTaskModelObject.sprintid = int(item.lower().replace("m", ""))
def _parseTaskQuestionRemark(self, text):
"""
@return [infotext,timetext,user,group,descr]
"""
keys = ["P", "p", "S", "s", "M", "m"]
timeitem = ""
infoitems = ""
descr = ""
state = "start"
user = ""
group = ""
# print "parse task: %s" % text
        text = text.replace("  ", " ")
        text = text.replace("  ", " ")
if text.strip() == "":
return ["", "", "", "", ""]
for item in text.strip().split(" "):
# print "item: %s" % item
if state == "endofmeta":
descr += item + " "
if state == "start":
if item[0] in keys:
try:
int(item[1:])
infoitems += item + " "
except:
descr += item + " "
state = "endofmeta"
elif item[0:2].lower() == "t:":
timeitem = item[2:]
if not re.match(r"[aAcCiIdDtT/\d:]*\Z", timeitem):
descr += item + " "
state = "endofmeta"
timeitem = ""
#raise RuntimeError("Time item match failed for text %s" % text)
elif item.find(":") != -1:
# found user or group
if not j.tools.code.regex.match("\w*:\w*", item):
descr += item + " "
state = "endofmeta"
elif user == "":
splitted = item.split(":")
user = splitted[0]
group = splitted[1]
                        # TODO: P2 user & group can be comma separated; build support for it in the code below
if self.getUserMainId(group) != False:
# probably user & group reversed
group2 = user
user = group
group = group2
if self.getUserMainId(user) != False:
user = self.getUserMainId(user) # to get aliasesin order
else:
descr += item + " "
state = "endofmeta"
return [infoitems, timeitem, user, group, descr]
def _getStoryName(self, info):
out = ""
for item in info.split(" "):
if not(item.lower()[0] == "s" or item.lower()[0] == "p" or item.lower()[0] == "m"):
out += " %s" % item
return out.strip()
def _strToArrayInt(self, items):
if items == "":
return []
result = ""
for item in items.split(","):
try:
result.append(int(item))
except:
raise RuntimeError("Cannot convert str to array, item was %s" % item)
return result
def _strToInt(self, item):
if item == "":
return 0
try:
result = int(item)
except:
raise RuntimeError("Cannot convert str to int, item was %s" % item)
return result
def _normalizeDescr(self, text):
text = text.lower()
splitat = ["{", "(", "[", "#", "%", "$", "'"]
for tosplit in splitat:
if len(text.split(tosplit)) > 0:
text = text.split(tosplit)[0]
text = text.replace(",", "")
text = text.replace(":", "")
text = text.replace(";", "")
        text = text.replace("  ", " ")
if text != "" and text[-1] == " ":
text = text[:-1]
text = text.replace("-", "")
text = text.replace("_", "")
return text
def shortenDescr(self, text, maxnrchars=60):
return j.tools.code.textToTitle(text, maxnrchars)
def _getLinesAround(self, path, tofind, nrabove, nrbelow):
text = j.sal.fs.fileGetContents(path)
nr = 0
lines = text.split("\n")
for line in lines:
if line.find(tofind) != -1:
if nr - nrabove < 0:
nrstart = 0
else:
nrstart = nr - nrabove
if nr + nrabove > len(lines):
nrstop = len(lines)
else:
nrstop = nr + nrabove
return "\n".join(lines[nrstart:nrstop])
nr += 1
return ""
def addUniqueId(self, line, fullPath, ttype="sprint"):
line, id1 = self._findId(line, fullPath)
if id1 == 0:
# create unique id and put it in the file
id1 = j.base.idpreprocessor.generateIncrID("%sid" % ttype, self.service)
# tfe=j.tools.code.getTextFileEditor(fullPath)
#tfe.addItemToFoundLineOnlyOnce(line," ((%s:%s))"%(ttype,id1),"\(id *: *\d* *\)",reset=True)
tfe = j.tools.code.getTextFileEditor(fullPath)
tfe.addItemToFoundLineOnlyOnce(line, " ((%s:%s))" % (ttype, id1), "\(+.*: *\d* *\)+", reset=self.reset)
return id1
def _findTasks(self, text, path, fullPath):
# TODO: S2 do same for remarks & questions
def findTodoVariants(line):
variants = ["@todo:", "@todo :", "@todo"]
for variant in variants:
if line.strip().find(variant) == 0:
return variant
if text.lower().find("@todo") != -1:
lines = j.tools.code.regex.findAll("@todo.*", text)
for line in lines:
self.addUniqueId(line, fullPath, ttype="todo")
line, id1 = self._findId(line, fullPath)
todostatement = findTodoVariants(line)
line1 = line.replace(todostatement, "")
infotext, timetext, user, group, descr = self._parseTaskQuestionRemark(line1)
obj = self.projectInfoObject.tasks.addTask(id=id1, descr=descr.strip())
obj.model.storyid = 0
obj.model.users = user
obj.model.group = group
obj.model.path = fullPath
obj.model.context = self._getLinesAround(fullPath, line, 10, 20)
obj.model.descrshort = self.shortenDescr(descr)
# print "infotext:%s" % infotext
self._parseTaskInfo(obj.model, infotext)
self._parseTimeInfo(timetext, obj.model, defaults=[0, 1, 0, 1, 0])
if obj.model.storyid == 0:
obj.model.storyid = 999 # 999 is the unsorted story card
def errorTrap(self, msg):
if msg not in self._errors:
self._errors.append(msg)
j.tools.console.echo("ERROR: %s" % msg)
| 41.632716 | 115 | 0.485952 |
2a3d5097d8809d9989db0a2bf45a4663eb471b1d | 9,437 | py | Python | src/sage/quadratic_forms/special_values.py | defeo/sage | d8822036a9843bd4d75845024072515ede56bcb9 | [
"BSL-1.0"
]
| 2 | 2018-06-30T01:37:35.000Z | 2018-06-30T01:37:39.000Z | src/sage/quadratic_forms/special_values.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | [
"BSL-1.0"
]
| null | null | null | src/sage/quadratic_forms/special_values.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | [
"BSL-1.0"
]
| null | null | null | """
Routines for computing special values of L-functions
- :func:`gamma__exact` -- Exact values of the `\Gamma` function at integers and half-integers
- :func:`zeta__exact` -- Exact values of the Riemann `\zeta` function at critical values
- :func:`quadratic_L_function__exact` -- Exact values of the Dirichlet L-functions of quadratic characters at critical values
- :func:`quadratic_L_function__numerical` -- Numerical values of the Dirichlet L-functions of quadratic characters in the domain of convergence
"""
# python3
from __future__ import division, print_function
from sage.combinat.combinat import bernoulli_polynomial
from sage.misc.functional import denominator
from sage.rings.all import RealField
from sage.arith.all import kronecker_symbol, bernoulli, factorial, fundamental_discriminant
from sage.rings.infinity import infinity
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.rational_field import QQ
from sage.rings.real_mpfr import is_RealField
from sage.symbolic.constants import pi
from sage.symbolic.pynac import I
# ---------------- The Gamma Function ------------------
def gamma__exact(n):
"""
Evaluates the exact value of the `\Gamma` function at an integer or
half-integer argument.
EXAMPLES::
sage: gamma__exact(4)
6
sage: gamma__exact(3)
2
sage: gamma__exact(2)
1
sage: gamma__exact(1)
1
sage: gamma__exact(1/2)
sqrt(pi)
sage: gamma__exact(3/2)
1/2*sqrt(pi)
sage: gamma__exact(5/2)
3/4*sqrt(pi)
sage: gamma__exact(7/2)
15/8*sqrt(pi)
sage: gamma__exact(-1/2)
-2*sqrt(pi)
sage: gamma__exact(-3/2)
4/3*sqrt(pi)
sage: gamma__exact(-5/2)
-8/15*sqrt(pi)
sage: gamma__exact(-7/2)
16/105*sqrt(pi)
TESTS::
sage: gamma__exact(1/3)
Traceback (most recent call last):
...
TypeError: you must give an integer or half-integer argument
"""
from sage.all import sqrt
n = QQ(n)
if denominator(n) == 1:
if n <= 0:
return infinity
if n > 0:
return factorial(n-1)
elif denominator(n) == 2:
ans = QQ.one()
while n != QQ((1, 2)):
if n < 0:
ans /= n
n += 1
elif n > 0:
n += -1
ans *= n
ans *= sqrt(pi)
return ans
else:
raise TypeError("you must give an integer or half-integer argument")
# ------------- The Riemann Zeta Function --------------
def zeta__exact(n):
r"""
Returns the exact value of the Riemann Zeta function
The argument must be a critical value, namely either positive even
or negative odd.
See for example [Iwasawa]_, p13, Special value of `\zeta(2k)`
EXAMPLES:
Let us test the accuracy for negative special values::
sage: RR = RealField(100)
sage: for i in range(1,10):
....: print("zeta({}): {}".format(1-2*i, RR(zeta__exact(1-2*i)) - zeta(RR(1-2*i))))
zeta(-1): 0.00000000000000000000000000000
zeta(-3): 0.00000000000000000000000000000
zeta(-5): 0.00000000000000000000000000000
zeta(-7): 0.00000000000000000000000000000
zeta(-9): 0.00000000000000000000000000000
zeta(-11): 0.00000000000000000000000000000
zeta(-13): 0.00000000000000000000000000000
zeta(-15): 0.00000000000000000000000000000
zeta(-17): 0.00000000000000000000000000000
Let us test the accuracy for positive special values::
sage: all(abs(RR(zeta__exact(2*i))-zeta(RR(2*i))) < 10**(-28) for i in range(1,10))
True
TESTS::
sage: zeta__exact(4)
1/90*pi^4
sage: zeta__exact(-3)
1/120
sage: zeta__exact(0)
-1/2
sage: zeta__exact(5)
Traceback (most recent call last):
...
TypeError: n must be a critical value (i.e. even > 0 or odd < 0)
REFERENCES:
.. [Iwasawa] Iwasawa, *Lectures on p-adic L-functions*
.. [IreRos] Ireland and Rosen, *A Classical Introduction to Modern Number Theory*
.. [WashCyc] Washington, *Cyclotomic Fields*
"""
if n < 0:
return bernoulli(1-n)/(n-1)
elif n > 1:
if (n % 2 == 0):
return ZZ(-1)**(n//2 + 1) * ZZ(2)**(n-1) * pi**n * bernoulli(n) / factorial(n)
else:
raise TypeError("n must be a critical value (i.e. even > 0 or odd < 0)")
elif n == 1:
return infinity
elif n == 0:
return QQ((-1, 2))
# ---------- Dirichlet L-functions with quadratic characters ----------
def QuadraticBernoulliNumber(k, d):
r"""
Compute `k`-th Bernoulli number for the primitive
quadratic character associated to `\chi(x) = \left(\frac{d}{x}\right)`.
EXAMPLES:
Let us create a list of some odd negative fundamental discriminants::
sage: test_set = [d for d in range(-163, -3, 4) if is_fundamental_discriminant(d)]
In general, we have `B_{1, \chi_d} = -2 h/w` for odd negative fundamental
discriminants::
sage: all(QuadraticBernoulliNumber(1, d) == -len(BinaryQF_reduced_representatives(d)) for d in test_set)
True
REFERENCES:
- [Iwasawa]_, pp 7-16.
"""
# Ensure the character is primitive
d1 = fundamental_discriminant(d)
f = abs(d1)
# Make the (usual) k-th Bernoulli polynomial
x = PolynomialRing(QQ, 'x').gen()
bp = bernoulli_polynomial(x, k)
# Make the k-th quadratic Bernoulli number
total = sum([kronecker_symbol(d1, i) * bp(i/f) for i in range(f)])
total *= (f ** (k-1))
return total
def quadratic_L_function__exact(n, d):
r"""
Returns the exact value of a quadratic twist of the Riemann Zeta function
by `\chi_d(x) = \left(\frac{d}{x}\right)`.
The input `n` must be a critical value.
EXAMPLES::
sage: quadratic_L_function__exact(1, -4)
1/4*pi
sage: quadratic_L_function__exact(-4, -4)
5/2
sage: quadratic_L_function__exact(2, 1)
1/6*pi^2
TESTS::
sage: quadratic_L_function__exact(2, -4)
Traceback (most recent call last):
...
TypeError: n must be a critical value (i.e. odd > 0 or even <= 0)
REFERENCES:
- [Iwasawa]_, pp 16-17, Special values of `L(1-n, \chi)` and `L(n, \chi)`
- [IreRos]_
- [WashCyc]_
"""
from sage.all import SR, sqrt
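    # For n <= 0 this is the classical L(1 - k, chi) = -B_{k, chi} / k,
    # written below as B_{1-n, chi} / (n - 1) with k = 1 - n.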
if n <= 0:
return QuadraticBernoulliNumber(1-n,d)/(n-1)
elif n >= 1:
# Compute the kind of critical values (p10)
if kronecker_symbol(fundamental_discriminant(d), -1) == 1:
delta = 0
else:
delta = 1
# Compute the positive special values (p17)
if ((n - delta) % 2 == 0):
f = abs(fundamental_discriminant(d))
if delta == 0:
GS = sqrt(f)
else:
GS = I * sqrt(f)
ans = SR(ZZ(-1)**(1+(n-delta)/2))
ans *= (2*pi/f)**n
            ans *= GS # Evaluate the Gauss sum here!
ans *= QQ.one()/(2 * I**delta)
ans *= QuadraticBernoulliNumber(n,d)/factorial(n)
return ans
else:
if delta == 0:
raise TypeError("n must be a critical value (i.e. even > 0 or odd < 0)")
if delta == 1:
raise TypeError("n must be a critical value (i.e. odd > 0 or even <= 0)")
def quadratic_L_function__numerical(n, d, num_terms=1000):
"""
Evaluate the Dirichlet L-function (for quadratic character) numerically
(in a very naive way).
EXAMPLES:
First, let us test several values for a given character::
sage: RR = RealField(100)
sage: for i in range(5):
....: print("L({}, (-4/.)): {}".format(1+2*i, RR(quadratic_L_function__exact(1+2*i, -4)) - quadratic_L_function__numerical(RR(1+2*i),-4, 10000)))
L(1, (-4/.)): 0.000049999999500000024999996962707
L(3, (-4/.)): 4.99999970000003...e-13
L(5, (-4/.)): 4.99999922759382...e-21
L(7, (-4/.)): ...e-29
L(9, (-4/.)): ...e-29
This procedure fails for negative special values, as the Dirichlet
series does not converge here::
sage: quadratic_L_function__numerical(-3,-4, 10000)
Traceback (most recent call last):
...
ValueError: the Dirichlet series does not converge here
Test for several characters that the result agrees with the exact
value, to a given accuracy ::
sage: for d in range(-20,0): # long time (2s on sage.math 2014)
....: if abs(RR(quadratic_L_function__numerical(1, d, 10000) - quadratic_L_function__exact(1, d))) > 0.001:
....: print("Oops! We have a problem at d = {}: exact = {}, numerical = {}".format(d, RR(quadratic_L_function__exact(1, d)), RR(quadratic_L_function__numerical(1, d))))
"""
# Set the correct precision if it is given (for n).
if is_RealField(n.parent()):
R = n.parent()
else:
R = RealField()
if n < 0:
raise ValueError('the Dirichlet series does not converge here')
d1 = fundamental_discriminant(d)
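    # Naive partial sum of the Dirichlet series
    # L(n, chi_{d1}) = sum_{i >= 1} kronecker(d1, i) / i^n, summed here for i < num_terms.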
ans = R.zero()
for i in range(1,num_terms):
ans += R(kronecker_symbol(d1,i) / R(i)**n)
return ans
| 31.881757 | 184 | 0.592455 |
a74421df746d53832f4dad4571574a94ae973ce6 | 36,667 | py | Python | propertyestimator/protocols/groups.py | MSchauperl/propertyestimator | 9a67cb61498024c511f9bbe55536ac8e1a3c93be | [
"MIT"
]
| null | null | null | propertyestimator/protocols/groups.py | MSchauperl/propertyestimator | 9a67cb61498024c511f9bbe55536ac8e1a3c93be | [
"MIT"
]
| null | null | null | propertyestimator/protocols/groups.py | MSchauperl/propertyestimator | 9a67cb61498024c511f9bbe55536ac8e1a3c93be | [
"MIT"
]
| null | null | null | """
A collection of specialized workflow protocols, which serve to group together
multiple individual protocol building blocks, and apply special behaviours when
executing them.
Such behaviours may include, for example, running the grouped protocols
repeatedly until certain conditions have been met.
"""
import copy
import json
import logging
from enum import Enum, unique
from os import path, makedirs
from propertyestimator import unit
from propertyestimator.utils import graph
from propertyestimator.utils.exceptions import PropertyEstimatorException
from propertyestimator.workflow.decorators import InequalityMergeBehaviour, protocol_input, protocol_output
from propertyestimator.workflow.plugins import register_calculation_protocol, available_protocols
from propertyestimator.workflow.protocols import BaseProtocol, ProtocolPath
from propertyestimator.workflow.schemas import ProtocolGroupSchema
@register_calculation_protocol()
class ProtocolGroup(BaseProtocol):
"""A collection of protocols to be executed in one batch.
This may be used for example to cluster together multiple protocols
that will execute in a linear chain so that multiple scheduler
execution calls are reduced into a single one.
Additionally, a group may provide enhanced behaviour, for example
    running all protocols within the group self-consistently until
    a given condition is met (e.g. run a simulation until a given observable
has converged).
"""
@property
def root_protocols(self):
"""List[str]: The ids of the protocols in the group which do not take
input from the other grouped protocols."""
return self._root_protocols
@property
def execution_order(self):
"""List[str]: The ids of the protocols in the group, in the order in which
they will be internally executed."""
return self._execution_order
@property
def dependants_graph(self):
"""Dict[str, str]: A dictionary of which stores which grouped protocols are
dependant on other grouped protocols. Each key in the dictionary is the id of
a grouped protocol, and each value is the id of a protocol which depends on the
protocol by the key."""
return self._dependants_graph
@property
def protocols(self):
"""Dict[str, BaseProtocol]: A dictionary of the protocols in this groups, where the dictionary
key is the protocol id, and the value the protocol itself."""
return self._protocols
def __init__(self, protocol_id):
"""Constructs a new ProtocolGroup.
"""
self._dependants_graph = {}
self._root_protocols = []
self._execution_order = []
self._protocols = {}
super().__init__(protocol_id)
def _initialize(self):
"""Initialize the protocol."""
super(ProtocolGroup, self)._initialize()
self._dependants_graph = {}
self._root_protocols = []
self._execution_order = []
self._protocols = {}
def _get_schema(self):
base_schema = super(ProtocolGroup, self)._get_schema()
# Convert the base schema to a group one.
schema = ProtocolGroupSchema()
schema_dict = schema.__getstate__()
schema_dict.update(base_schema.__getstate__())
schema.__setstate__(schema_dict)
for protocol_id in self._protocols:
schema.grouped_protocol_schemas.append(self._protocols[protocol_id].schema)
return schema
def _set_schema(self, schema_value):
"""
Parameters
----------
schema_value: ProtocolGroupSchema
The schema from which this group should take its properties.
"""
super(ProtocolGroup, self)._set_schema(schema_value)
protocols_to_create = []
for protocol_schema in schema_value.grouped_protocol_schemas:
if protocol_schema.id in self._protocols:
self._protocols[protocol_schema.id].schema = protocol_schema
continue
# Recreate the protocol from scratch.
protocol = available_protocols[protocol_schema.type](protocol_schema.id)
protocol.schema = protocol_schema
protocols_to_create.append(protocol)
if len(protocols_to_create) > 0:
self.add_protocols(*protocols_to_create)
def add_protocols(self, *protocols):
for protocol in protocols:
if protocol.id in self._protocols:
raise ValueError('The {} group already contains a protocol '
'with id {}.'.format(self.id, protocol.id))
self._protocols[protocol.id] = protocol
self._dependants_graph[protocol.id] = []
        # Pull each individual protocol's inputs up so that they
        # become required inputs of the group.
for protocol_id in self._protocols:
protocol = self._protocols[protocol_id]
for input_path in protocol.required_inputs:
grouped_path = ProtocolPath.from_string(input_path.full_path)
if grouped_path.start_protocol != protocol.id:
grouped_path.prepend_protocol_id(protocol.id)
grouped_path.prepend_protocol_id(self.id)
if grouped_path in self.required_inputs:
continue
reference_values = protocol.get_value_references(input_path)
if len(reference_values) == 0:
self.required_inputs.append(grouped_path)
for source_path, reference_value in reference_values.items():
if reference_value.start_protocol not in self._protocols:
self.required_inputs.append(grouped_path)
continue
if protocol_id in self._dependants_graph[reference_value.start_protocol]:
continue
self._dependants_graph[reference_value.start_protocol].append(protocol_id)
# Figure out the order in which grouped protocols should be executed.
self._root_protocols = graph.find_root_nodes(self._dependants_graph)
self._execution_order = graph.topological_sort(self._dependants_graph)
def set_uuid(self, value):
"""Store the uuid of the calculation this protocol belongs to
Parameters
----------
value : str
The uuid of the parent calculation.
"""
for index in range(len(self._root_protocols)):
self._root_protocols[index] = graph.append_uuid(self._root_protocols[index], value)
for index in range(len(self._execution_order)):
self._execution_order[index] = graph.append_uuid(self._execution_order[index], value)
new_dependants_graph = {}
for protocol_id in self._dependants_graph:
new_protocol_id = graph.append_uuid(protocol_id, value)
new_dependants_graph[new_protocol_id] = []
for dependant in self._dependants_graph[protocol_id]:
new_dependant_id = graph.append_uuid(dependant, value)
new_dependants_graph[new_protocol_id].append(new_dependant_id)
self._dependants_graph = new_dependants_graph
new_protocols = {}
for protocol_id in self._protocols:
protocol = self._protocols[protocol_id]
protocol.set_uuid(value)
new_protocols[protocol.id] = protocol
self._protocols = new_protocols
super(ProtocolGroup, self).set_uuid(value)
def replace_protocol(self, old_id, new_id):
"""Finds each input which came from a given protocol
and redirects it to instead take input from a different one.
Parameters
----------
old_id : str
The id of the old input protocol.
new_id : str
The id of the new input protocol.
"""
super(ProtocolGroup, self).replace_protocol(old_id, new_id)
for index in range(len(self._root_protocols)):
self._root_protocols[index] = self._root_protocols[index].replace(old_id, new_id)
for index in range(len(self._execution_order)):
self._execution_order[index] = self._execution_order[index].replace(old_id, new_id)
new_dependants_graph = {}
for protocol_id in self._dependants_graph:
new_protocol_id = protocol_id.replace(old_id, new_id)
new_dependants_graph[new_protocol_id] = []
for dependant in self._dependants_graph[protocol_id]:
new_dependant_id = dependant.replace(old_id, new_id)
new_dependants_graph[new_protocol_id].append(new_dependant_id)
self._dependants_graph = new_dependants_graph
new_protocols = {}
for protocol_id in self._protocols:
protocol = self._protocols[protocol_id]
protocol.replace_protocol(old_id, new_id)
new_protocols[protocol_id.replace(old_id, new_id)] = protocol
self._protocols = new_protocols
def execute(self, directory, available_resources):
"""Executes the protocols within this groups
Parameters
----------
directory : str
The root directory in which to run the protocols
available_resources: ComputeResources
The resources available to execute on.
Returns
-------
bool
True if all the protocols execute correctly.
"""
output_dictionary = {}
for protocol_id_to_execute in self._execution_order:
protocol_to_execute = self._protocols[protocol_id_to_execute]
protocol_to_execute_schema = protocol_to_execute.schema
working_directory = path.join(directory, protocol_to_execute.id)
if not path.isdir(working_directory):
makedirs(working_directory)
for input_path in protocol_to_execute.required_inputs:
value_references = protocol_to_execute.get_value_references(input_path)
for source_path, value_reference in value_references.items():
if (value_reference.start_protocol == input_path.start_protocol or
value_reference.start_protocol == protocol_to_execute.id):
continue
if value_reference.start_protocol == self._id:
value = self.get_value(value_reference)
else:
value = self._protocols[value_reference.start_protocol].get_value(value_reference)
protocol_to_execute.set_value(source_path, value)
return_value = protocol_to_execute.execute(working_directory, available_resources)
if isinstance(return_value, PropertyEstimatorException):
return return_value
for output_path in return_value:
output_path_prepended = ProtocolPath.from_string(output_path)
if (output_path_prepended.start_protocol != self.id and
output_path_prepended.start_protocol != protocol_id_to_execute):
output_path_prepended.prepend_protocol_id(protocol_id_to_execute)
if output_path_prepended.start_protocol != self.id:
output_path_prepended.prepend_protocol_id(self.id)
output_path_prepended.prepend_protocol_id(self.id)
output_dictionary[output_path_prepended.full_path] = return_value[output_path]
protocol_to_execute.schema = protocol_to_execute_schema
return output_dictionary
def can_merge(self, other, path_replacements=None):
"""Determines whether this protocol group can be merged with another.
Parameters
----------
other : ProtocolGroup
The protocol group to compare against.
path_replacements: list of tuple of str, optional
Replacements to make in any value reference protocol paths
before comparing for equality.
Returns
----------
bool
True if the two protocols are safe to merge.
"""
if path_replacements is None:
path_replacements = []
path_replacements.append((other.id, self.id))
if not super(ProtocolGroup, self).can_merge(other, path_replacements):
return False
# if len(self._root_protocols) != len(other.root_protocols):
# # Only allow groups with the same number of root protocols
# # to merge.
# return False
# Ensure that the starting points in each group can be
# merged.
for self_root_id in self._root_protocols:
self_protocol = self._protocols[self_root_id]
can_merge_with_root = False
for other_root_id in other.root_protocols:
other_protocol = other.protocols[other_root_id]
if not self_protocol.can_merge(other_protocol, path_replacements):
continue
can_merge_with_root = True
break
if not can_merge_with_root:
return False
return True
def _try_merge_protocol(self, other_protocol_id, other_group, parent_ids, merged_ids, path_replacements=None):
"""Recursively inserts a protocol node into the group.
Parameters
----------
other_protocol_id : str
The name of the other protocol to attempt to merge.
other_group : ProtocolGroup
The other protocol group which the protocol to merge belongs to.
parent_ids : List[str]
The ids of the new parents of the node to be inserted. If None,
the protocol will be added as a new parent node.
merged_ids : Dict[str, str]
A map between any original protocol ids and their new merged values.
path_replacements: list of tuple of str, optional
Replacements to make in any value reference protocol paths
before comparing for equality.
"""
if other_protocol_id in self._dependants_graph:
raise RuntimeError('A protocol with id {} has already been merged '
'into the group.'.format(other_protocol_id))
protocol_ids = self._root_protocols if len(parent_ids) == 0 else []
for parent_id in parent_ids:
protocol_ids.extend(x for x in self._dependants_graph[parent_id] if x not in protocol_ids)
protocol_to_merge = other_group.protocols[other_protocol_id]
existing_protocol = None
# Start by checking to see if the starting node of the calculation graph is
# already present in the full graph.
for protocol_id in protocol_ids:
protocol = self._protocols[protocol_id]
if not protocol.can_merge(protocol_to_merge, path_replacements):
continue
existing_protocol = protocol
break
if existing_protocol is not None:
# Make a note that the existing node should be used in place
# of this calculations version.
other_group.protocols[other_protocol_id] = existing_protocol
merged_ids[other_protocol_id] = existing_protocol.id
for protocol_to_update in other_group.protocols:
other_group.protocols[protocol_to_update].replace_protocol(other_protocol_id,
existing_protocol.id)
else:
# Add the protocol as a new node in the graph.
self._protocols[other_protocol_id] = protocol_to_merge
existing_protocol = self._protocols[other_protocol_id]
self._dependants_graph[other_protocol_id] = []
if len(parent_ids) == 0:
self._root_protocols.append(other_protocol_id)
for parent_id in parent_ids:
self._dependants_graph[parent_id].append(other_protocol_id)
return existing_protocol.id
def merge(self, other):
"""Merges another ProtocolGroup with this one. The id
of this protocol will remain unchanged.
        It is assumed that can_merge has already confirmed that
        these protocol groups can safely be merged together.
Parameters
----------
other: ProtocolGroup
The protocol to merge into this one.
Returns
-------
Dict[str, str]
A map between any original protocol ids and their new merged values.
"""
merged_ids = super(ProtocolGroup, self).merge(other)
other_execution_order = graph.topological_sort(other.dependants_graph)
other_reduced_protocol_dependants = copy.deepcopy(other.dependants_graph)
graph.apply_transitive_reduction(other_reduced_protocol_dependants)
other_parent_protocol_ids = {}
for protocol_id in other_execution_order:
parent_ids = other_parent_protocol_ids.get(protocol_id) or []
inserted_id = self._try_merge_protocol(protocol_id, other, parent_ids, merged_ids, [(other.id, self.id)])
for dependant in other_reduced_protocol_dependants[protocol_id]:
if dependant not in other_parent_protocol_ids:
other_parent_protocol_ids[dependant] = []
other_parent_protocol_ids[dependant].append(inserted_id)
self._execution_order = graph.topological_sort(self._dependants_graph)
return merged_ids
def _get_next_in_path(self, reference_path):
"""Returns the id of the next protocol in a protocol path,
making sure that the targeted protocol is within this group.
Parameters
----------
reference_path: ProtocolPath
The path being traversed.
Returns
-------
str
The id of the next protocol in the path.
ProtocolPath
The remainder of the path to be traversed.
"""
# Make a copy of the path so we can alter it safely.
reference_path_clone = copy.deepcopy(reference_path)
if reference_path.start_protocol == self.id:
reference_path_clone.pop_next_in_path()
target_protocol_id = reference_path_clone.pop_next_in_path()
if target_protocol_id not in self._protocols:
            raise ValueError('The reference path does not target this protocol '
                             'or any of its children.')
return target_protocol_id, reference_path_clone
def get_class_attribute(self, reference_path):
reference_property, reference_ids = ProtocolPath.to_components(reference_path.full_path)
if reference_path.start_protocol is None or (reference_path.start_protocol == self.id and
len(reference_ids) == 1):
return super(ProtocolGroup, self).get_class_attribute(reference_path)
target_protocol_id, truncated_path = self._get_next_in_path(reference_path)
return self._protocols[target_protocol_id].get_class_attribute(truncated_path)
def get_value(self, reference_path):
"""Returns the value of one of this protocols parameters / inputs.
Parameters
----------
reference_path: ProtocolPath
The path pointing to the value to return.
Returns
----------
object:
The value of the input
"""
reference_property, reference_ids = ProtocolPath.to_components(reference_path.full_path)
if reference_path.start_protocol is None or (reference_path.start_protocol == self.id and
len(reference_ids) == 1):
return super(ProtocolGroup, self).get_value(reference_path)
target_protocol_id, truncated_path = self._get_next_in_path(reference_path)
return self._protocols[target_protocol_id].get_value(truncated_path)
def set_value(self, reference_path, value):
"""Sets the value of one of this protocols parameters / inputs.
Parameters
----------
reference_path: ProtocolPath
The path pointing to the value to return.
value: Any
The value to set.
"""
reference_property, reference_ids = ProtocolPath.to_components(reference_path.full_path)
if reference_path.start_protocol is None or (reference_path.start_protocol == self.id and
len(reference_ids) == 1):
return super(ProtocolGroup, self).set_value(reference_path, value)
# Make a copy of the path so we can alter it safely.
reference_path_clone = copy.deepcopy(reference_path)
if reference_path.start_protocol == self.id:
reference_path_clone.pop_next_in_path()
target_protocol_id = reference_path_clone.pop_next_in_path()
if target_protocol_id not in self._protocols:
            raise ValueError('The reference path does not target this protocol '
                             'or any of its children.')
return self._protocols[target_protocol_id].set_value(reference_path_clone, value)
def apply_replicator(self, replicator, template_values, template_index=-1, template_value=None,
update_input_references=False):
protocols, replication_map = replicator.apply(self.protocols, template_values,
template_index, template_value)
if (template_index >= 0 or template_value is not None) and update_input_references is True:
raise ValueError('Specific template indices and values cannot be passed '
'when `update_input_references` is True')
if update_input_references:
replicator.update_references(protocols, replication_map, template_values)
# Re-initialize the group using the replicated protocols.
self._initialize()
self.add_protocols(*protocols.values())
return replication_map
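# Illustrative sketch (hypothetical protocol ids, not part of the library): the
# bookkeeping performed by ``add_protocols`` boils down to a dependants graph
# that is topologically sorted to obtain the internal execution order.
def _sketch_dependants_graph():
    dependants_graph = {
        'build_coordinates': ['run_simulation'],   # the simulation consumes the built system
        'run_simulation': ['extract_average'],     # the analysis consumes the trajectory
        'extract_average': [],
    }
    root_protocols = graph.find_root_nodes(dependants_graph)    # ['build_coordinates']
    execution_order = graph.topological_sort(dependants_graph)  # roots first, dependants later
    return root_protocols, execution_order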
@register_calculation_protocol()
class ConditionalGroup(ProtocolGroup):
"""A collection of protocols which are to execute until
a given condition is met.
"""
@unique
class ConditionType(Enum):
"""The acceptable conditions to place on the group"""
LessThan = 'lessthan'
GreaterThan = 'greaterthan'
@classmethod
def has_value(cls, value):
"""Checks whether an of the enum items matches a given value.
Parameters
----------
value: str
The value to check for.
Returns
---------
bool
True if the enum contains the value.
"""
return any(value == item.value for item in cls)
class Condition:
def __init__(self):
self.type = ConditionalGroup.ConditionType.LessThan
self.left_hand_value = None
self.right_hand_value = None
def __getstate__(self):
return {
'type': self.type.value,
'left_hand_value': self.left_hand_value,
'right_hand_value': self.right_hand_value
}
def __setstate__(self, state):
self.type = ConditionalGroup.ConditionType(state['type'])
self.left_hand_value = state['left_hand_value']
self.right_hand_value = state['right_hand_value']
def __eq__(self, other):
return (self.left_hand_value == other.left_hand_value and
self.right_hand_value == other.right_hand_value and
self.type == other.type)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return f'{self.left_hand_value} {self.type} {self.right_hand_value}'
@property
def conditions(self):
return self._conditions
max_iterations = protocol_input(
docstring='The maximum number of iterations to run for to try and satisfy the '
'groups conditions.',
type_hint=int,
default_value=100,
merge_behavior=InequalityMergeBehaviour.LargestValue
)
current_iteration = protocol_output(
docstring='The current number of iterations this group has performed while '
'attempting to satisfy the specified conditions. This value starts '
'from one.',
type_hint=int
)
def __init__(self, protocol_id):
"""Constructs a new ConditionalGroup
"""
self._conditions = []
super().__init__(protocol_id)
def _initialize(self):
"""Initialize the protocol."""
super(ConditionalGroup, self)._initialize()
self.required_inputs.append(ProtocolPath('conditions'))
def _set_schema(self, schema_value):
conditions = None
if '.conditions' in schema_value.inputs:
conditions = schema_value.inputs.pop('.conditions')
for condition in conditions:
self.add_condition(copy.deepcopy(condition))
super(ConditionalGroup, self)._set_schema(schema_value)
if conditions is not None:
schema_value.inputs['.conditions'] = conditions
def _evaluate_condition(self, condition):
"""Evaluates whether a condition has been successfully met.
Parameters
----------
condition: ConditionalGroup.Condition
The condition to evaluate.
Returns
-------
bool
True if the condition has been met.
"""
if not isinstance(condition.left_hand_value, ProtocolPath):
left_hand_value = condition.left_hand_value
else:
left_hand_value = self.get_value(condition.left_hand_value)
if not isinstance(condition.right_hand_value, ProtocolPath):
right_hand_value = condition.right_hand_value
else:
right_hand_value = self.get_value(condition.right_hand_value)
if left_hand_value is None or right_hand_value is None:
return False
right_hand_value_correct_units = right_hand_value
if isinstance(right_hand_value, unit.Quantity) and isinstance(left_hand_value, unit.Quantity):
right_hand_value_correct_units = right_hand_value.to(left_hand_value.units)
logging.info(f'Evaluating condition for protocol {self.id}: '
f'{left_hand_value} {condition.type} {right_hand_value_correct_units}')
if condition.type == self.ConditionType.LessThan:
return left_hand_value < right_hand_value
elif condition.type == self.ConditionType.GreaterThan:
return left_hand_value > right_hand_value
raise NotImplementedError()
@staticmethod
def _write_checkpoint(directory, current_iteration):
"""Creates a checkpoint file for this group so that it can continue
executing where it left off if it was killed for some reason (e.g the
worker it was running on was killed).
Parameters
----------
directory: str
The path to the working directory of this protocol
current_iteration: int
The number of iterations this group has performed so far.
"""
checkpoint_path = path.join(directory, 'checkpoint.json')
with open(checkpoint_path, 'w') as file:
json.dump({'current_iteration': current_iteration}, file)
@staticmethod
def _read_checkpoint(directory):
"""Creates a checkpoint file for this group so that it can continue
executing where it left off if it was killed for some reason (e.g the
worker it was running on was killed).
Parameters
----------
directory: str
The path to the working directory of this protocol
Returns
-------
int
The number of iterations this group has performed so far.
"""
current_iteration = 0
checkpoint_path = path.join(directory, 'checkpoint.json')
if not path.isfile(checkpoint_path):
return current_iteration
with open(checkpoint_path, 'r') as file:
checkpoint_dictionary = json.load(file)
current_iteration = checkpoint_dictionary['current_iteration']
return current_iteration
def execute(self, directory, available_resources):
"""Executes the protocols within this groups
Parameters
----------
directory : str
The root directory in which to run the protocols
available_resources: ComputeResources
The resources available to execute on.
Returns
-------
bool
True if all the protocols execute correctly.
"""
logging.info('Starting conditional while loop: {}'.format(self.id))
should_continue = True
self.current_iteration = self._read_checkpoint(directory)
while should_continue:
# Create a checkpoint file so we can pick off where
# we left off if this execution fails due to time
# constraints for e.g.
self._write_checkpoint(directory, self.current_iteration)
self.current_iteration += 1
return_value = super(ConditionalGroup, self).execute(directory, available_resources)
if isinstance(return_value, PropertyEstimatorException):
# Exit on exceptions.
return return_value
conditions_met = True
for condition in self._conditions:
# Check to see if we have reached our goal.
if not self._evaluate_condition(condition):
conditions_met = False
if conditions_met:
logging.info(f'Conditional while loop finished after {self.current_iteration} iterations: {self.id}')
return return_value
if self.current_iteration >= self.max_iterations:
return PropertyEstimatorException(directory=directory,
message=f'Conditional while loop failed to converge: {self.id}')
logging.info(f'Conditional criteria not yet met after {self.current_iteration} iterations')
def can_merge(self, other, path_replacements=None):
return super(ConditionalGroup, self).can_merge(other, path_replacements)
def merge(self, other):
"""Merges another ProtocolGroup with this one. The id
of this protocol will remain unchanged.
        It is assumed that can_merge has already confirmed that
        these protocol groups can safely be merged together.
Parameters
----------
other: ConditionalGroup
The protocol to merge into this one.
"""
merged_ids = super(ConditionalGroup, self).merge(other)
for condition in other.conditions:
if isinstance(condition.left_hand_value, ProtocolPath):
condition.left_hand_value.replace_protocol(other.id, self.id)
if isinstance(condition.right_hand_value, ProtocolPath):
condition.right_hand_value.replace_protocol(other.id, self.id)
for merged_id in merged_ids:
if isinstance(condition.left_hand_value, ProtocolPath):
condition.left_hand_value.replace_protocol(merged_id, merged_ids[merged_id])
if isinstance(condition.right_hand_value, ProtocolPath):
condition.right_hand_value.replace_protocol(merged_id, merged_ids[merged_id])
self.add_condition(condition)
return merged_ids
def add_condition(self, condition_to_add):
"""Adds a condition to this groups list of conditions if it
not already in the condition list.
Parameters
----------
condition_to_add: :obj:`ConditionalGroup.Condition`
The condition to add.
"""
for condition in self._conditions:
if condition == condition_to_add:
return
self._conditions.append(condition_to_add)
def set_uuid(self, value):
"""Store the uuid of the calculation this protocol belongs to
Parameters
----------
value : str
The uuid of the parent calculation.
"""
super(ConditionalGroup, self).set_uuid(value)
for condition in self._conditions:
if isinstance(condition.left_hand_value, ProtocolPath):
condition.left_hand_value.append_uuid(value)
if isinstance(condition.right_hand_value, ProtocolPath):
condition.right_hand_value.append_uuid(value)
def replace_protocol(self, old_id, new_id):
"""Finds each input which came from a given protocol
and redirects it to instead take input from a different one.
Parameters
----------
old_id : str
The id of the old input protocol.
new_id : str
The id of the new input protocol.
"""
super(ConditionalGroup, self).replace_protocol(old_id, new_id)
for condition in self._conditions:
if isinstance(condition.left_hand_value, ProtocolPath):
condition.left_hand_value.replace_protocol(old_id, new_id)
if isinstance(condition.right_hand_value, ProtocolPath):
condition.right_hand_value.replace_protocol(old_id, new_id)
def get_class_attribute(self, reference_path):
if reference_path.start_protocol is None or (reference_path.start_protocol == self.id and
reference_path.last_protocol == self.id):
if reference_path.property_name == 'conditions' or reference_path.property_name.find('condition_') >= 0:
return None
return super(ConditionalGroup, self).get_class_attribute(reference_path)
def get_value(self, reference_path):
"""Returns the value of one of this protocols parameters / inputs.
Parameters
----------
reference_path: ProtocolPath
The path pointing to the value to return.
Returns
----------
object:
The value of the input
"""
if reference_path.start_protocol is None or (reference_path.start_protocol == self.id and
reference_path.last_protocol == self.id):
if reference_path.property_name == 'conditions':
return self._conditions
return super(ConditionalGroup, self).get_value(reference_path)
def set_value(self, reference_path, value):
"""Sets the value of one of this protocols parameters / inputs.
Parameters
----------
reference_path: ProtocolPath
The path pointing to the value to return.
value: Any
The value to set.
"""
if reference_path.start_protocol is None or (reference_path.start_protocol == self.id and
reference_path.last_protocol == self.id):
if reference_path.property_name == 'conditions':
self._conditions = value
return
super(ConditionalGroup, self).set_value(reference_path, value)
def get_value_references(self, input_path):
if input_path.property_name != 'conditions':
return super(ConditionalGroup, self).get_value_references(input_path)
value_references = {}
for index, condition in enumerate(self.conditions):
if isinstance(condition.left_hand_value, ProtocolPath):
source_path = ProtocolPath('conditions[{}].left_hand_value'.format(index))
value_references[source_path] = condition.left_hand_value
if isinstance(condition.right_hand_value, ProtocolPath):
source_path = ProtocolPath('conditions[{}].right_hand_value'.format(index))
value_references[source_path] = condition.right_hand_value
return value_references
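# Minimal usage sketch (hypothetical values): attach a convergence condition to a
# ConditionalGroup. In a real workflow the left-hand value would usually be a
# ProtocolPath pointing at an output of one of the grouped protocols rather than
# a literal quantity.
def _sketch_conditional_group():
    group = ConditionalGroup('converge_uncertainty')
    condition = ConditionalGroup.Condition()
    condition.type = ConditionalGroup.ConditionType.LessThan
    condition.left_hand_value = 0.4 * unit.kilojoules / unit.mole   # stand-in observable uncertainty
    condition.right_hand_value = 0.5 * unit.kilojoules / unit.mole  # target uncertainty
    group.add_condition(condition)
    return group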
| 34.954242 | 117 | 0.63804 |
c2c02fee1deb405b1da2699f487c8eb0b8d0ad5d | 44 | py | Python | aiofcm/exceptions.py | shop-loyalty/aiofcm | a15f7144389392be84ebeeaf9ba30b46055d554d | [
"Apache-2.0"
]
| 2 | 2022-03-13T09:12:44.000Z | 2022-03-28T10:53:06.000Z | src/graia/amnesia/transport/exceptions.py | GraiaProject/Amnesia | d48d3084f776f788767939d73774146086358887 | [
"MIT"
]
| null | null | null | src/graia/amnesia/transport/exceptions.py | GraiaProject/Amnesia | d48d3084f776f788767939d73774146086358887 | [
"MIT"
]
| null | null | null | class ConnectionClosed(Exception):
pass
| 14.666667 | 34 | 0.772727 |
d840f95dcf5fd2ca5e579d84aab66dadc77648d8 | 227 | py | Python | 01 List/Problems/01_average.py | kmanadkat/leetcode-101 | 8a9db22d98692d634a497ba76c7e9f792bb1f1bc | [
"MIT"
]
| null | null | null | 01 List/Problems/01_average.py | kmanadkat/leetcode-101 | 8a9db22d98692d634a497ba76c7e9f792bb1f1bc | [
"MIT"
]
| null | null | null | 01 List/Problems/01_average.py | kmanadkat/leetcode-101 | 8a9db22d98692d634a497ba76c7e9f792bb1f1bc | [
"MIT"
]
| 1 | 2021-09-15T11:17:36.000Z | 2021-09-15T11:17:36.000Z |
from typing import List
def findListAverage(inputList: List[int]) -> float:
sizeOfList = len(inputList)
sumOfList = sum(inputList)
return sumOfList / sizeOfList
print(findListAverage(inputList=[0, 1, 2, 3, 4, 5, 6]))
| 18.916667 | 55 | 0.696035 |
a27121361b74b0fa7394d7fbab854bb11217e284 | 4,797 | py | Python | Utils/get_cvc_vulnerabilities.py | devarajug/CVC-TechStack | fa4e44711640335f09a5160b6f8bd10bdd0fc78f | [
"MIT"
]
| null | null | null | Utils/get_cvc_vulnerabilities.py | devarajug/CVC-TechStack | fa4e44711640335f09a5160b6f8bd10bdd0fc78f | [
"MIT"
]
| null | null | null | Utils/get_cvc_vulnerabilities.py | devarajug/CVC-TechStack | fa4e44711640335f09a5160b6f8bd10bdd0fc78f | [
"MIT"
]
| null | null | null | from os.path import isfile
from re import search
from re import compile
from re import sub
from json import loads
import pandas as pd
class FetchCvcVulnerabilities:
    def __init__(self, cvc_json_file_path, comments=None, escaped_path=None):
        # use None defaults to avoid sharing mutable default arguments between instances
        self.json_file_path = cvc_json_file_path
        self.comments = comments if comments is not None else {}
        self.remove_error = compile(''',"analysisExceptions":\[\{<exception>.*</exception>\}\]''')
        self.escaped_path = escaped_path if escaped_path is not None else []
def readCVCDataFromJsonFile(self):
if isfile(self.json_file_path):
try:
with open(self.json_file_path, 'r', encoding='utf8') as f:
data = f.read()
if search(self.remove_error, data):
data = sub(self.remove_error, '', data)
cvcJsonData = loads(data)
except Exception as e:
cvcJsonData = {}
print("[Error] unable read dependency check json file....", str(e))
else:
cvcJsonData = {}
print("[Error] unable read dependency check json file....")
return cvcJsonData
def cvcJsonDataToDataFrame(self):
vuldependency, cve_id, severity, filePath, description, status, developer_comment = [[] for i in range(7)]
        # pre-initialise so the method still returns a DataFrame if parsing fails below
        df_cvc = pd.DataFrame()
        try:
cvcJsonData = self.readCVCDataFromJsonFile()
for dependency in cvcJsonData.get("dependencies", {}):
if 'vulnerabilities' in dependency.keys():
for vulnerability in dependency.get('vulnerabilities', {}):
if 'relatedDependencies' in dependency.keys():
vuldependency.append(dependency.get('fileName').strip())
cve_id.append(vulnerability.get('name').strip())
severity.append(vulnerability.get('severity').upper().strip())
filePath.append('/'.join([x for x in dependency.get('filePath').split("\\") if x not in self.escaped_path]))
description.append(str(vulnerability.get('description')).replace('\n', ' '))
status.append(self.comments.get(vuldependency[-1], {}).get(cve_id[-1], {}).get("Status", "Open"))
developer_comment.append(self.comments.get(vuldependency[-1], {}).get(cve_id[-1], {}).get("Comment", "need add in JsonFile"))
for relatedDependency in dependency.get('relatedDependencies', {}):
filename = relatedDependency.get('filePath').split('\\')[-1].strip()
filePath.append('/'.join([x for x in relatedDependency.get('filePath').split('\\') if x not in self.escaped_path]))
vuldependency.append(filename)
cve_id.append(vulnerability.get('name').strip())
description.append(str(vulnerability.get('description')).replace('\n', ' '))
severity.append(vulnerability.get('severity').upper().strip())
status.append(self.comments.get(vuldependency[-1], {}).get(cve_id[-1], {}).get("Status", "Open"))
developer_comment.append(self.comments.get(vuldependency[-1], {}).get(cve_id[-1], {}).get("Comment", "need add in JsonFile"))
else:
vuldependency.append(dependency.get('fileName').strip())
cve_id.append(vulnerability.get('name').strip())
severity.append(vulnerability.get('severity').upper())
filePath.append('/'.join([x for x in dependency.get('filePath').split('\\') if x not in self.escaped_path]))
description.append(str(vulnerability.get('description')).replace('\n', ' '))
status.append(self.comments.get(vuldependency[-1], {}).get(cve_id[-1], {}).get("Status", "Open"))
developer_comment.append(self.comments.get(vuldependency[-1], {}).get(cve_id[-1], {}).get("Comment", "need add in JsonFile"))
result_data = zip(vuldependency, description, cve_id, severity,filePath, status, developer_comment)
df_cvc = pd.DataFrame(list(result_data),
columns = [
"DependencyName",
"Description",
"CVE",
"Severity",
"FilePath",
"Status",
"Developer Comment"
]
)
except Exception as e:
print("[Error] unable to convert cvc data to data frame....", str(e))
return df_cvc | 59.222222 | 157 | 0.535752 |
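# Example usage sketch (hypothetical paths/values): flatten a dependency-check
# JSON report into a vulnerability table.
if __name__ == "__main__":
    fetcher = FetchCvcVulnerabilities(
        cvc_json_file_path="reports/dependency-check-report.json",
        comments={},                        # optional developer triage comments
        escaped_path=["home", "jenkins"],   # path fragments to drop from FilePath
    )
    print(fetcher.cvcJsonDataToDataFrame().head())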
c5db84742c4b627a7fdceb6c920589a6b7773d99 | 778 | py | Python | Week2/13. F1 driver Standing.py | HawkingLaugh/Data-Processing-Using-Python | 6c4d7e09317aee41684731d5611f2f0dab217b2b | [
"MIT"
]
| null | null | null | Week2/13. F1 driver Standing.py | HawkingLaugh/Data-Processing-Using-Python | 6c4d7e09317aee41684731d5611f2f0dab217b2b | [
"MIT"
]
| null | null | null | Week2/13. F1 driver Standing.py | HawkingLaugh/Data-Processing-Using-Python | 6c4d7e09317aee41684731d5611f2f0dab217b2b | [
"MIT"
]
| null | null | null | import re
import requests
from bs4 import BeautifulSoup
r = requests.get('https://www.formula1.com/en/results.html/2020/drivers.html')
soup = BeautifulSoup(r.text, 'lxml')
"""
2. Retrieve the athletes' data from the website https://www.volleyball.world/en/vnl/2019/women/resultsandranking/round1 (including TEAMS and the TOTAL, WON and LOST match counts).
Note: Although we can grab every item individually (e.g. 'USA' or '15') with the approach we have already learned,
doing so cuts off the link between related values, so it is better to parse the data in groups.
Because the lines of source code containing the athletes' data span several lines and carry many leading spaces,
we can use '\s+' (which matches whitespace, i.e. spaces and \n) to match across the multiple lines.
""" | 51.866667 | 177 | 0.760925 |
c183107730ad2c96dd7988f0265f0f9091842ecb | 514 | gyp | Python | test/win/lib-flags/ltcg.gyp | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
]
| 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | test/win/lib-flags/ltcg.gyp | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
]
| 1,432 | 2017-06-21T04:08:48.000Z | 2020-08-25T16:21:15.000Z | test/win/lib-flags/ltcg.gyp | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
]
| 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'lib_answer',
'type': 'static_library',
'msvs_settings': {
'VCCLCompilerTool': {
'WholeProgramOptimization': 'true', # /GL
},
'VCLibrarianTool': {
'LinkTimeCodeGeneration': 'true', # /LTCG
},
},
'sources': ['answer.cc'],
},
]
}
| 23.363636 | 72 | 0.548638 |
7bba663aa4996a77180fff91cac5dfef967eec1d | 1,649 | py | Python | segment.py | JanusTan/segment_compute | 2fe5659262183435ec338cf7c09597949de6f338 | [
"Apache-2.0"
]
| null | null | null | segment.py | JanusTan/segment_compute | 2fe5659262183435ec338cf7c09597949de6f338 | [
"Apache-2.0"
]
| null | null | null | segment.py | JanusTan/segment_compute | 2fe5659262183435ec338cf7c09597949de6f338 | [
"Apache-2.0"
]
| null | null | null | '''
programmer: Ryan Tan
homework requirements:
1. segment the block
2. compute the width of the block
3. calculate the area of the block
'''
import numpy as np
import cv2
# helper to verify whether a point lies on the left edge
def checkall(img2,x1,y1):
a=0
for p in img2[y1,x1+1:x1+122]:
if p == 255:
a=1
break
if p== 0:
a=3
if a==1:
return False
else :
return True
# helper to verify whether a point lies on the right edge
def checkall2(img2,x3,y3):
a=0
for p in img2[y3,x3-122:x3-1]:
if p == 255:
a=1
break
if p== 0:
a=3
if a==1:
return False
else :
return True
img = cv2.imread('source.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
canny=cv2.Canny(gray,10,300)
# select the region containing the two edges
img2=canny[0:1200,540:860]
y=0
x=0
p=0
x2=161
# build a black mask on which to draw the segmented edges
mask = np.zeros((1200,320),np.uint8)
# the lists below store the edge points we need
T=[]
J=[]
T1=[]
J1=[]
#the loop can compute the left edge points and right edge points
for q in range(0,1200):
for i in img2[y,0:160]:
if i==255 and checkall(img2,x,y):
mask[y,x]=255
T.append(y)
J.append(x)
break
x=x+1
for i in img2[y,161:320]:
if i==255 and checkall2(img2,x2,y):
mask[y,x2]=255
T1.append(y)
J1.append(x2)
break
x2=x2+1
x2=161
x=0
y=y+1
#using numpy discrete integral algorithm to compute area of the block
area=((np.trapz(J1,T1))-(np.trapz(J,T)))
print('the block area is:',area)
# the per-row width should come from the stored edge coordinates, not from the
# loop counters; this assumes both edges were detected on the rows being compared
n_rows = min(len(J), len(J1))
X = np.array(J1[:n_rows])
Y = np.array(J[:n_rows])
maxWidth = np.max(X - Y)
print('the max width:',maxWidth)
cv2.imshow('segment',canny)
cv2.imshow('segment2',mask)
cv2.waitKey(0)
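# Quick sanity check of the integral-based area computation above (toy numbers,
# not taken from the real image): a strip bounded by a left edge at x=10 and a
# right edge at x=12 over rows y=0..2 should have area (12-10)*2 = 4.
def _trapz_area_example():
    y = [0, 1, 2]
    left_x = [10, 10, 10]
    right_x = [12, 12, 12]
    return np.trapz(right_x, y) - np.trapz(left_x, y)   # 24 - 20 = 4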
| 16.326733 | 70 | 0.649485 |
cee97b7ad500a634abb0bd7bbcadd0f558d2b7d5 | 3,075 | py | Python | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy142.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
]
| null | null | null | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy142.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
]
| null | null | null | data/p2DJ/New/program/qiskit/noisy/startQiskit_noisy142.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
]
| null | null | null | # qubit number=2
# total number=10
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[1]) # number=2
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[0],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.x(input_qubit[0]) # number=3
prog.y(input_qubit[1]) # number=6
prog.x(input_qubit[0]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy142.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 27.954545 | 82 | 0.626667 |
e2568704269d9bd0aa4e53c7671085de29350477 | 1,054 | py | Python | app/geolocation/services.py | raszidzie/geolocation-api | c79ddda6770004624fcb5722e2c04dedfd79e449 | [
"MIT"
]
| 1 | 2021-05-22T18:13:29.000Z | 2021-05-22T18:13:29.000Z | app/geolocation/services.py | raszidzie/geolocation-api | c79ddda6770004624fcb5722e2c04dedfd79e449 | [
"MIT"
]
| null | null | null | app/geolocation/services.py | raszidzie/geolocation-api | c79ddda6770004624fcb5722e2c04dedfd79e449 | [
"MIT"
]
| null | null | null | import os
import requests
from .exceptions import ExternalApiException
def get_location(ip):
url = f'http://api.ipstack.com/{ip}'
params = {'access_key': os.environ.get('ACCESS_KEY')}
try:
res = requests.get(url, params=params)
data = res.json()
return {
'ip':data['ip'],
'country_name':data['country_name'],
'region_code':data['region_code'],
'city':data['city'],
'latitude':data['latitude'],
'longitude':data['longitude'],
'zip_code':data['zip']
}
except requests.exceptions.ConnectionError:
        raise ExternalApiException('Connection error occurred during the fetch process')
except requests.exceptions.Timeout:
raise ExternalApiException("Connection timeout. Please check your internet connection and try again later")
except requests.exceptions.TooManyRedirects:
raise ExternalApiException("Too many redirects")
    except requests.exceptions.RequestException as e:
raise SystemExit(e) | 39.037037 | 115 | 0.650854 |
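# Usage sketch (the IP below is just an example value): get_location expects the
# ACCESS_KEY environment variable to hold a valid ipstack API key.
def example_lookup():
    location = get_location("134.201.250.155")
    return f"{location['country_name']} / {location['city']}"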
d3a684632cecf055075c89bab9fec5e30165edf9 | 4,076 | py | Python | rotation_scripts/decade_scripts/analysis_by_decade.py | greenelab/biovectors | cf298a1f581754851e69defedc4770069edd7750 | [
"BSD-2-Clause-Patent"
]
| 3 | 2021-04-13T15:30:27.000Z | 2022-03-09T01:46:27.000Z | rotation_scripts/decade_scripts/analysis_by_decade.py | greenelab/biovectors | cf298a1f581754851e69defedc4770069edd7750 | [
"BSD-2-Clause-Patent"
]
| 6 | 2020-10-30T11:35:18.000Z | 2021-11-12T18:17:25.000Z | rotation_scripts/decade_scripts/analysis_by_decade.py | greenelab/biovectors | cf298a1f581754851e69defedc4770069edd7750 | [
"BSD-2-Clause-Patent"
]
| 2 | 2020-11-18T15:06:13.000Z | 2020-11-19T07:31:50.000Z | """
This module evaluates each decade's word2vec performance using AUROC and precision-recall.
Very similar to `analysis` module.
"""
from sklearn.metrics import (
roc_curve,
auc,
precision_recall_curve,
average_precision_score,
)
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
def plot_roc(labels, predictions, dummy_predictions, year):
"""
Plots ROC curve.
@param labels: numpy array of classes
@param predictions: numpy array of corresponding word2vec similarity scores
@param dummy_predictions: numpy array of corresponding dummy classifications
"""
fp, tp, _ = roc_curve(labels, predictions)
roc_auc = auc(fp, tp)
fp_d, tp_d, _ = roc_curve(labels, dummy_predictions)
roc_auc_d = auc(fp_d, tp_d)
plt.figure()
plt.plot(fp, tp, color="darkorange", lw=2, label="Word2vec, AUC = %0.2f" % roc_auc)
plt.plot(
fp_d,
tp_d,
color="navy",
lw=2,
label="Dummy Classifier, AUC = %0.2f" % roc_auc_d,
)
plt.plot([0, 1], [0, 1], color="black", lw=2, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title(f"ROC Curve Analysis ({str(year)}-{str(year+9)})")
plt.legend(loc="lower right")
plt.savefig(f"figures/analysis/roc_curves_{str(year)}-{str(year+9)}.jpg")
def plot_precision_recall(labels, predictions, year):
"""
Plots precision-recall curve.
@param labels: numpy array of classes
@param predictions: numpy array of corresponding similarity scores
"""
precision, recall, threshold = precision_recall_curve(labels, predictions)
avg_precision = average_precision_score(labels, predictions)
plt.figure()
plt.plot(
recall, precision, label="Avg precision-recall score: %0.2f" % avg_precision
)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title(f"Word2vec: Precision-Recall curve ({str(year)}-{str(year+9)})")
plt.legend(loc="lower left")
plt.savefig(
f"figures/analysis/precision_recall_curve_{str(year)}-{str(year+9)}.jpg"
)
def sort_similarity_scores(scores_df, year):
"""
Sorts similarity scores in descending order to differentiate TPs and FPs.
@param scores_df: panda dataframe containing scores (see similarity_scores.tsv)
"""
sorted_df = (
scores_df.sort_values(by=["score"], ascending=False)
# .drop(scores_df.columns[:1], 0)
)
sorted_df.to_csv(
f"outputs/decades/sorted/sorted_similarity_scores_{str(year)}-{str(year+9)}.tsv",
sep="\t",
index=False,
)
if __name__ == "__main__":
base = os.path.abspath(os.getcwd())
# iterate through results from 1971-2020 by decade
years = [1971, 1981, 1991, 2001, 2011]
for filename in os.listdir(os.path.join(base, "outputs/decades/")):
for year in years:
if f"similarity_scores_{str(year)}-{str(year+9)}" in filename:
print(os.path.join(base, filename))
# word2vec
scores_df = pd.read_csv(
os.path.join(base, "outputs/decades/", filename), sep="\t"
)
labels = np.array(scores_df[["class"]].values.tolist())
predictions = np.array(scores_df[["score"]].values.tolist())
# dummy
dummy_df = pd.read_csv(
os.path.join(
base,
f"outputs/decades/dummy_scores_{str(year)}-{str(year+9)}.tsv",
),
sep="\t",
)
dummy_predictions = np.array(dummy_df[["dummy_score"]].values.tolist())
# analysis
plot_roc(labels, predictions, dummy_predictions, year)
plot_precision_recall(labels, predictions, year)
sort_similarity_scores(scores_df, year)
else:
continue
| 33.68595 | 91 | 0.610648 |
2036be9c14e5c125f29726e3c1bf1347ca7905f3 | 37,658 | py | Python | dcnn_train.py | Rohitmaan2012/Twitter_Sentiment_Analysis | 15a6497cdde0079a9bc6d852b42eb49f4960c3bf | [
"MIT"
]
| null | null | null | dcnn_train.py | Rohitmaan2012/Twitter_Sentiment_Analysis | 15a6497cdde0079a9bc6d852b42eb49f4960c3bf | [
"MIT"
]
| null | null | null | dcnn_train.py | Rohitmaan2012/Twitter_Sentiment_Analysis | 15a6497cdde0079a9bc6d852b42eb49f4960c3bf | [
"MIT"
]
| null | null | null | """
CNN for sentence modeling described in paper:
A Convolutional Neural Network for Modeling Sentence
"""
import sys, os, time
import pdb
import math, random
import numpy as np
import theano
import theano.tensor as T
from util import (load_data, dump_params)
from logreg import LogisticRegression
class WordEmbeddingLayer(object):
"""
Layer that takes input vectors, output the sentence matrix
"""
def __init__(self, rng,
input,
vocab_size,
embed_dm,
embeddings = None,
):
"""
input: theano.tensor.dmatrix, (number of instances, sentence word number)
vocab_size: integer, the size of vocabulary,
embed_dm: integer, the dimension of word vector representation
embeddings: theano.tensor.TensorType
pretrained embeddings
"""
if embeddings:
print "Use pretrained embeddings: ON"
assert embeddings.get_value().shape == (vocab_size, embed_dm), "%r != %r" %(
embeddings.get_value().shape,
(vocab_size, embed_dm)
)
self.embeddings = embeddings
else:
print "Use pretrained embeddings: OFF"
embedding_val = np.asarray(
rng.normal(0, 0.05, size = (vocab_size, embed_dm)),
dtype = theano.config.floatX
)
            embedding_val[vocab_size-1,:] = 0 # the <PADDING> character is initialized to 0
self.embeddings = theano.shared(
np.asarray(embedding_val,
dtype = theano.config.floatX),
borrow = True,
name = 'embeddings'
)
self.params = [self.embeddings]
self.param_shapes = [(vocab_size, embed_dm)]
# Return:
# :type, theano.tensor.tensor4
# :param, dimension(1, 1, word embedding dimension, number of words in sentence)
# made to be 4D to fit into the dimension of convolution operation
sent_embedding_list, updates = theano.map(lambda sent: self.embeddings[sent],
input)
sent_embedding_tensor = T.stacklists(sent_embedding_list) # make it into a 3D tensor
self.output = sent_embedding_tensor.dimshuffle(0, 'x', 2, 1) # make it a 4D tensor
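# Plain-numpy shape sketch (hypothetical sizes) of what the layer above produces:
# each row of word indices is replaced by the matching embedding rows and the
# result is laid out as (batch, 1, embed_dm, sentence_length), ready for a 2D
# convolution.
def _embedding_lookup_shape_sketch():
    vocab_size, embed_dm, batch, sent_len = 100, 48, 2, 7
    embeddings = np.random.normal(0, 0.05, size=(vocab_size, embed_dm))
    word_indices = np.random.randint(0, vocab_size, size=(batch, sent_len))
    sent_matrix = embeddings[word_indices]               # (batch, sent_len, embed_dm)
    sent_tensor = sent_matrix.transpose(0, 2, 1)[:, None, :, :]
    return sent_tensor.shape                             # (2, 1, 48, 7)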
class ConvFoldingPoolLayer(object):
"""
Convolution, folding and k-max pooling layer
"""
def __init__(self,
rng,
input,
filter_shape,
k,
activation = "tanh",
norm_w = True,
fold = 0,
W = None,
b = None):
"""
rng: numpy random number generator
input: theano.tensor.tensor4
the sentence matrix, (number of instances, number of input feature maps, embedding dimension, number of words)
filter_shape: tuple of length 4,
dimension: (number of filters, num input feature maps, filter height, filter width)
k: int or theano.tensor.iscalar,
the k value in the max-pooling layer
activation: str
the activation unit type, `tanh` or `relu` or 'sigmoid'
norm_w: bool
whether use fan-in fan-out initialization or not. Default, True
If not True, use `normal(0, 0.05, size)`
fold: int, 0 or 1
fold or not
W: theano.tensor.tensor4,
the filter weight matrices,
dimension: (number of filters, num input feature maps, filter height, filter width)
b: theano.tensor.vector,
the filter bias,
dimension: (filter number, )
"""
self.input = input
self.k = k
self.filter_shape = filter_shape
self.fold_flag = fold
assert activation in ('tanh', 'relu', 'sigmoid')
self.activation = activation
if W is not None:
self.W = W
else:
if norm_w:
# use fan-in fan-out init
fan_in = np.prod(filter_shape[1:])
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
k) # it's
W_bound = np.sqrt(6. / (fan_in + fan_out))
W_val = np.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
)
else:
# normal initialization
W_val = np.asarray(
rng.normal(0, 0.05, size = filter_shape),
dtype=theano.config.floatX
)
self.W = theano.shared(
value = np.asarray(W_val,
dtype = theano.config.floatX),
name = "W",
borrow=True
)
# make b
if b is not None:
b_val = b
b_size = b.shape
self.b = b
else:
b_size = (filter_shape[0], )
b_val = np.zeros(b_size)
self.b = theano.shared(
value = np.asarray(
b_val,
dtype = theano.config.floatX
),
name = "b",
borrow = True
)
self.params = [self.W, self.b]
self.param_shapes = [filter_shape,
b_size ]
def fold(self, x):
"""
:type x: theano.tensor.tensor4
"""
return (x[:, :, T.arange(0, x.shape[2], 2)] +
x[:, :, T.arange(1, x.shape[2], 2)]) / 2
def k_max_pool(self, x, k):
"""
perform k-max pool on the input along the rows
input: theano.tensor.tensor4
k: theano.tensor.iscalar
the k parameter
Returns:
4D tensor
"""
ind = T.argsort(x, axis = 3)
sorted_ind = T.sort(ind[:,:,:, -k:], axis = 3)
dim0, dim1, dim2, dim3 = sorted_ind.shape
indices_dim0 = T.arange(dim0).repeat(dim1 * dim2 * dim3)
indices_dim1 = T.arange(dim1).repeat(dim2 * dim3).reshape((dim1*dim2*dim3, 1)).repeat(dim0, axis=1).T.flatten()
indices_dim2 = T.arange(dim2).repeat(dim3).reshape((dim2*dim3, 1)).repeat(dim0 * dim1, axis = 1).T.flatten()
return x[indices_dim0, indices_dim1, indices_dim2, sorted_ind.flatten()].reshape(sorted_ind.shape)
@property
def output(self):
# non-linear transform of the convolution output
conv_out = T.nnet.conv.conv2d(self.input,
self.W,
border_mode = "full")
if self.fold_flag:
# fold
fold_out = self.fold(conv_out)
else:
fold_out = conv_out
# k-max pool
pool_out = (self.k_max_pool(fold_out, self.k) +
self.b.dimshuffle('x', 0, 'x', 'x'))
# around 0.
# why tanh becomes extreme?
if self.activation == "tanh":
# return theano.printing.Print("tanh(pool_out)")(T.tanh(pool_out))
return T.tanh(pool_out)
elif self.activation == "sigmoid":
return T.nnet.sigmoid(pool_out)
else:
return T.switch(pool_out > 0, pool_out, 0)
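# Plain-numpy sketch (toy row) of the k-max pooling used above: keep the k
# largest values of each row while preserving their original left-to-right
# order, rather than simply truncating.
def _k_max_pool_sketch(k=3):
    row = np.array([0.2, 0.9, 0.1, 0.7, 0.5, 0.8])
    keep = np.sort(np.argsort(row)[-k:])   # positions of the k largest values, re-ordered
    return row[keep]                       # array([0.9, 0.7, 0.8])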
class DropoutLayer(object):
"""
As the name suggests
Refer to here: https://github.com/mdenil/dropout/blob/master/mlp.py
"""
def __init__(self, input, rng, dropout_rate):
srng = theano.tensor.shared_randomstreams.RandomStreams(
rng.randint(999999))
# p=1-p because 1's indicate keep and p is prob of dropping
mask = srng.binomial(n=1,
p=1-dropout_rate,
size=input.shape)
self.output = input * T.cast(mask, theano.config.floatX)
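# Toy sketch of the dropout convention used below: train with a Bernoulli mask,
# then predict with the shared weights scaled by (1 - dropout_rate), i.e. the
# "model averaging" trick applied where the non-dropout layers reuse W.
def _dropout_sketch(dropout_rate=0.5, seed=1234):
    rng = np.random.RandomState(seed)
    activations = np.ones(10)
    mask = rng.binomial(n=1, p=1 - dropout_rate, size=activations.shape)
    train_time = activations * mask                   # some units silenced during training
    predict_time = activations * (1 - dropout_rate)   # all units kept, scaled down at test time
    return train_time, predict_time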
def train_and_test(args, print_config):
assert args.conv_layer_n == len(args.filter_widths) == len(args.nkerns) == (len(args.L2_regs) - 2) == len(args.fold_flags) == len(args.ks)
# \mod{dim, 2^{\sum fold_flags}} == 0
assert args.embed_dm % (2 ** sum(args.fold_flags)) == 0
###################
# get the data #
###################
datasets = load_data(args.corpus_path)
train_set_x, train_set_y = datasets[0]
dev_set_x, dev_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
word2index = datasets[3]
index2word = datasets[4]
pretrained_embeddings = datasets[5]
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / args.batch_size
n_dev_batches = dev_set_x.get_value(borrow=True).shape[0] / args.dev_test_batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / args.dev_test_batch_size
train_sent_len = train_set_x.get_value(borrow=True).shape[1]
possible_labels = set(train_set_y.get_value().tolist())
if args.use_pretrained_embedding:
args.embed_dm = pretrained_embeddings.get_value().shape[1]
###################################
# Symbolic variable definition #
###################################
x = T.imatrix('x') # the word indices matrix
y = T.ivector('y') # the sentiment labels
batch_index = T.iscalar('batch_index')
rng = np.random.RandomState(1234)
###############################
# Construction of the network #
###############################
# Layer 1, the embedding layer
layer1 = WordEmbeddingLayer(rng,
input = x,
vocab_size = len(word2index),
embed_dm = args.embed_dm,
embeddings = (
pretrained_embeddings
if args.use_pretrained_embedding else None
)
)
dropout_layers = [layer1]
layers = [layer1]
for i in xrange(args.conv_layer_n):
fold_flag = args.fold_flags[i]
# for the dropout layer
dpl = DropoutLayer(
input = dropout_layers[-1].output,
rng = rng,
dropout_rate = args.dropout_rates[0]
)
next_layer_dropout_input = dpl.output
next_layer_input = layers[-1].output
# for the conv layer
filter_shape = (
args.nkerns[i],
(1 if i == 0 else args.nkerns[i-1]),
1,
args.filter_widths[i]
)
k = args.ks[i]
print "For conv layer(%s) %d, filter shape = %r, k = %d, dropout_rate = %f and normalized weight init: %r and fold: %d" %(
args.conv_activation_unit,
i+2,
filter_shape,
k,
args.dropout_rates[i],
args.norm_w,
fold_flag
)
# we have two layers adding to two paths repsectively,
# one for training
# the other for prediction(averaged model)
dropout_conv_layer = ConvFoldingPoolLayer(rng,
input = next_layer_dropout_input,
filter_shape = filter_shape,
k = k,
norm_w = args.norm_w,
fold = fold_flag,
activation = args.conv_activation_unit)
# for prediction
# sharing weight with dropout layer
conv_layer = ConvFoldingPoolLayer(rng,
input = next_layer_input,
filter_shape = filter_shape,
k = k,
activation = args.conv_activation_unit,
fold = fold_flag,
W = dropout_conv_layer.W * (1 - args.dropout_rates[i]), # model averaging
b = dropout_conv_layer.b
)
dropout_layers.append(dropout_conv_layer)
layers.append(conv_layer)
# last, the output layer
# both dropout and without dropout
if sum(args.fold_flags) > 0:
n_in = args.nkerns[-1] * args.ks[-1] * args.embed_dm / (2**sum(args.fold_flags))
else:
n_in = args.nkerns[-1] * args.ks[-1] * args.embed_dm
print "For output layer, n_in = %d, dropout_rate = %f" %(n_in, args.dropout_rates[-1])
dropout_output_layer = LogisticRegression(
rng,
input = dropout_layers[-1].output.flatten(2),
n_in = n_in, # divided by 2x(how many times are folded)
n_out = len(possible_labels) # five sentiment level
)
output_layer = LogisticRegression(
rng,
input = layers[-1].output.flatten(2),
n_in = n_in,
n_out = len(possible_labels),
W = dropout_output_layer.W * (1 - args.dropout_rates[-1]), # sharing the parameters, don't forget
b = dropout_output_layer.b
)
dropout_layers.append(dropout_output_layer)
layers.append(output_layer)
###############################
# Error and cost #
###############################
# cost and error come from different model!
dropout_cost = dropout_output_layer.nnl(y)
errors = output_layer.errors(y)
def prepare_L2_sqr(param_layers, L2_regs):
assert len(L2_regs) == len(param_layers)
return T.sum([
L2_reg / 2 * ((layer.W if hasattr(layer, "W") else layer.embeddings) ** 2).sum()
for L2_reg, layer in zip(L2_regs, param_layers)
])
L2_sqr = prepare_L2_sqr(dropout_layers, args.L2_regs)
L2_sqr_no_ebd = prepare_L2_sqr(dropout_layers[1:], args.L2_regs[1:])
if args.use_L2_reg:
cost = dropout_cost + L2_sqr
cost_no_ebd = dropout_cost + L2_sqr_no_ebd
else:
cost = dropout_cost
cost_no_ebd = dropout_cost
###############################
# Parameters to be used #
###############################
print "Delay embedding learning by %d epochs" %(args.embedding_learning_delay_epochs)
print "param_layers: %r" %dropout_layers
param_layers = dropout_layers
##############################
# Parameter Update #
##############################
print "Using AdaDelta with rho = %f and epsilon = %f" %(args.rho, args.epsilon)
params = [param for layer in param_layers for param in layer.params]
param_shapes= [param for layer in param_layers for param in layer.param_shapes]
param_grads = [T.grad(cost, param) for param in params]
# AdaDelta parameter update
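    # (Zeiler 2012):
    #   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
    #   dx_t      = -(sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps)) * g_t
    #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
    # the shared variables below hold the two running averages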
# E[g^2]
# initialized to zero
egs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Eg:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
# E[\delta x^2], initialized to zero
exs = [
theano.shared(
value = np.zeros(param_shape,
dtype = theano.config.floatX
),
borrow = True,
name = "Ex:" + param.name
)
for param_shape, param in zip(param_shapes, params)
]
new_egs = [
args.rho * eg + (1 - args.rho) * g ** 2
for eg, g in zip(egs, param_grads)
]
delta_x = [
-(T.sqrt(ex + args.epsilon) / T.sqrt(new_eg + args.epsilon)) * g
for new_eg, ex, g in zip(new_egs, exs, param_grads)
]
new_exs = [
args.rho * ex + (1 - args.rho) * (dx ** 2)
for ex, dx in zip(exs, delta_x)
]
egs_updates = zip(egs, new_egs)
exs_updates = zip(exs, new_exs)
param_updates = [
(p, p + dx)
for dx, g, p in zip(delta_x, param_grads, params)
]
updates = egs_updates + exs_updates + param_updates
# updates WITHOUT embedding
# exclude the embedding parameter
egs_updates_no_ebd = zip(egs[1:], new_egs[1:])
exs_updates_no_ebd = zip(exs[1:], new_exs[1:])
param_updates_no_ebd = [
(p, p + dx)
for dx, g, p in zip(delta_x, param_grads, params)[1:]
]
updates_no_emb = egs_updates_no_ebd + exs_updates_no_ebd + param_updates_no_ebd
def make_train_func(cost, updates):
return theano.function(inputs = [batch_index],
outputs = [cost],
updates = updates,
givens = {
x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
y: train_set_y[batch_index * args.batch_size: (batch_index + 1) * args.batch_size]
}
)
train_model_no_ebd = make_train_func(cost_no_ebd, updates_no_emb)
train_model = make_train_func(cost, updates)
def make_error_func(x_val, y_val):
return theano.function(inputs = [],
outputs = errors,
givens = {
x: x_val,
y: y_val
},
)
dev_error = make_error_func(dev_set_x, dev_set_y)
test_error = make_error_func(test_set_x, test_set_y)
#############################
# Debugging purpose code #
#############################
    # PARAMETER TUNING NOTE:
    # some demonstration of the gradient vanishing problem
train_data_at_index = {
x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
}
train_data_at_index_with_y = {
x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
y: train_set_y[batch_index * args.batch_size: (batch_index + 1) * args.batch_size]
}
if print_config["nnl"]:
get_nnl = theano.function(
inputs = [batch_index],
outputs = dropout_cost,
givens = {
x: train_set_x[batch_index * args.batch_size: (batch_index + 1) * args.batch_size],
y: train_set_y[batch_index * args.batch_size: (batch_index + 1) * args.batch_size]
}
)
if print_config["L2_sqr"]:
get_L2_sqr = theano.function(
inputs = [],
outputs = L2_sqr
)
get_L2_sqr_no_ebd = theano.function(
inputs = [],
outputs = L2_sqr_no_ebd
)
if print_config["grad_abs_mean"]:
print_grads = theano.function(
inputs = [],
outputs = [theano.printing.Print(param.name)(
T.mean(T.abs_(param_grad))
)
for param, param_grad in zip(params, param_grads)
],
givens = {
x: train_set_x,
y: train_set_y
}
)
activations = [
l.output
for l in dropout_layers[1:-1]
]
weight_grads = [
T.grad(cost, l.W)
for l in dropout_layers[1:-1]
]
if print_config["activation_hist"]:
# turn into 1D array
get_activations = theano.function(
inputs = [batch_index],
outputs = [
val.flatten(1)
for val in activations
],
givens = train_data_at_index
)
if print_config["weight_grad_hist"]:
# turn into 1D array
get_weight_grads = theano.function(
inputs = [batch_index],
outputs = [
val.flatten(1)
for val in weight_grads
],
givens = train_data_at_index_with_y
)
if print_config["activation_tracking"]:
# get the mean and variance of activations for each conv layer
get_activation_mean = theano.function(
inputs = [batch_index],
outputs = [
T.mean(val)
for val in activations
],
givens = train_data_at_index
)
get_activation_std = theano.function(
inputs = [batch_index],
outputs = [
T.std(val)
for val in activations
],
givens = train_data_at_index
)
if print_config["weight_grad_tracking"]:
        # get the mean and variance of weight gradients for each conv layer
get_weight_grad_mean = theano.function(
inputs = [batch_index],
outputs = [
T.mean(g)
for g in weight_grads
],
givens = train_data_at_index_with_y
)
get_weight_grad_std = theano.function(
inputs = [batch_index],
outputs = [
T.std(g)
for g in weight_grads
],
givens = train_data_at_index_with_y
)
    # the training loop
    patience = args.patience  # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
best_validation_loss = np.inf
best_iter = 0
start_time = time.clock()
done_looping = False
epoch = 0
nnls = []
L2_sqrs = []
activation_means = [[] for i in xrange(args.conv_layer_n)]
activation_stds = [[] for i in xrange(args.conv_layer_n)]
weight_grad_means = [[] for i in xrange(args.conv_layer_n)]
weight_grad_stds = [[] for i in xrange(args.conv_layer_n)]
activation_hist_data = [[] for i in xrange(args.conv_layer_n)]
weight_grad_hist_data = [[] for i in xrange(args.conv_layer_n)]
train_errors = []
dev_errors = []
try:
print "validation_frequency = %d" %validation_frequency
while (epoch < args.n_epochs):
epoch += 1
print "At epoch {0}".format(epoch)
if epoch == (args.embedding_learning_delay_epochs + 1):
print "########################"
print "Start training embedding"
print "########################"
# shuffle the training data
train_set_x_data = train_set_x.get_value(borrow = True)
train_set_y_data = train_set_y.get_value(borrow = True)
permutation = np.random.permutation(train_set_x.get_value(borrow=True).shape[0])
train_set_x.set_value(train_set_x_data[permutation])
train_set_y.set_value(train_set_y_data[permutation])
for minibatch_index in xrange(n_train_batches):
if epoch >= (args.embedding_learning_delay_epochs + 1):
train_cost = train_model(minibatch_index)
else:
train_cost = train_model_no_ebd(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# train_error_val = np.mean([train_error(i)
# for i in xrange(n_train_batches)])
dev_error_val = dev_error()
# print "At epoch %d and minibatch %d. \nTrain error %.2f%%\nDev error %.2f%%\n" %(
# epoch,
# minibatch_index,
# train_error_val * 100,
# dev_error_val * 100
# )
print "At epoch %d and minibatch %d. \nDev error %.2f%%\n" %(
epoch,
minibatch_index,
dev_error_val * 100
)
# train_errors.append(train_error_val)
dev_errors.append(dev_error_val)
if dev_error_val < best_validation_loss:
best_iter = iter
#improve patience if loss improvement is good enough
if dev_error_val < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = dev_error_val
test_error_val = test_error()
print(
(
' epoch %i, minibatch %i/%i, test error of'
' best dev error %f %%'
) %
(
epoch,
minibatch_index + 1,
n_train_batches,
test_error_val * 100.
)
)
print "Dumping model to %s" %(args.model_path)
dump_params(params, args.model_path)
if (minibatch_index+1) % 50 == 0 or minibatch_index == n_train_batches - 1:
print "%d / %d minibatches completed" %(minibatch_index + 1, n_train_batches)
if print_config["nnl"]:
print "`nnl` for the past 50 minibatches is %f" %(np.mean(np.array(nnls)))
nnls = []
if print_config["L2_sqr"]:
print "`L2_sqr`` for the past 50 minibatches is %f" %(np.mean(np.array(L2_sqrs)))
L2_sqrs = []
##################
# Plotting stuff #
##################
if print_config["nnl"]:
nnl = get_nnl(minibatch_index)
# print "nll for batch %d: %f" %(minibatch_index, nnl)
nnls.append(nnl)
if print_config["L2_sqr"]:
if epoch >= (args.embedding_learning_delay_epochs + 1):
L2_sqrs.append(get_L2_sqr())
else:
L2_sqrs.append(get_L2_sqr_no_ebd())
if print_config["activation_tracking"]:
layer_means = get_activation_mean(minibatch_index)
layer_stds = get_activation_std(minibatch_index)
for layer_ms, layer_ss, layer_m, layer_s in zip(activation_means, activation_stds, layer_means, layer_stds):
layer_ms.append(layer_m)
layer_ss.append(layer_s)
if print_config["weight_grad_tracking"]:
layer_means = get_weight_grad_mean(minibatch_index)
layer_stds = get_weight_grad_std(minibatch_index)
for layer_ms, layer_ss, layer_m, layer_s in zip(weight_grad_means, weight_grad_stds, layer_means, layer_stds):
layer_ms.append(layer_m)
layer_ss.append(layer_s)
if print_config["activation_hist"]:
for layer_hist, layer_data in zip(activation_hist_data , get_activations(minibatch_index)):
layer_hist += layer_data.tolist()
if print_config["weight_grad_hist"]:
for layer_hist, layer_data in zip(weight_grad_hist_data , get_weight_grads(minibatch_index)):
layer_hist += layer_data.tolist()
except:
import traceback
traceback.print_exc(file = sys.stdout)
finally:
from plot_util import (plot_hist,
plot_track,
plot_error_vs_epoch,
plt)
if print_config["activation_tracking"]:
plot_track(activation_means,
activation_stds,
"activation_tracking")
if print_config["weight_grad_tracking"]:
plot_track(weight_grad_means,
weight_grad_stds,
"weight_grad_tracking")
if print_config["activation_hist"]:
plot_hist(activation_hist_data, "activation_hist")
if print_config["weight_grad_hist"]:
plot_hist(weight_grad_hist_data, "weight_grad_hist")
if print_config["error_vs_epoch"]:
train_errors = [0] * len(dev_errors)
ax = plot_error_vs_epoch(train_errors, dev_errors,
title = ('Best dev score: %f %% '
' at iter %i with test error %f %%') %(
best_validation_loss * 100., best_iter + 1, test_error_val * 100.
)
)
if not args.task_signature:
plt.show()
else:
plt.savefig("plots/" + args.task_signature + ".png")
end_time = time.clock()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_error_val * 100.))
# save the result
with open(args.output, "a") as f:
f.write("%s\t%f\t%f\n" %(args.task_signature, best_validation_loss, test_error_val))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
if __name__ == "__main__":
print_config = {
"adadelta_lr_mean": 0,
"adagrad_lr_mean": 0,
"embeddings": 0,
"logreg_W": 0,
"logreg_b": 0,
"conv_layer1_W": 0,
"conv_layer2_W": 0,
"activation_tracking": 0, # the activation value, mean and variance
"weight_grad_tracking": 0, # the weight gradient tracking
"backprop_grad_tracking": 0, # the backpropagated gradient, mean and variance. In this case, grad propagated from layer 2 to layer 1
"activation_hist": 0, # the activation value, mean and variance
"weight_grad_hist": 0, # the weight gradient tracking
"backprop_grad_hist": 0,
"error_vs_epoch": 1,
"l1_output": 0,
"dropout_l1_output": 0,
"l2_output": 0,
"dropout_l2_output": 0,
"l3_output": 0,
"p_y_given_x": 0,
"grad_abs_mean": 0,
"nnl": 1,
"L2_sqr": 1,
"param_weight_mean": 0,
}
import argparse, sys
parser = argparse.ArgumentParser(description = "CNN with k-max pooling for sentence classification")
parser.add_argument('--corpus_path', type=str,
required = True,
help = 'Path of preprocessed corpus'
)
parser.add_argument('--model_path', type=str,
required = True,
help = 'Path of model parameters'
)
parser.add_argument("--fold", type=int, default = [1,1], nargs="+",
dest = "fold_flags",
help = "Flags that turn on/off folding"
)
parser.add_argument("--ext_ebd", action = "store_true",
dest = "use_pretrained_embedding",
help = "Use external/pretrained word embedding or not. For unkown reasons, type checking does not work for this argument"
)
parser.add_argument("--l2", action = "store_true",
dest = "use_L2_reg",
help = "Use L2 regularization or not"
)
parser.add_argument("--lr", type=float, default = 0.001,
dest = "learning_rate",
help = "Learning rate if constant learning rate is applied"
)
parser.add_argument("--norm_w", action = "store_true",
help = "Normalized initial weight as descripted in Glorot's paper"
)
parser.add_argument("--ebd_delay_epoch", type=int, default = 4,
dest = "embedding_learning_delay_epochs",
help = "Embedding learning delay epochs"
)
parser.add_argument("--au", type=str, default = "tanh",
dest = "conv_activation_unit",
help = "Activation unit type for the convolution layer"
)
parser.add_argument("--eps", type=float, default =0.000001,
dest = "epsilon",
help = "Epsilon used by AdaDelta"
)
parser.add_argument("--rho", type=float, default = 0.95,
help = "Rho used by AdaDelta"
)
parser.add_argument("--ebd_dm", type=int, default = 48,
dest = "embed_dm",
help = "Dimension for word embedding"
)
parser.add_argument("--batch_size", type=int, default = 10,
dest = "batch_size",
help = "Batch size in the stochastic gradient descent"
)
parser.add_argument("--dev_test_batch_size", type=int, default = 1000,
help = "Batch size for dev/test data"
)
parser.add_argument("--n_epochs", type=int, default =20,
help = "Maximum number of epochs to perform during training"
)
parser.add_argument("--dr", type=float, default = [0.2, 0.5, 0.5], nargs="+",
dest = "dropout_rates",
help = "Dropout rates at all layers except output layer"
)
parser.add_argument("--l2_regs", type = float, default = [0.00001, 0.0003, 0.0003, 0.0001], nargs="+",
dest = "L2_regs",
help = "L2 regularization parameters at each layer. left/low->right/high"
)
parser.add_argument("--ks", type = int, default = [15, 6], nargs="+",
help = "The k values of the k-max pooling operation"
)
parser.add_argument("--conv_layer_n", type=int, default = 2,
help = "Number of convolution layers"
)
parser.add_argument("--nkerns", type=int, default = [6,12], nargs="+",
help = "Number of feature maps at each conv layer"
)
parser.add_argument("--filter_widths", type=int, default = [10,7], nargs="+",
help = "Filter width for each conv layer"
)
parser.add_argument("--task_signature", type=str,
help = "The prefix of the saved images."
)
parser.add_argument("--output", type=str,
required = True,
help = "The output file path to save the result"
)
parser.add_argument("--patience", type=int,
default = 5000,
help = "Patience parameter used for early stopping"
)
args = parser.parse_args(sys.argv[1:])
print "Configs:\n-------------\n"
for attr, value in vars(args).items():
print "%s: %r" %(
attr.ljust(25),
value
)
train_and_test(
args,
print_config
)
| 37.4334 | 146 | 0.48821 |
78e548379d89f168a5ba1a848314f6f14aac9264 | 15,526 | py | Python | project/03-asvspoof-mega/spec2-lcnn-attention-oc/06/model.py | Nijta/project-NN-Pytorch-scripts | 06a50ab072613fb60b8b8e1cea85c4aa8e75549d | [
"BSD-3-Clause"
]
| null | null | null | project/03-asvspoof-mega/spec2-lcnn-attention-oc/06/model.py | Nijta/project-NN-Pytorch-scripts | 06a50ab072613fb60b8b8e1cea85c4aa8e75549d | [
"BSD-3-Clause"
]
| null | null | null | project/03-asvspoof-mega/spec2-lcnn-attention-oc/06/model.py | Nijta/project-NN-Pytorch-scripts | 06a50ab072613fb60b8b8e1cea85c4aa8e75549d | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
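        # log10 power of the trainable linear filter-bank output; the eps
        # term keeps the logarithm finite when a filter response is zero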
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
      x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 34.502222 | 80 | 0.560415 |
806cc5d6465442ee3e8f7edfc8f6b1b903d79062 | 3,906 | py | Python | src/webargs/fields.py | nhoening/webargs | bb77162e317dc8e853f1f6c44b9a0dccbbac2cd4 | [
"MIT"
]
| null | null | null | src/webargs/fields.py | nhoening/webargs | bb77162e317dc8e853f1f6c44b9a0dccbbac2cd4 | [
"MIT"
]
| null | null | null | src/webargs/fields.py | nhoening/webargs | bb77162e317dc8e853f1f6c44b9a0dccbbac2cd4 | [
"MIT"
]
| null | null | null | """Field classes.
Includes all fields from `marshmallow.fields` in addition to a custom
`Nested` field and `DelimitedList`.
All fields can optionally take a special `location` keyword argument, which
tells webargs where to parse the request argument from.
.. code-block:: python
args = {
"active": fields.Bool(location="query"),
"content_type": fields.Str(data_key="Content-Type", location="headers"),
}
"""
import typing
import marshmallow as ma
# Expose all fields from marshmallow.fields.
from marshmallow.fields import * # noqa: F40
__all__ = ["DelimitedList"] + ma.fields.__all__
class Nested(ma.fields.Nested): # type: ignore[no-redef]
"""Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
the first argument, which will be converted to a `marshmallow.Schema`.
.. note::
The schema class here will always be `marshmallow.Schema`, regardless
of whether a custom schema class is set on the parser. Pass an explicit schema
class if necessary.
"""
def __init__(self, nested, *args, **kwargs):
if isinstance(nested, dict):
nested = ma.Schema.from_dict(nested)
super().__init__(nested, *args, **kwargs)
class DelimitedFieldMixin:
"""
This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple
which split on a pre-specified delimiter. By default, the delimiter will be ","
Because we want the MRO to reach this class before the List or Tuple class,
it must be listed first in the superclasses
For example, a DelimitedList-like type can be defined like so:
>>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):
>>> pass
"""
delimiter: str = ","
def _serialize(self, value, attr, obj, **kwargs):
# serializing will start with parent-class serialization, so that we correctly
# output lists of non-primitive types, e.g. DelimitedList(DateTime)
return self.delimiter.join(
format(each) for each in super()._serialize(value, attr, obj, **kwargs)
)
def _deserialize(self, value, attr, data, **kwargs):
# attempting to deserialize from a non-string source is an error
if not isinstance(value, (str, bytes)):
raise self.make_error("invalid")
return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
class DelimitedList(DelimitedFieldMixin, ma.fields.List):
"""A field which is similar to a List, but takes its input as a delimited
string (e.g. "foo,bar,baz").
Like List, it can be given a nested field type which it will use to
de/serialize each element of the list.
:param Field cls_or_instance: A field class or instance.
:param str delimiter: Delimiter between values.
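    Example (an illustrative sketch, not taken from the original docs)::
        args = {"ids": DelimitedList(Int(), delimiter="|")}
        # the query value "1|2|3" deserializes to [1, 2, 3]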
"""
default_error_messages = {"invalid": "Not a valid delimited list."}
def __init__(
self,
cls_or_instance: typing.Union[ma.fields.Field, type],
*,
delimiter: typing.Optional[str] = None,
**kwargs
):
self.delimiter = delimiter or self.delimiter
super().__init__(cls_or_instance, **kwargs)
class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
"""A field which is similar to a Tuple, but takes its input as a delimited
string (e.g. "foo,bar,baz").
Like Tuple, it can be given a tuple of nested field types which it will use to
de/serialize each element of the tuple.
:param Iterable[Field] tuple_fields: An iterable of field classes or instances.
:param str delimiter: Delimiter between values.
"""
default_error_messages = {"invalid": "Not a valid delimited tuple."}
def __init__(
self, tuple_fields, *, delimiter: typing.Optional[str] = None, **kwargs
):
self.delimiter = delimiter or self.delimiter
super().__init__(tuple_fields, **kwargs)
| 33.965217 | 86 | 0.678955 |
f1bcd25a7e17e483560742cb285b58f58525d590 | 97 | py | Python | WD/Cwiczenia/zamianaZnakowImie.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
]
| 1 | 2020-02-29T14:38:33.000Z | 2020-02-29T14:38:33.000Z | WD/Cwiczenia/zamianaZnakowImie.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
]
| null | null | null | WD/Cwiczenia/zamianaZnakowImie.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
]
| null | null | null |
imie="URSZULA"
nazwisko=u"GAŁĄZKA"
print(imie.capitalize())
print(nazwisko.capitalize())
| 13.857143 | 29 | 0.71134 |
307d68771a1b7d284b37614eef69c8c5dad1796f | 847 | py | Python | graphene/utils/tests/test_str_converters.py | nazarepiedady/graphene | 5475a7ad1ff982b973f4c8c2a4507020c8682e15 | [
"MIT"
]
| 7,451 | 2015-10-07T07:17:19.000Z | 2022-03-30T21:56:42.000Z | graphene/utils/tests/test_str_converters.py | nazarepiedady/graphene | 5475a7ad1ff982b973f4c8c2a4507020c8682e15 | [
"MIT"
]
| 1,218 | 2015-10-08T23:58:37.000Z | 2022-03-27T20:47:58.000Z | graphene/utils/tests/test_str_converters.py | nazarepiedady/graphene | 5475a7ad1ff982b973f4c8c2a4507020c8682e15 | [
"MIT"
]
| 985 | 2015-10-13T03:15:30.000Z | 2022-03-29T08:30:54.000Z | # coding: utf-8
from ..str_converters import to_camel_case, to_snake_case
def test_snake_case():
assert to_snake_case("snakesOnAPlane") == "snakes_on_a_plane"
assert to_snake_case("SnakesOnAPlane") == "snakes_on_a_plane"
assert to_snake_case("SnakesOnA_Plane") == "snakes_on_a__plane"
assert to_snake_case("snakes_on_a_plane") == "snakes_on_a_plane"
assert to_snake_case("snakes_on_a__plane") == "snakes_on_a__plane"
assert to_snake_case("IPhoneHysteria") == "i_phone_hysteria"
assert to_snake_case("iPhoneHysteria") == "i_phone_hysteria"
def test_camel_case():
assert to_camel_case("snakes_on_a_plane") == "snakesOnAPlane"
assert to_camel_case("snakes_on_a__plane") == "snakesOnA_Plane"
assert to_camel_case("i_phone_hysteria") == "iPhoneHysteria"
assert to_camel_case("field_i18n") == "fieldI18n"
| 42.35 | 70 | 0.75915 |
73816cec707fd7bbaa014ca2c9bf3eed370d7f50 | 426 | py | Python | singer_sdk/helpers/_classproperty.py | meltano/sdk | 83dde4fe922f9f91bd3c57277849a2a2daa8f09a | [
"Apache-2.0"
]
| 13 | 2021-06-21T17:30:32.000Z | 2021-12-06T18:45:34.000Z | singer_sdk/helpers/_classproperty.py | meltano/sdk | 83dde4fe922f9f91bd3c57277849a2a2daa8f09a | [
"Apache-2.0"
]
| null | null | null | singer_sdk/helpers/_classproperty.py | meltano/sdk | 83dde4fe922f9f91bd3c57277849a2a2daa8f09a | [
"Apache-2.0"
]
| null | null | null | # flake8: noqa
"""Defines the `classproperty` decorator."""
# noqa
class classproperty(property):
"""Class property decorator."""
def __get__(self, obj, objtype=None):
return super(classproperty, self).__get__(objtype)
def __set__(self, obj, value):
super(classproperty, self).__set__(type(obj), value)
def __delete__(self, obj):
super(classproperty, self).__delete__(type(obj))
| 22.421053 | 60 | 0.671362 |
36729353b534cb7fd230089d9fa2a3ccbfb36649 | 334 | py | Python | src/Python/09_Listy_wprowadzenie/Zad13.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
]
| 3 | 2020-09-19T21:38:30.000Z | 2022-03-30T11:02:26.000Z | src/Python/09_Listy_wprowadzenie/Zad13.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
]
| null | null | null | src/Python/09_Listy_wprowadzenie/Zad13.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
]
| 1 | 2022-02-04T09:13:20.000Z | 2022-02-04T09:13:20.000Z | """
Find the missing element in a list.
The list forms an arithmetic sequence.
"""
# Version 1
def znajdz_brakujacy_element(lista):
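    # a complete arithmetic sequence with len(lista) + 1 terms sums to
    # (n + 1) * (min + max) // 2; subtracting the actual sum of the list
    # leaves exactly the missing element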
suma_przedzialu = (len(lista) + 1) * (min(lista) + max(lista)) // 2
return suma_przedzialu - sum(lista)
# Correctness tests
lista = [6, 8, 4, 10, 14, 2]
wynik = 12
assert znajdz_brakujacy_element(lista) == wynik
| 19.647059 | 71 | 0.688623 |
43e5c03f720c95b52709d28512dc41fcf853e240 | 408 | wsgi | Python | kraken.wsgi | peterdemartini/KrakenMaster | 2b3ed18f6dcc720e66e1ac397a3b5ee902914e58 | [
"BSD-3-Clause"
]
| 1 | 2016-12-22T22:25:05.000Z | 2016-12-22T22:25:05.000Z | kraken.wsgi | peterdemartini/KrakenMaster | 2b3ed18f6dcc720e66e1ac397a3b5ee902914e58 | [
"BSD-3-Clause"
]
| null | null | null | kraken.wsgi | peterdemartini/KrakenMaster | 2b3ed18f6dcc720e66e1ac397a3b5ee902914e58 | [
"BSD-3-Clause"
]
| 1 | 2022-03-28T00:28:07.000Z | 2022-03-28T00:28:07.000Z | #!/usr/bin/python
import os
import subprocess
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/vhosts/KrakenMaster")
from kraken.app import create_app
from kraken.settings import DevConfig, ProdConfig
from kraken.database import db
if os.environ.get("KRAKEN_ENV") == 'prod':
application = create_app(ProdConfig)
else:
application = create_app(DevConfig)
| 24 | 49 | 0.784314 |
1019bb8db80cbd93fe89c2548591064bc3e95f34 | 3,748 | py | Python | cli/engine/InitEngine.py | lambdastack/lambdastack | 0898cf23b490aa520b75f1bcd85be56c74cf35cf | [
"Apache-2.0"
]
| 6 | 2021-11-29T13:14:14.000Z | 2022-02-02T19:27:44.000Z | cli/engine/InitEngine.py | lambdastack/lambdastack | 0898cf23b490aa520b75f1bcd85be56c74cf35cf | [
"Apache-2.0"
]
| 5 | 2021-11-17T13:21:58.000Z | 2021-11-22T16:31:08.000Z | cli/engine/InitEngine.py | lambdastack/lambdastack | 0898cf23b490aa520b75f1bcd85be56c74cf35cf | [
"Apache-2.0"
]
| 2 | 2021-10-21T17:31:36.000Z | 2021-12-01T08:20:25.000Z | import os
from cli.helpers.Step import Step
from cli.helpers.build_io import get_build_sshkey_path, save_manifest, get_build_path
from cli.helpers.data_loader import load_all_schema_objs, types
from cli.engine.ApplyEngine import ApplyEngine
from cli.helpers.objdict_helpers import remove_value
from cli.version import VERSION
from cli.helpers.doc_list_helpers import select_all, select_single
class InitEngine(Step):
def __init__(self, input_data):
super().__init__(__name__)
self.provider = input_data.provider
self.full_config = input_data.full_config
self.name = input_data.name
self.is_full_config = input_data.full_config
def __enter__(self):
super().__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def init(self):
input = load_all_schema_objs(types.DEFAULT, self.provider, 'configuration/minimal-cluster-config')
input[0].specification.name = self.name
input[0].build_path = get_build_path(self.name)
input[0].specification.admin_user.path = os.path.join(get_build_sshkey_path(self.name), input[0].specification.admin_user.key_path)
if self.is_full_config:
config = self.get_config_docs(input)
config_only = select_all(config, lambda x: not(x.kind.startswith('lambdastack-cluster')))
# gcp - wip
if self.provider == 'any' or self.provider == 'gcp':
# for any provider we want to use the default config from minimal-cluster-config
cluster_model = select_single(input, lambda x: x.kind == 'lambdastack-cluster')
else:
# for azure|aws provider we want to use the extended defaults cluster-config after dry run.
                # TODO: We probably want this coming from separate documents since Azure and AWS overlap now...
cluster_model = select_single(config, lambda x: x.kind == 'lambdastack-cluster')
infra = self.get_infra_docs(input)
docs = [cluster_model, *config_only, *infra]
else:
docs = [*input]
# set the provider and version for all docs
for doc in docs:
doc['provider'] = self.provider
doc['version'] = VERSION
# remove SET_BY_AUTOMATION fields
remove_value(docs, 'SET_BY_AUTOMATION')
# save document
save_manifest(docs, self.name, self.name+'.yml')
self.logger.info('Initialized new configuration and saved it to "' + os.path.join(get_build_path(self.name), self.name + '.yml') + '"')
return 0
def get_config_docs(self, input_docs):
cluster_config_path = save_manifest(input_docs, self.name, self.name + '.yml')
args = type('obj', (object,), {'file': cluster_config_path})()
args.ping_retries = 5
# generate the config documents
with ApplyEngine(args) as build:
config = build.dry_run()
return config
def get_infra_docs(self, input_docs):
if self.provider == 'any':
# For any we can include the machine documents from the minimal-cluster-config
infra = select_all(input_docs, lambda x: x.kind.startswith('infrastructure/machine'))
else:
            # VMs are currently the infrastructure documents the user might interact with for:
# - type/size
# - distro
# - network security rules
# ...
# So we add the defaults here.
# TODO: Check if we want to include possible other infrastructure documents.
infra = load_all_schema_objs(types.DEFAULT, self.provider, 'infrastructure/virtual-machine')
return infra
| 42.11236 | 143 | 0.654749 |
70bf27bdd44db7bf5b332b50ff769dde5aa58a75 | 8,384 | py | Python | button_recognition.py | Jarvis-Geun/ocr-rcnn-v2 | 2762f71c0a430979378f1a10b71e33520f337fb9 | [
"MIT"
]
| null | null | null | button_recognition.py | Jarvis-Geun/ocr-rcnn-v2 | 2762f71c0a430979378f1a10b71e33520f337fb9 | [
"MIT"
]
| null | null | null | button_recognition.py | Jarvis-Geun/ocr-rcnn-v2 | 2762f71c0a430979378f1a10b71e33520f337fb9 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import os
import imageio
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
from utils.ops import native_crop_and_resize
from utils import visualization_utils as vis_util
import tensorflow.contrib.tensorrt as trt
charset = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5,
'6': 6, '7': 7, '8': 8, '9': 9, 'A': 10, 'B': 11,
'C': 12, 'D': 13, 'E': 14, 'F': 15, 'G': 16, 'H': 17,
'I': 18, 'J': 19, 'K': 20, 'L': 21, 'M': 22, 'N': 23,
'O': 24, 'P': 25, 'R': 26, 'S': 27, 'T': 28, 'U': 29,
'V': 30, 'X': 31, 'Z': 32, '<': 33, '>': 34, '(': 35,
')': 36, '$': 37, '#': 38, '^': 39, 's': 40, '-': 41,
'*': 42, '%': 43, '?': 44, '!': 45, '+': 46} # <nul> = +
class ButtonRecognizer:
  def __init__(self, rcnn_path=None, ocr_path=None,
use_trt=False, precision='FP16', use_optimized=False,
use_tx2=False):
self.ocr_graph_path = ocr_path
self.rcnn_graph_path = rcnn_path
self.use_trt = use_trt
    self.precision = precision  # 'INT8', 'FP16' or 'FP32'
self.use_optimized = use_optimized
self.use_tx2 = use_tx2 # if use tx2, gpu memory is limited to 3GB
self.session = None
self.ocr_input = None
self.ocr_output = None
self.rcnn_input = None
self.rcnn_output = None
self.class_num = 1
self.image_size = [480, 640]
self.recognition_size = [180, 180]
self.category_index = {1: {'id': 1, 'name': u'button'}}
self.idx_lbl = {}
for key in charset.keys():
self.idx_lbl[charset[key]] = key
self.load_and_merge_graphs()
print('Button recognizer initialized!')
def __del__(self):
self.clear_session()
def optimize_rcnn(self, input_graph_def):
trt_graph = trt.create_inference_graph(
input_graph_def=input_graph_def,
outputs=['detection_boxes', 'detection_scores', 'detection_classes', 'num_detections'],
max_batch_size = 1,
# max_workspace_size_bytes=(2 << 10) << 20,
precision_mode = self.precision)
return trt_graph
def optimize_ocr(self, input_graph_def):
output_graph_def = trt.create_inference_graph(
input_graph_def = input_graph_def,
outputs = ['predicted_chars', 'predicted_scores'],
max_batch_size = 1,
# max_workspace_size_bytes=(2 << 10) << 20,
precision_mode = self.precision)
return output_graph_def
def load_and_merge_graphs(self):
# check graph paths
if self.ocr_graph_path is None:
self.ocr_graph_path = './frozen_model/ocr_graph.pb'
if self.rcnn_graph_path is None:
self.rcnn_graph_path = './frozen_model/detection_graph_640x480.pb'
if self.use_optimized:
self.ocr_graph_path.replace('.pb', '_optimized.pb')
self.rcnn_graph_path.replace('.pb', '_optimized.pb')
assert os.path.exists(self.ocr_graph_path) and os.path.exists(self.rcnn_graph_path)
# merge the frozen graphs
ocr_rcnn_graph = tf.Graph()
with ocr_rcnn_graph.as_default():
# load button detection graph definition
with tf.gfile.GFile(self.rcnn_graph_path, 'rb') as fid:
detection_graph_def = tf.GraphDef()
serialized_graph = fid.read()
detection_graph_def.ParseFromString(serialized_graph)
# for node in detection_graph_def.node:
# print node.name
if self.use_trt:
detection_graph_def = self.optimize_rcnn(detection_graph_def)
tf.import_graph_def(detection_graph_def, name='detection')
# load character recognition graph definition
with tf.gfile.GFile(self.ocr_graph_path, 'rb') as fid:
recognition_graph_def = tf.GraphDef()
serialized_graph = fid.read()
recognition_graph_def.ParseFromString(serialized_graph)
if self.use_trt:
recognition_graph_def = self.optimize_ocr(recognition_graph_def)
tf.import_graph_def(recognition_graph_def, name='recognition')
      # retrieve detection tensors
rcnn_input = ocr_rcnn_graph.get_tensor_by_name('detection/image_tensor:0')
rcnn_boxes = ocr_rcnn_graph.get_tensor_by_name('detection/detection_boxes:0')
rcnn_scores = ocr_rcnn_graph.get_tensor_by_name('detection/detection_scores:0')
rcnn_number = ocr_rcnn_graph.get_tensor_by_name('detection/num_detections:0')
      # crop and resize valid boxes (only valid when the rcnn input has a known shape)
rcnn_number = tf.to_int32(rcnn_number)
valid_boxes = tf.slice(rcnn_boxes, [0, 0, 0], [1, rcnn_number[0], 4])
ocr_boxes = native_crop_and_resize(rcnn_input, valid_boxes, self.recognition_size)
      # retrieve recognition tensors
ocr_input = ocr_rcnn_graph.get_tensor_by_name('recognition/ocr_input:0')
ocr_chars = ocr_rcnn_graph.get_tensor_by_name('recognition/predicted_chars:0')
ocr_beliefs = ocr_rcnn_graph.get_tensor_by_name('recognition/predicted_scores:0')
self.rcnn_input = rcnn_input
self.rcnn_output = [rcnn_boxes, rcnn_scores, rcnn_number, ocr_boxes]
self.ocr_input = ocr_input
self.ocr_output = [ocr_chars, ocr_beliefs]
if self.use_tx2:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=3.0/8.0)
self.session = tf.Session(graph=ocr_rcnn_graph, config=tf.ConfigProto(gpu_options=gpu_options))
else:
self.session = tf.Session(graph=ocr_rcnn_graph)
def clear_session(self):
if self.session is not None:
self.session.close()
def decode_text(self, codes, scores):
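    # '+' encodes the <nul> padding symbol and is skipped; the belief score
    # is averaged over the remaining real characters only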
score_ave = 0
text = ''
for char, score in zip(codes, scores):
if not self.idx_lbl[char] == '+':
score_ave += score
text += self.idx_lbl[char]
score_ave /= len(text)
return text, score_ave
def predict(self, image_np, draw=False):
# input data
assert image_np.shape == (480, 640, 3)
img_in = np.expand_dims(image_np, axis=0)
# output data
recognition_list = []
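    # each result entry: [normalized box (ymin, xmin, ymax, xmax),
    #                     detection score, decoded label text, average belief]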
# perform detection and recognition
boxes, scores, number, ocr_boxes = self.session.run(self.rcnn_output, feed_dict={self.rcnn_input:img_in})
boxes, scores, number = [np.squeeze(x) for x in [boxes, scores, number]]
for i in range(number):
if scores[i] < 0.5: continue
chars, beliefs = self.session.run(self.ocr_output, feed_dict={self.ocr_input: ocr_boxes[:,i]})
chars, beliefs = [np.squeeze(x) for x in [chars, beliefs]]
text, belief = self.decode_text(chars, beliefs)
recognition_list.append([boxes[i], scores[i], text, belief])
if draw:
classes = [1]*len(boxes)
self.draw_detection_result(image_np, boxes, classes, scores, self.category_index)
self.draw_recognition_result(image_np, recognition_list)
return recognition_list
@staticmethod
def draw_detection_result(image_np, boxes, classes, scores, category, predict_chars=None):
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category,
max_boxes_to_draw=100,
use_normalized_coordinates=True,
line_thickness=5,
predict_chars=predict_chars
)
def draw_recognition_result(self, image_np, recognitions):
for item in recognitions:
# crop button patches
y_min = int(item[0][0] * self.image_size[0])
x_min = int(item[0][1] * self.image_size[1])
y_max = int(item[0][2] * self.image_size[0])
x_max = int(item[0][3] * self.image_size[1])
button_patch = image_np[y_min: y_max, x_min: x_max]
# generate image layer for drawing
img_pil = Image.fromarray(button_patch)
img_show = ImageDraw.Draw(img_pil)
# draw at a proper location
x_center = (x_max-x_min) / 2.0
y_center = (y_max-y_min) / 2.0
font_size = min(x_center, y_center)*1.1
text_center = int(x_center-0.5*font_size), int(y_center-0.5*font_size)
font = ImageFont.truetype('/Library/Fonts/Arial.ttf', int(font_size))
img_show.text(text_center, text=item[2], font=font, fill=(255, 0, 255))
# img_pil.show()
image_np[y_min: y_max, x_min: x_max] = np.array(img_pil)
if __name__ == '__main__':
recognizer = ButtonRecognizer(use_optimized=True)
image = imageio.imread('./test_panels/1.jpg')
recognition_list =recognizer.predict(image,True)
image = Image.fromarray(image)
image.show()
recognizer.clear_session()
| 39.734597 | 109 | 0.671994 |
c79b9b80eabea714b06f9a338246920acb2ad4e0 | 3,942 | py | Python | services/dserver_service.py | lastick1/rexpert | cd5908f69cf54671ffe6bb2991c24d19e8f0036d | [
"MIT"
]
| 1 | 2020-07-07T09:58:57.000Z | 2020-07-07T09:58:57.000Z | services/dserver_service.py | lastick1/rexpert | cd5908f69cf54671ffe6bb2991c24d19e8f0036d | [
"MIT"
]
| 42 | 2018-11-11T08:08:46.000Z | 2020-01-10T11:15:47.000Z | services/dserver_service.py | lastick1/rexpert | cd5908f69cf54671ffe6bb2991c24d19e8f0036d | [
"MIT"
]
| null | null | null | "Управление игровым сервером"
from __future__ import annotations
from typing import Dict, Any
import logging
from configs import Config
from core import EventsEmitter
from rcon import DServerRcon
from model import Command, \
CommandType, \
MessageAll, \
MessageAllies, \
MessageAxis, \
MessagePrivate, \
PlayerKick, \
PlayerBanP15M, \
PlayerBanP7D, \
ServerInput
from .base_event_service import BaseEventService
class DServerService(BaseEventService):
"Сервис управления DServer через Rcon"
def __init__(self, emitter: EventsEmitter, config: Config):
super().__init__(emitter)
self._config: Config = config
self._rcon: DServerRcon = DServerRcon(
self._config.main.rcon_ip,
self._config.main.rcon_port
)
self._bindings: Dict[str, Any] = {
str(CommandType.MessageAll): self.message_all,
str(CommandType.MessageAllies): self.message_allies,
str(CommandType.MessageAxis): self.message_axis,
str(CommandType.MessagePrivate): self.message_private,
str(CommandType.PlayerKick): self.kick,
str(CommandType.PlayerBanP15M): self.ban_short,
str(CommandType.PlayerBanP7D): self.ban_long,
str(CommandType.ServerInput): self.server_input,
}
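        # on_command() dispatches every incoming command type through this table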
def init(self) -> None:
self.register_subscription(self.emitter.commands_rcon.subscribe_(self.on_command))
def on_command(self, command: Command) -> None:
"Обработать команду в RCon"
if not self._config.main.offline_mode:
if not self._rcon.connected:
self._rcon.connect()
if not self._rcon.authed:
self._rcon.auth(self._config.main.rcon_login,
self._config.main.rcon_password)
self._bindings[str(command.type)](command)
def message_all(self, command: MessageAll) -> None:
"Отправить сообщение всем игрокам"
self._rcon.info_message(command.message)
if self._config.main.console_chat_output:
logging.info(f'CHAT:ALL:{command.message}')
def message_allies(self, command: MessageAllies) -> None:
"Отправить сообщение союзникам"
self._rcon.allies_message(command.message)
if self._config.main.console_chat_output:
logging.info(f'CHAT:ALLIES:{command.message}')
def message_axis(self, command: MessageAxis) -> None:
"Отправить сообщение люфтваффе"
self._rcon.axis_message(command.message)
if self._config.main.console_chat_output:
logging.info(f'CHAT:AXIS:{command.message}')
def message_private(self, command: MessagePrivate) -> None:
"Отправить сообщение игроку"
self._rcon.private_message(command.account_id, command.message)
if self._config.main.console_chat_output:
logging.info(f'CHAT:{command.account_id}:{command.message}')
def kick(self, command: PlayerKick) -> None:
"Выбросить игрока с сервера"
self._rcon.kick(command.account_id)
if self._config.main.console_cmd_output:
logging.info(f'KICK:{command.account_id}')
def ban_short(self, command: PlayerBanP15M) -> None:
"Забанить игрока на 15 минут"
self._rcon.banuser(command.account_id)
if self._config.main.console_cmd_output:
logging.info(f'BANUSER:{command.account_id}')
def ban_long(self, command: PlayerBanP7D) -> None:
"Забанить игрока на 7 дней"
self._rcon.ban(command.account_id)
if self._config.main.console_cmd_output:
logging.info(f'BAN:{command.account_id}')
def server_input(self, command: ServerInput) -> None:
"Активировать MCU ServerInput"
self._rcon.server_input(command.name)
if self._config.main.console_cmd_output:
logging.info(f'SERVER_INPUT:{command.name}')
| 37.542857 | 90 | 0.667935 |
1cd4f8f60609405f499e79ee04ace90bb0e0f7e8 | 5,722 | py | Python | src/genie/libs/parser/iosxe/tests/ShowIsisNeighbors/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
]
| 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/iosxe/tests/ShowIsisNeighbors/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
]
| 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/iosxe/tests/ShowIsisNeighbors/cli/equal/golden_output2_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
]
| 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {
"isis": {
"test_0": {
"neighbors": {
"R1_xe": {
"type": {
"L2": {
"interfaces": {
"GigabitEthernet1": {
"ip_address": "172.16.10.1",
"state": "UP",
"holdtime": "21",
"circuit_id": "C_ID.00"
},
"GigabitEthernet2": {
"ip_address": "172.16.20.1",
"state": "DOWN",
"holdtime": "25",
"circuit_id": "C_ID.01"
},
"GigabitEthernet3": {
"ip_address": "172.16.30.1",
"state": "INIT",
"holdtime": "21",
"circuit_id": "C_ID.02"
}
}
}
}
},
"R2_xr": {
"type": {
"L1": {
"interfaces": {
"GigabitEthernet4": {
"ip_address": "172.16.40.1",
"state": "NONE",
"holdtime": "25",
"circuit_id": "C_ID.03"
}
}
}
}
}
}
},
"test_1": {
"neighbors": {
"R3_xe": {
"type": {
"L1": {
"interfaces": {
"GigabitEthernet6": {
"ip_address": "172.16.50.1",
"state": "NONE",
"holdtime": "21",
"circuit_id": "C_ID.05"
},
"GigabitEthernet5": {
"ip_address": "172.16.60.1",
"state": "UP",
"holdtime": "25",
"circuit_id": "C_ID.07"
}
}
}
}
},
"R4_xr": {
"type": {
"L2": {
"interfaces": {
"GigabitEthernet8": {
"ip_address": "172.16.70.1",
"state": "INIT",
"holdtime": "21",
"circuit_id": "C_ID.06"
},
"GigabitEthernet7": {
"ip_address": "172.16.80.1",
"state": "DOWN",
"holdtime": "25",
"circuit_id": "C_ID.04"
}
}
}
}
}
}
},
"test_2": {
"neighbors": {
"R7_xe": {
"type": {
"L1": {
"interfaces": {
"GigabitEthernet10.104": {
"ip_address": "172.17.10.1",
"state": "NONE",
"holdtime": "21",
"circuit_id": "C_ID.10"
}
}
}
}
},
"R8_xe": {
"type": {
"L1": {
"interfaces": {
"GigabitEthernet10.103": {
"ip_address": "172.17.20.1",
"state": "UP",
"holdtime": "25",
"circuit_id": "C_ID.08"
}
}
}
}
},
"R9_xr": {
"type": {
"L2": {
"interfaces": {
"GigabitEthernet13.102": {
"ip_address": "172.17.30.1",
"state": "INIT",
"holdtime": "21",
"circuit_id": "C_ID.11"
},
"GigabitEthernet13.101": {
"ip_address": "172.17.40.1",
"state": "DOWN",
"holdtime": "25",
"circuit_id": "C_ID.13"
}
}
}
}
}
}
}
}
} | 39.736111 | 64 | 0.183328 |
0f3ea7400e98f79182eb44ff4cb553e493c1a290 | 8,084 | py | Python | wsgidav/dc/base_dc.py | jaredrunyon/wsgidav | 2e375551f1961380d7afd2cbcf3bef32bb98b8d7 | [
"MIT"
]
| 62 | 2015-02-05T08:16:33.000Z | 2022-02-25T20:51:32.000Z | wsgidav/dc/base_dc.py | jaredrunyon/wsgidav | 2e375551f1961380d7afd2cbcf3bef32bb98b8d7 | [
"MIT"
]
| 32 | 2015-01-29T09:46:18.000Z | 2022-02-15T03:27:29.000Z | wsgidav/dc/base_dc.py | jaredrunyon/wsgidav | 2e375551f1961380d7afd2cbcf3bef32bb98b8d7 | [
"MIT"
]
| 59 | 2015-02-02T02:36:16.000Z | 2022-02-14T01:55:21.000Z | # -*- coding: utf-8 -*-
# (c) 2009-2020 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
Abstract base class of a domain controller (used by HTTPAuthenticator).
This ABC serves as base class for DomainControllers and provides some
default implementations.
Domain controllers are called by `HTTPAuthenticator` to handle these tasks:
- Basic authentication:
Check if user_name/password is allowed to perform a request
- Digest authentication (optional):
Check if user_name is allowed to perform a request and return the MD5 hash.
- Define permissions and roles for a given user (optional).
Note that there is no checking for `isinstance(BaseDomainController)` in the
code, so WsgiDAV also accepts duck-typed domain controllers.
Digest Authentication
---------------------
See https://en.wikipedia.org/wiki/Digest_access_authentication
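In outline (a simplified sketch, not a full protocol description): the domain
controller supplies HA1 = MD5("user_name:realm:password") for a known user,
and the HTTPAuthenticator combines that hash with the server nonce and the
request data to verify the client's digest response.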
Permissions and Roles
---------------------
A domain controller MAY add entries to the `environment["wsgidav.auth. ..."]`
namespace in order to define access permissions for the following middleware
(e.g. dir_browser) and DAV providers.
TODO: Work In Progress / Subject to change
"""
from __future__ import print_function
from hashlib import md5
from wsgidav import compat, util
import abc
import six
import sys
__docformat__ = "reStructuredText"
logger = util.get_module_logger(__name__)
@six.add_metaclass(abc.ABCMeta)
class BaseDomainController(object):
#: A domain controller MAY list these values as
#: `environ["wsgidav.auth.permissions"] = (<permission>, ...)`
known_permissions = ("browse_dir", "delete_resource", "edit_resource")
#: A DC may list these values as `environ["wsgidav.auth.roles"] = (<role>, ...)`
known_roles = ("admin", "editor", "reader")
def __init__(self, wsgidav_app, config):
self.wsgidav_app = wsgidav_app
self.config = config
def __str__(self):
return "{}()".format(self.__class__.__name__)
def _calc_realm_from_path_provider(self, path_info, environ):
"""Internal helper for derived classes to implement get_domain_realm()."""
if environ:
# Called while in a request:
# We don't get the share from the path_info here: it was already
# resolved and stripped by the request_resolver
dav_provider = environ["wsgidav.provider"]
else:
# Called on start-up with the share root URL
_share, dav_provider = self.wsgidav_app.resolve_provider(path_info)
if not dav_provider:
logger.warn(
"_calc_realm_from_path_provider('{}'): '{}'".format(
util.safe_re_encode(path_info, sys.stdout.encoding), None
)
)
return None
realm = dav_provider.share_path
if realm == "":
realm = "/"
return realm
@abc.abstractmethod
def get_domain_realm(self, path_info, environ):
"""Return the normalized realm name for a given URL.
This method is called
- On startup, to check if anonymous access is allowed for a given share.
In this case, `environ` is None.
- For every request, before basic or digest authentication is handled.
A domain controller that uses the share path as realm name may use
the `_calc_realm_from_path_provider()` helper.
Args:
path_info (str):
environ (dict | None):
Returns:
str
"""
raise NotImplementedError
@abc.abstractmethod
def require_authentication(self, realm, environ):
"""Return False to disable authentication for this request.
This method is called
- On startup, to check if anonymous access is allowed for a given share.
In this case, `environ` is None.
- For every request, before basic or digest authentication is handled.
If False is returned, we MAY also set environment variables for
anonymous access::
environment["wsgidav.auth.roles"] = (<role>, ...)
environment["wsgidav.auth.permissions"] = (<perm>, ...)
return False
Args:
realm (str):
environ (dict | None):
Returns:
False to allow anonymous access
True to force subsequent digest or basic authentication
"""
raise NotImplementedError
def is_share_anonymous(self, path_info):
"""Return true if anonymous access will be granted to the share path.
This method is called on start-up to print out info and warnings.
Returns:
bool
"""
realm = self.get_domain_realm(path_info, None)
return not self.require_authentication(realm, None)
@abc.abstractmethod
def basic_auth_user(self, realm, user_name, password, environ):
"""Check request access permissions for realm/user_name/password.
Called by http_authenticator for basic authentication requests.
Optionally set environment variables:
environ["wsgidav.auth.roles"] = (<role>, ...)
environ["wsgidav.auth.permissions"] = (<perm>, ...)
Args:
realm (str):
user_name (str):
password (str):
environ (dict):
Returns:
False if user is not known or not authorized
True if user is authorized
"""
raise NotImplementedError
@abc.abstractmethod
def supports_http_digest_auth(self):
"""Signal if this DC instance supports the HTTP digest authentication theme.
If true, `HTTPAuthenticator` will call `dc.digest_auth_user()`,
so this method must be implemented as well.
Returns:
bool
"""
raise NotImplementedError
# def is_realm_user(self, realm, user_name, environ):
# """Return true if the user is known and allowed for that realm.
# This method is called as a pre-check for digest authentication.
# A domain controller MAY implement this method if this pre-check is
# more efficient than a hash calculation or in order to enforce a
# permission policy.
# If this method is not implemented, or None or True is returned, the
# http_authenticator will proceed with calculating and comparing digest
# hash with the current request.
# Returns:
# bool: False to reject authentication.
# """
# return None
def _compute_http_digest_a1(self, realm, user_name, password):
"""Internal helper for derived classes to compute a digest hash (A1 part)."""
data = user_name + ":" + realm + ":" + password
A1 = md5(compat.to_bytes(data)).hexdigest()
return A1
def digest_auth_user(self, realm, user_name, environ):
"""Check access permissions for realm/user_name.
        Called by http_authenticator for digest authentication requests.
Compute the HTTP digest hash A1 part.
Any domain controller that returns true for `supports_http_digest_auth()`
MUST implement this method.
Optionally set environment variables:
environ["wsgidav.auth.roles"] = (<role>, ...)
environ["wsgidav.auth.permissions"] = (<perm>, ...)
Note that in order to calculate A1, we need either
- Access the plain text password of the user.
In this case the method `self._compute_http_digest_a1()` can be used
for convenience.
Or
- Return a stored hash value that is associated with the user name
(for example from Apache's htdigest files).
Args:
realm (str):
user_name (str):
environ (dict):
Returns:
str: MD5("{usern_name}:{realm}:{password}")
or false if user is unknown or rejected
"""
raise NotImplementedError
| 33.26749 | 91 | 0.64139 |
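The abstract class above only defines the hooks that WsgiDAV's HTTPAuthenticator calls. As a minimal sketch (not part of WsgiDAV itself; the class name, user table and always-authenticate policy are illustrative assumptions), a concrete controller built on it could look like this:
# Minimal illustrative domain controller subclass; demo credentials only.
from wsgidav.dc.base_dc import BaseDomainController
class SimpleDC(BaseDomainController):
    def __init__(self, wsgidav_app, config):
        super(SimpleDC, self).__init__(wsgidav_app, config)
        self.user_map = {"tester": "secret"}  # hypothetical plain-text demo account
    def get_domain_realm(self, path_info, environ):
        # realm == share path, via the helper provided by the base class
        return self._calc_realm_from_path_provider(path_info, environ)
    def require_authentication(self, realm, environ):
        return True  # never allow anonymous access in this sketch
    def basic_auth_user(self, realm, user_name, password, environ):
        return self.user_map.get(user_name) == password
    def supports_http_digest_auth(self):
        return True
    def digest_auth_user(self, realm, user_name, environ):
        password = self.user_map.get(user_name)
        if password is None:
            return False
        # A1 = MD5("user_name:realm:password"), exactly what _compute_http_digest_a1() returns
        return self._compute_http_digest_a1(realm, user_name, password)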
a793f46d0ce59440889ea637691349ecb7a79b0b | 55,735 | py | Python | mahjong/hand_calculating/tests/tests_yaku_calculation.py | wardenlym/mahjong | 475da6360e24e70e6e0d1deada573c842a71f261 | [
"MIT"
]
| 254 | 2017-09-20T15:02:20.000Z | 2022-03-28T11:33:28.000Z | mahjong/hand_calculating/tests/tests_yaku_calculation.py | wardenlym/mahjong | 475da6360e24e70e6e0d1deada573c842a71f261 | [
"MIT"
]
| 39 | 2017-09-23T14:28:36.000Z | 2022-01-06T08:41:57.000Z | mahjong/hand_calculating/tests/tests_yaku_calculation.py | wardenlym/mahjong | 475da6360e24e70e6e0d1deada573c842a71f261 | [
"MIT"
]
| 38 | 2017-10-19T09:06:53.000Z | 2022-03-15T05:08:22.000Z | import unittest
from mahjong.constants import EAST, FIVE_RED_SOU, NORTH, SOUTH, WEST
from mahjong.hand_calculating.hand import HandCalculator
from mahjong.hand_calculating.hand_config import HandConfig, OptionalRules
from mahjong.hand_calculating.yaku_config import YakuConfig
from mahjong.meld import Meld
from mahjong.tests_mixin import TestMixin
from mahjong.tile import TilesConverter
class YakuCalculationTestCase(unittest.TestCase, TestMixin):
def setUp(self):
self.config = YakuConfig()
def test_hands_calculation(self):
"""
        A group of hands that were not calculated properly in tenhou replays.
        I fixed them and left the hands in these tests to be sure that the bugs stay fixed.
"""
hand = HandCalculator()
player_wind = EAST
tiles = self._string_to_136_array(pin="112233999", honors="11177")
win_tile = self._string_to_136_tile(pin="9")
melds = [
self._make_meld(Meld.PON, honors="111"),
self._make_meld(Meld.CHI, pin="123"),
self._make_meld(Meld.CHI, pin="123"),
]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
        # we had a bug with multiple dora indicators and honor sets;
        # this test covers that situation
tiles = self._string_to_136_array(pin="22244456799", honors="4444")
win_tile = self._string_to_136_tile(pin="2")
dora_indicators = [self._string_to_136_tile(sou="3"), self._string_to_136_tile(honors="3")]
melds = [self._make_meld(Meld.KAN, honors="4444")]
result = hand.estimate_hand_value(tiles, win_tile, dora_indicators=dora_indicators, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 6)
self.assertEqual(result.fu, 50)
self.assertEqual(len(result.yaku), 2)
# if we can't add pinfu to the hand
        # we can add 2 fu to make the hand more expensive
tiles = self._string_to_136_array(sou="678", man="11", pin="123345", honors="666")
win_tile = self._string_to_136_tile(pin="3")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.fu, 40)
tiles = self._string_to_136_array(man="234789", pin="12345666")
win_tile = self._string_to_136_tile(pin="6")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.fu, 30)
tiles = self._string_to_136_array(sou="678", pin="34555789", honors="555")
win_tile = self._string_to_136_tile(pin="5")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.fu, 40)
tiles = self._string_to_136_array(sou="123345678", man="678", pin="88")
win_tile = self._string_to_136_tile(sou="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
tiles = self._string_to_136_array(sou="12399", man="123456", pin="456")
win_tile = self._string_to_136_tile(sou="1")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
tiles = self._string_to_136_array(sou="111123666789", honors="11")
win_tile = self._string_to_136_tile(sou="1")
melds = [self._make_meld(Meld.PON, sou="666")]
dora_indicators = [self._string_to_136_tile(honors="4")]
result = hand.estimate_hand_value(
tiles,
win_tile,
melds=melds,
dora_indicators=dora_indicators,
config=self._make_hand_config(player_wind=player_wind),
)
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 4)
tiles = self._string_to_136_array(pin="12333", sou="567", honors="666777")
win_tile = self._string_to_136_tile(pin="3")
melds = [self._make_meld(Meld.PON, honors="666"), self._make_meld(Meld.PON, honors="777")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(pin="12367778", sou="678", man="456")
win_tile = self._string_to_136_tile(pin="7")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_riichi=True))
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 1)
tiles = self._string_to_136_array(man="11156677899", honors="7777")
win_tile = self._string_to_136_tile(man="7")
melds = [
self._make_meld(Meld.KAN, honors="7777"),
self._make_meld(Meld.PON, man="111"),
self._make_meld(Meld.CHI, man="678"),
]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 3)
tiles = self._string_to_136_array(man="122223777888", honors="66")
win_tile = self._string_to_136_tile(man="2")
melds = [self._make_meld(Meld.CHI, man="123"), self._make_meld(Meld.PON, man="777")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(pin="11144678888", honors="444")
win_tile = self._string_to_136_tile(pin="8")
melds = [
self._make_meld(Meld.PON, honors="444"),
self._make_meld(Meld.PON, pin="111"),
self._make_meld(Meld.PON, pin="888"),
]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(sou="67778", man="345", pin="999", honors="222")
win_tile = self._string_to_136_tile(sou="7")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 1)
tiles = self._string_to_136_array(sou="33445577789", man="345")
win_tile = self._string_to_136_tile(sou="7")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(pin="112233667788", honors="22")
win_tile = self._string_to_136_tile(pin="3")
melds = [self._make_meld(Meld.CHI, pin="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(sou="345", man="12333456789")
win_tile = self._string_to_136_tile(man="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(sou="11123456777888")
melds = [
self._make_meld(Meld.CHI, sou="123"),
self._make_meld(Meld.PON, sou="777"),
self._make_meld(Meld.PON, sou="888"),
]
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 5)
tiles = self._string_to_136_array(sou="112233789", honors="55777")
melds = [self._make_meld(Meld.CHI, sou="123")]
win_tile = self._string_to_136_tile(sou="2")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 4)
tiles = self._string_to_136_array(pin="234777888999", honors="22")
melds = [self._make_meld(Meld.CHI, pin="234"), self._make_meld(Meld.CHI, pin="789")]
win_tile = self._string_to_136_tile(pin="9")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 2)
tiles = self._string_to_136_array(pin="77888899", honors="777", man="444")
melds = [self._make_meld(Meld.PON, honors="777"), self._make_meld(Meld.PON, man="444")]
win_tile = self._string_to_136_tile(pin="8")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 1)
tiles = self._string_to_136_array(pin="12333345", honors="555", man="567")
win_tile = self._string_to_136_tile(pin="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.fu, 40)
self.assertEqual(result.han, 1)
tiles = self._string_to_136_array(pin="34567777889", honors="555")
win_tile = self._string_to_136_tile(pin="7")
melds = [self._make_meld(Meld.CHI, pin="345")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.fu, 30)
self.assertEqual(result.han, 3)
tiles = self._string_to_136_array(pin="567", sou="3334444555", honors="77")
win_tile = self._string_to_136_tile(sou="3")
melds = [self._make_meld(Meld.KAN, is_open=False, sou="4444")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_riichi=True))
self.assertEqual(result.fu, 60)
self.assertEqual(result.han, 1)
def test_is_riichi(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_riichi=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_tsumo(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
        # with an open hand, tsumo does not give a yaku
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
self.assertNotEqual(result.error, None)
def test_is_ippatsu(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_riichi=True, is_ippatsu=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
def test_is_rinshan(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="1234444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="1")
# closed kan: rinshan & tsumo
melds = [self._make_meld(Meld.KAN, is_open=False, sou="4444")]
result = hand.estimate_hand_value(
tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True, is_rinshan=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
# open kan: rinshan only
melds = [self._make_meld(Meld.KAN, sou="4444")]
result = hand.estimate_hand_value(
tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True, is_rinshan=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_chankan(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="1")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_chankan=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_haitei(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
# menzen tsumo & haitei
result = hand.estimate_hand_value(tiles, win_tile, config=self._make_hand_config(is_tsumo=True, is_haitei=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 2)
# haitei only
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(
tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True, is_haitei=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_houtei(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_houtei=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_renhou(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_renhou=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 5)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_daburu_riichi(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_daburu_riichi=True, is_riichi=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_open_riichi(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_riichi=True, is_open_riichi=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_daburu_open_riichi(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123444", man="234456", pin="66")
win_tile = self._string_to_136_tile(sou="4")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_daburu_riichi=True, is_riichi=True, is_open_riichi=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_nagashi_mangan(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="13579", man="234456", pin="66")
result = hand.estimate_hand_value(tiles, None, config=self._make_hand_config(is_nagashi_mangan=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 5)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_chitoitsu_hand(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="113355", man="113355", pin="11")
self.assertTrue(self.config.chiitoitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="2299", man="2299", pin="1199", honors="44")
self.assertTrue(self.config.chiitoitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="113355", man="113355", pin="11")
win_tile = self._string_to_136_tile(pin="1")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 25)
self.assertEqual(len(result.yaku), 1)
def test_is_chitoitsu_hand_and_identical_pairs(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="11335555", man="1133", pin="11")
win_tile = self._string_to_136_tile(pin="1")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, HandCalculator.ERR_HAND_NOT_WINNING)
def test_is_tanyao(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="234567", pin="22")
self.assertTrue(self.config.tanyao.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="123456", man="234567", pin="22")
self.assertFalse(self.config.tanyao.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="234567", man="234567", honors="22")
self.assertFalse(self.config.tanyao.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="234567", man="234567", pin="22")
win_tile = self._string_to_136_tile(man="7")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_riichi=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 3)
tiles = self._string_to_136_array(sou="234567", man="234567", pin="22")
win_tile = self._string_to_136_tile(man="7")
melds = [self._make_meld(Meld.CHI, sou="234")]
result = hand.estimate_hand_value(
tiles, win_tile, melds=melds, config=self._make_hand_config(has_open_tanyao=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
tiles = self._string_to_136_array(sou="234567", man="234567", pin="22")
win_tile = self._string_to_136_tile(man="7")
melds = [self._make_meld(Meld.CHI, sou="234")]
result = hand.estimate_hand_value(
tiles, win_tile, melds=melds, config=self._make_hand_config(has_open_tanyao=False)
)
self.assertNotEqual(result.error, None)
def test_is_pinfu_hand(self):
player_wind, round_wind = EAST, WEST
hand = HandCalculator()
tiles = self._string_to_136_array(sou="123456", man="123456", pin="55")
win_tile = self._string_to_136_tile(man="6")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
# waiting in two pairs
tiles = self._string_to_136_array(sou="123456", man="123555", pin="55")
win_tile = self._string_to_136_tile(man="5")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertNotEqual(result.error, None)
# contains pon or kan
tiles = self._string_to_136_array(sou="111456", man="123456", pin="55")
win_tile = self._string_to_136_tile(man="6")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertNotEqual(result.error, None)
# penchan waiting
tiles = self._string_to_136_array(sou="123456", man="123456", pin="55")
win_tile = self._string_to_136_tile(sou="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertNotEqual(result.error, None)
# kanchan waiting
tiles = self._string_to_136_array(sou="123567", man="123456", pin="55")
win_tile = self._string_to_136_tile(sou="6")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertNotEqual(result.error, None)
# tanki waiting
tiles = self._string_to_136_array(man="22456678", pin="123678")
win_tile = self._string_to_136_tile(man="2")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertNotEqual(result.error, None)
# valued pair
tiles = self._string_to_136_array(sou="123678", man="123456", honors="11")
win_tile = self._string_to_136_tile(sou="6")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(player_wind=player_wind, round_wind=round_wind)
)
self.assertNotEqual(result.error, None)
# not valued pair
tiles = self._string_to_136_array(sou="123678", man="123456", honors="22")
win_tile = self._string_to_136_tile(sou="6")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
# open hand
tiles = self._string_to_136_array(sou="12399", man="123456", pin="456")
win_tile = self._string_to_136_tile(man="1")
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertNotEqual(result.error, None)
def test_is_iipeiko(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="112233", man="123", pin="23444")
self.assertTrue(self.config.iipeiko.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="112233", man="333", pin="12344")
win_tile = self._string_to_136_tile(man="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertNotEqual(result.error, None)
def test_is_ryanpeiko(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="112233", man="22", pin="223344")
self.assertTrue(self.config.ryanpeiko.is_condition_met(self._hand(tiles, 1)))
tiles = self._string_to_34_array(sou="111122223333", man="22")
self.assertTrue(self.config.ryanpeiko.is_condition_met(self._hand(tiles, 1)))
tiles = self._string_to_34_array(sou="112233", man="123", pin="23444")
self.assertFalse(self.config.ryanpeiko.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="112233", man="33", pin="223344")
win_tile = self._string_to_136_tile(pin="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertNotEqual(result.error, None)
def test_is_sanshoku(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="123", man="123", pin="12345677")
self.assertTrue(self.config.sanshoku.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="123456", man="23455", pin="123")
self.assertFalse(self.config.sanshoku.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="123456", man="12399", pin="123")
win_tile = self._string_to_136_tile(man="2")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_sanshoku_douko(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="111", man="111", pin="11145677")
self.assertTrue(self.config.sanshoku_douko.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="111", man="222", pin="33344455")
self.assertFalse(self.config.sanshoku_douko.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="222", man="222", pin="22245699")
melds = [self._make_meld(Meld.CHI, sou="222")]
win_tile = self._string_to_136_tile(pin="9")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_sanshoku_douko_and_kan_in_hand(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="2222", man="222", pin="22245699")
melds = [self._make_meld(Meld.KAN, sou="2222")]
win_tile = self._string_to_136_tile(pin="9")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_toitoi(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="111333", man="333", pin="44555")
self.assertTrue(self.config.toitoi.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="777", pin="777888999", honors="44")
self.assertTrue(self.config.toitoi.is_condition_met(self._hand(tiles, 0)))
tiles = self._string_to_136_array(sou="111333", man="333", pin="44555")
melds = [self._make_meld(Meld.PON, sou="111"), self._make_meld(Meld.PON, sou="333")]
win_tile = self._string_to_136_tile(pin="5")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
tiles = self._string_to_136_array(sou="777", pin="777888999", honors="44")
melds = [self._make_meld(Meld.PON, sou="777")]
win_tile = self._string_to_136_tile(pin="9")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_sankantsu(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="11113333", man="123", pin="446666")
melds = [
self._make_meld(Meld.KAN, sou="1111"),
self._make_meld(Meld.KAN, sou="3333"),
self._make_meld(Meld.KAN, pin="6666"),
]
self.assertTrue(self.config.sankantsu.is_condition_met(hand, melds))
melds = [
self._make_meld(Meld.SHOUMINKAN, sou="1111"),
self._make_meld(Meld.KAN, sou="3333"),
self._make_meld(Meld.KAN, pin="6666"),
]
win_tile = self._string_to_136_tile(man="3")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 60)
self.assertEqual(len(result.yaku), 1)
def test_is_honroto(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="111999", man="111", honors="11222")
self.assertTrue(self.config.honroto.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(pin="11", honors="22334466", man="1199")
self.assertTrue(self.config.honroto.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="111999", man="111", honors="11222")
win_tile = self._string_to_136_tile(honors="2")
melds = [self._make_meld(Meld.PON, sou="111")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 4)
self.assertEqual(result.fu, 50)
self.assertEqual(len(result.yaku), 2)
tiles = self._string_to_136_array(pin="11", honors="22334466", man="1199")
win_tile = self._string_to_136_tile(man="1")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.fu, 25)
self.assertEqual(result.han, 4)
def test_is_sanankou(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="111444", man="333", pin="44555")
win_tile = self._string_to_136_tile(sou="4")
melds = [self._make_meld(Meld.PON, sou="444"), self._make_meld(Meld.PON, sou="111")]
self.assertFalse(self.config.sanankou.is_condition_met(self._hand(tiles), win_tile, melds, False))
melds = [self._make_meld(Meld.PON, sou="111")]
self.assertFalse(self.config.sanankou.is_condition_met(self._hand(tiles), win_tile, melds, False))
self.assertTrue(self.config.sanankou.is_condition_met(self._hand(tiles), win_tile, melds, True))
tiles = self._string_to_34_array(pin="444789999", honors="22333")
win_tile = self._string_to_136_tile(pin="9")
self.assertTrue(self.config.sanankou.is_condition_met(self._hand(tiles), win_tile, [], False))
melds = [self._make_meld(Meld.CHI, pin="456")]
tiles = self._string_to_34_array(pin="222456666777", honors="77")
win_tile = self._string_to_136_tile(pin="6")
self.assertFalse(self.config.sanankou.is_condition_met(self._hand(tiles), win_tile, melds, False))
tiles = self._string_to_136_array(sou="123444", man="333", pin="44555")
melds = [self._make_meld(Meld.CHI, sou="123")]
win_tile = self._string_to_136_tile(pin="5")
result = hand.estimate_hand_value(tiles, win_tile, melds=melds, config=self._make_hand_config(is_tsumo=True))
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_shosangen(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="123", man="345", honors="55666777")
self.assertTrue(self.config.shosangen.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="123", man="345", honors="55666777")
win_tile = self._string_to_136_tile(honors="7")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 4)
self.assertEqual(result.fu, 50)
self.assertEqual(len(result.yaku), 3)
def test_is_chanta(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="123", man="123789", honors="22333")
self.assertTrue(self.config.chantai.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="111", man="111999", honors="22333")
self.assertFalse(self.config.chantai.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="111999", man="111999", pin="11999")
self.assertFalse(self.config.chantai.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="123", man="123789", honors="22333")
win_tile = self._string_to_136_tile(honors="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, sou="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_junchan(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="789", man="123789", pin="12399")
self.assertTrue(self.config.junchan.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="111", man="111999", honors="22333")
self.assertFalse(self.config.junchan.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(sou="111999", man="111999", pin="11999")
self.assertFalse(self.config.junchan.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="789", man="123789", pin="12399")
win_tile = self._string_to_136_tile(man="2")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, sou="789")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_honitsu(self):
hand = HandCalculator()
tiles = self._string_to_34_array(man="123456789", honors="11122")
self.assertTrue(self.config.honitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(man="123456789", pin="123", honors="22")
self.assertFalse(self.config.honitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(man="12345666778899")
self.assertFalse(self.config.honitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(man="123455667", honors="11122")
win_tile = self._string_to_136_tile(honors="2")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, man="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_chinitsu(self):
hand = HandCalculator()
tiles = self._string_to_34_array(man="12345666778899")
self.assertTrue(self.config.chinitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(man="123456778899", honors="22")
self.assertFalse(self.config.chinitsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(man="11234567677889")
win_tile = self._string_to_136_tile(man="1")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 6)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, man="678")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 5)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_ittsu(self):
hand = HandCalculator()
tiles = self._string_to_34_array(man="123456789", sou="123", honors="22")
self.assertTrue(self.config.ittsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(man="112233456789", honors="22")
self.assertTrue(self.config.ittsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_34_array(man="122334567789", honors="11")
self.assertFalse(self.config.ittsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(man="123456789", sou="123", honors="22")
win_tile = self._string_to_136_tile(sou="3")
result = hand.estimate_hand_value(tiles, win_tile)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
melds = [self._make_meld(Meld.CHI, man="123")]
result = hand.estimate_hand_value(tiles, win_tile, melds=melds)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 1)
def test_is_haku(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="555")
self.assertTrue(self.config.haku.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="555")
win_tile = self._string_to_136_tile(honors="5")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_riichi=False)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_hatsu(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="666")
self.assertTrue(self.config.hatsu.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="666")
win_tile = self._string_to_136_tile(honors="6")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_riichi=False)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_chun(self):
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="777")
self.assertTrue(self.config.chun.is_condition_met(self._hand(tiles)))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="777")
win_tile = self._string_to_136_tile(honors="7")
result = hand.estimate_hand_value(
tiles, win_tile, config=self._make_hand_config(is_tsumo=False, is_riichi=False)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
def test_is_east(self):
player_wind, round_wind = EAST, WEST
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="111")
self.assertTrue(self.config.east.is_condition_met(self._hand(tiles), player_wind, round_wind))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="111")
win_tile = self._string_to_136_tile(honors="1")
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
round_wind = EAST
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
def test_is_south(self):
player_wind, round_wind = SOUTH, EAST
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="222")
self.assertTrue(self.config.south.is_condition_met(self._hand(tiles), player_wind, round_wind))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="222")
win_tile = self._string_to_136_tile(honors="2")
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
round_wind = SOUTH
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
def test_is_west(self):
player_wind, round_wind = WEST, EAST
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="333")
self.assertTrue(self.config.west.is_condition_met(self._hand(tiles), player_wind, round_wind))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="333")
win_tile = self._string_to_136_tile(honors="3")
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
round_wind = WEST
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
def test_is_north(self):
player_wind, round_wind = NORTH, EAST
hand = HandCalculator()
tiles = self._string_to_34_array(sou="234567", man="23422", honors="444")
self.assertTrue(self.config.north.is_condition_met(self._hand(tiles), player_wind, round_wind))
tiles = self._string_to_136_array(sou="234567", man="23422", honors="444")
win_tile = self._string_to_136_tile(honors="4")
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 1)
round_wind = NORTH
result = hand.estimate_hand_value(
tiles,
win_tile,
config=self._make_hand_config(
is_tsumo=False, is_riichi=False, player_wind=player_wind, round_wind=round_wind
),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
def test_dora_in_hand(self):
hand = HandCalculator()
        # a hand without yaku, but with dora, should be considered invalid
tiles = self._string_to_136_array(sou="345678", man="456789", honors="55")
win_tile = self._string_to_136_tile(sou="5")
dora_indicators = [self._string_to_136_tile(sou="5")]
melds = [self._make_meld(Meld.CHI, sou="678")]
result = hand.estimate_hand_value(tiles, win_tile, dora_indicators=dora_indicators, melds=melds)
self.assertNotEqual(result.error, None)
tiles = self._string_to_136_array(sou="123456", man="123456", pin="33")
win_tile = self._string_to_136_tile(man="6")
dora_indicators = [self._string_to_136_tile(pin="2")]
result = hand.estimate_hand_value(tiles, win_tile, dora_indicators=dora_indicators)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 2)
tiles = self._string_to_136_array(man="22456678", pin="123678")
win_tile = self._string_to_136_tile(man="2")
dora_indicators = [self._string_to_136_tile(man="1"), self._string_to_136_tile(pin="2")]
result = hand.estimate_hand_value(
tiles, win_tile, dora_indicators=dora_indicators, config=self._make_hand_config(is_tsumo=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 4)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 2)
# double dora
tiles = self._string_to_136_array(man="678", pin="34577", sou="123345")
win_tile = self._string_to_136_tile(pin="7")
dora_indicators = [self._string_to_136_tile(sou="4"), self._string_to_136_tile(sou="4")]
result = hand.estimate_hand_value(
tiles, win_tile, dora_indicators=dora_indicators, config=self._make_hand_config(is_tsumo=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 3)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 2)
# double dora and honor tiles
tiles = self._string_to_136_array(man="678", pin="345", sou="123345", honors="66")
win_tile = self._string_to_136_tile(pin="5")
dora_indicators = [self._string_to_136_tile(honors="5"), self._string_to_136_tile(honors="5")]
result = hand.estimate_hand_value(
tiles, win_tile, dora_indicators=dora_indicators, config=self._make_hand_config(is_riichi=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 5)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
# double dora indicators and red fives
tiles = self._string_to_136_array(sou="12346", man="123678", pin="44")
win_tile = self._string_to_136_tile(pin="4")
tiles.append(FIVE_RED_SOU)
dora_indicators = [self._string_to_136_tile(pin="2"), self._string_to_136_tile(pin="2")]
result = hand.estimate_hand_value(
tiles,
win_tile,
dora_indicators=dora_indicators,
config=self._make_hand_config(is_tsumo=True, has_aka_dora=True),
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 2)
self.assertEqual(result.fu, 30)
self.assertEqual(len(result.yaku), 2)
# dora in kan
tiles = self._string_to_136_array(man="7777", pin="34577", sou="123345")
win_tile = self._string_to_136_tile(pin="7")
melds = [self._make_meld(Meld.KAN, is_open=False, man="7777")]
dora_indicators = [self._string_to_136_tile(man="6")]
result = hand.estimate_hand_value(
tiles, win_tile, dora_indicators=dora_indicators, melds=melds, config=self._make_hand_config(is_tsumo=True)
)
self.assertEqual(result.error, None)
self.assertEqual(result.han, 5)
self.assertEqual(result.fu, 40)
self.assertEqual(len(result.yaku), 2)
def test_is_agari_and_closed_kan(self):
"""
        There was a bug where we did not count a closed kan set for agari,
        and the calculator thought that the hand was agari (but it was not)
:return:
"""
hand = HandCalculator()
tiles = self._string_to_136_array(man="45666777", pin="111", honors="222")
win_tile = self._string_to_136_tile(man="4")
melds = [
self._make_meld(Meld.PON, pin="111"),
self._make_meld(Meld.KAN, man="6666", is_open=False),
self._make_meld(Meld.PON, man="777"),
]
result = hand.estimate_hand_value(tiles, win_tile, melds)
# error is correct answer
self.assertNotEqual(result.error, None)
def test_kazoe_settings(self):
hand = HandCalculator()
tiles = self._string_to_136_array(man="222244466677788")
win_tile = self._string_to_136_tile(man="7")
melds = [
self._make_meld(Meld.KAN, man="2222", is_open=False),
]
dora_indicators = [
self._string_to_136_tile(man="1"),
self._string_to_136_tile(man="1"),
self._string_to_136_tile(man="1"),
self._string_to_136_tile(man="1"),
]
config = HandConfig(is_riichi=True, options=OptionalRules(kazoe_limit=HandConfig.KAZOE_LIMITED))
result = hand.estimate_hand_value(tiles, win_tile, melds, dora_indicators, config)
self.assertEqual(result.han, 28)
self.assertEqual(result.cost["main"], 32000)
config = HandConfig(is_riichi=True, options=OptionalRules(kazoe_limit=HandConfig.KAZOE_SANBAIMAN))
result = hand.estimate_hand_value(tiles, win_tile, melds, dora_indicators, config)
self.assertEqual(result.han, 28)
self.assertEqual(result.cost["main"], 24000)
config = HandConfig(is_riichi=True, options=OptionalRules(kazoe_limit=HandConfig.KAZOE_NO_LIMIT))
result = hand.estimate_hand_value(tiles, win_tile, melds, dora_indicators, config)
self.assertEqual(result.han, 28)
self.assertEqual(result.cost["main"], 64000)
def test_open_hand_without_additional_fu(self):
hand = HandCalculator()
tiles = self._string_to_136_array(sou="234678", man="234567", pin="22")
win_tile = self._string_to_136_tile(sou="6")
melds = [self._make_meld(Meld.CHI, sou="234")]
config = HandConfig(options=OptionalRules(has_open_tanyao=True, fu_for_open_pinfu=False))
result = hand.estimate_hand_value(tiles, win_tile, melds, config=config)
self.assertEqual(result.han, 1)
self.assertEqual(result.fu, 20)
self.assertEqual(result.cost["main"], 700)
def test_aka_dora(self):
hand_calculator = HandCalculator()
win_tile = TilesConverter.string_to_136_array(man="9")[0]
hand_config = HandConfig(is_tsumo=True, options=OptionalRules(has_aka_dora=True))
# three red old style, but not that useful
tiles = TilesConverter.string_to_136_array(sou="345", pin="456", man="12355599", has_aka_dora=False)
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 4)
# zero red
tiles = TilesConverter.string_to_136_array(sou="345", pin="456", man="12355599", has_aka_dora=True)
win_tile = TilesConverter.string_to_136_array(man="9")[0]
hand_config = HandConfig(is_tsumo=True, options=OptionalRules(has_aka_dora=True))
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 1)
# one red
tiles = TilesConverter.string_to_136_array(sou="34r", pin="456", man="12355599", has_aka_dora=True)
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 2)
# two red
tiles = TilesConverter.string_to_136_array(sou="34r", pin="4r6", man="12355599", has_aka_dora=True)
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 3)
# three red
tiles = TilesConverter.string_to_136_array(sou="34r", pin="4r6", man="123r5599", has_aka_dora=True)
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 4)
# four red
tiles = TilesConverter.string_to_136_array(sou="34r", pin="4r6", man="123rr599", has_aka_dora=True)
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 5)
# five+ red (technically not legal in mahjong but not the fault of evaluator, really)
tiles = TilesConverter.string_to_136_array(sou="34r", pin="4r6", man="123rrr99", has_aka_dora=True)
hand_calculation = hand_calculator.estimate_hand_value(tiles, win_tile, config=hand_config)
self.assertIsNone(hand_calculation.error)
self.assertEqual(hand_calculation.han, 6)
| 42.972244 | 120 | 0.660411 |
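All of the tests above go through HandCalculator.estimate_hand_value with a 14-tile hand, the winning tile and an optional HandConfig, melds and dora indicators. A stand-alone sketch of the same call, using an arbitrarily chosen closed tsumo hand purely as an example:
# Usage sketch of the API exercised by the tests above; the hand itself is arbitrary.
from mahjong.hand_calculating.hand import HandCalculator
from mahjong.hand_calculating.hand_config import HandConfig
from mahjong.tile import TilesConverter
calculator = HandCalculator()
tiles = TilesConverter.string_to_136_array(man="234567", sou="234567", pin="55")  # 14 tiles
win_tile = TilesConverter.string_to_136_array(sou="7")[0]  # winning by tsumo on 7 sou
result = calculator.estimate_hand_value(tiles, win_tile, config=HandConfig(is_tsumo=True))
print(result.han, result.fu, result.yaku, result.cost)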
cb1d9cfba15fbe3abd6f326f4b1d820540d854c5 | 1,044 | py | Python | modules/math-codes/modules/statistics-and-probability/src/nyc_average-v3.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
]
| 1 | 2020-09-06T22:17:19.000Z | 2020-09-06T22:17:19.000Z | modules/math-codes/modules/statistics-and-probability/src/nyc_average-v3.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
]
| null | null | null | modules/math-codes/modules/statistics-and-probability/src/nyc_average-v3.py | drigols/Studies | 9c293156935b491ded24be6b511daac67fd43538 | [
"MIT"
]
| null | null | null | ########################################################
# Rodrigo Leite - drigols #
# Last update: 26/09/2021 #
########################################################
def create_plot(x, y):
import matplotlib.pyplot as plt
# Display to each "x" the temperature average "y".
for year, average_temp in zip(x, y):
print("In year {0} the average New York temperature was {1}°".format(year, average_temp))
plt.plot(x, y, marker='o')
plt.savefig('../images/new-york-tem-03.png', format='png')
plt.show()
if __name__ =='__main__':
  # Create a list with New York's average temperatures from 2000 to 2012.
nyc_temp = [53.9, 56.3, 56.4, 53.4, 54.5, 55.8, 56.8, 55.0, 55.3, 54.0, 56.7, 56.4, 57.3]
  # Use the range() function to create the list of years (2000 to 2012).
  # NOTE: Remember that range() never includes the stop value, so range(2000, 2013) covers 2000 to 2012.
years = range(2000, 2013)
  # Create the plot with the create_plot() function.
create_plot(years, nyc_temp)
| 37.285714 | 99 | 0.564176 |
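The script above prints and plots only the per-year averages. If the overall mean of the plotted series is also wanted, a short plain-Python sketch (reusing the same list) would be:
# Sketch: overall mean of the series plotted above.
nyc_temp = [53.9, 56.3, 56.4, 53.4, 54.5, 55.8, 56.8, 55.0, 55.3, 54.0, 56.7, 56.4, 57.3]
overall_mean = sum(nyc_temp) / len(nyc_temp)
print("Overall 2000-2012 average: {0:.2f} degrees".format(overall_mean))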
dc24632cbb400bc4b5153dc9f6469ef10327de7e | 2,185 | py | Python | sympy/tensor/array/expressions/array_expressions.py | suryam35/sympy | 4aa3cd6c7c689fbe4e604082fb44e2136fa4224d | [
"BSD-3-Clause"
]
| null | null | null | sympy/tensor/array/expressions/array_expressions.py | suryam35/sympy | 4aa3cd6c7c689fbe4e604082fb44e2136fa4224d | [
"BSD-3-Clause"
]
| null | null | null | sympy/tensor/array/expressions/array_expressions.py | suryam35/sympy | 4aa3cd6c7c689fbe4e604082fb44e2136fa4224d | [
"BSD-3-Clause"
]
| null | null | null | import itertools
from sympy import Expr, ImmutableDenseNDimArray, S, Symbol
from sympy.core.sympify import _sympify
class _ArrayExpr(Expr):
pass
class ArraySymbol(_ArrayExpr):
"""
Symbol representing an array expression
"""
def __new__(cls, symbol, *shape):
if isinstance(symbol, str):
symbol = Symbol(symbol)
# symbol = _sympify(symbol)
shape = map(_sympify, shape)
obj = Expr.__new__(cls, symbol, *shape)
return obj
@property
def name(self):
return self._args[0]
@property
def shape(self):
return self._args[1:]
def __getitem__(self, item):
return ArrayElement(self, item)
def as_explicit(self):
data = [self[i] for i in itertools.product(*[range(j) for j in self.shape])]
return ImmutableDenseNDimArray(data).reshape(*self.shape)
class ArrayElement(_ArrayExpr):
"""
An element of an array.
"""
def __new__(cls, name, indices):
if isinstance(name, str):
name = Symbol(name)
name = _sympify(name)
indices = _sympify(indices)
if hasattr(name, "shape"):
if any([(i >= s) == True for i, s in zip(indices, name.shape)]):
raise ValueError("shape is out of bounds")
if any([(i < 0) == True for i in indices]):
raise ValueError("shape contains negative values")
obj = Expr.__new__(cls, name, indices)
return obj
@property
def name(self):
return self._args[0]
@property
def indices(self):
return self._args[1]
class ZeroArray(Expr):
"""
Symbolic array of zeros. Equivalent to ``ZeroMatrix`` for matrices.
"""
def __new__(cls, *shape):
if len(shape) == 0:
return S.Zero
shape = map(_sympify, shape)
obj = Expr.__new__(cls, *shape)
return obj
@property
def shape(self):
return self._args
def as_explicit(self):
if any(not i.is_Integer for i in self.shape):
raise ValueError("Cannot return explicit form for symbolic shape.")
return ImmutableDenseNDimArray.zeros(*self.shape)
| 25.406977 | 84 | 0.600915 |
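A short usage sketch for the classes defined above; the shapes and symbol names are arbitrary, and the import path simply mirrors this module's location:
# Usage sketch for ArraySymbol / ArrayElement / ZeroArray defined above.
from sympy.tensor.array.expressions.array_expressions import ArraySymbol, ZeroArray
A = ArraySymbol("A", 2, 2)    # symbolic 2x2 array
elem = A[0, 1]                # ArrayElement for entry (0, 1); out-of-range indices raise ValueError
explicit = A.as_explicit()    # ImmutableDenseNDimArray filled with ArrayElement entries
Z = ZeroArray(2, 3)           # symbolic zero array; Z.as_explicit() is a 2x3 array of zeros
print(elem, explicit.shape, Z.as_explicit())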
d7fe140392ce5481d85ab4c9813ad330feb45b70 | 1,050 | py | Python | release/stubs.min/System/Windows/Controls/Primitives_parts/IRecyclingItemContainerGenerator.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
]
| null | null | null | release/stubs.min/System/Windows/Controls/Primitives_parts/IRecyclingItemContainerGenerator.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
]
| null | null | null | release/stubs.min/System/Windows/Controls/Primitives_parts/IRecyclingItemContainerGenerator.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
]
| null | null | null | class IRecyclingItemContainerGenerator(IItemContainerGenerator):
""" Extends the System.Windows.Controls.Primitives.IItemContainerGenerator interface to reuse the UI content it generates. Classes that are responsible for generating user interface (UI) content on behalf of a host implement this interface. """
def Recycle(self, position, count):
"""
Recycle(self: IRecyclingItemContainerGenerator,position: GeneratorPosition,count: int)
Disassociates item containers from their data items and saves the containers so they can be
reused later for other data items.
position: The zero-based index of the first element to reuse. position must refer to a previously
generated (realized) item.
count: The number of elements to reuse,starting at position.
"""
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
| 36.206897 | 249 | 0.714286 |
70ebd9a173c724563941391bc64081140eba65de | 1,092 | py | Python | tigercontrol/utils/optimizers/tests/test_adam.py | MinRegret/TigerControl | b1ca0617cbb2198f9d5cb37f725f3d7accbab08f | [
"Apache-2.0"
]
| 31 | 2019-11-08T06:01:54.000Z | 2021-11-20T04:50:43.000Z | tigercontrol/utils/optimizers/tests/test_adam.py | johnhallman/ctsb | b1ca0617cbb2198f9d5cb37f725f3d7accbab08f | [
"Apache-2.0"
]
| 32 | 2019-06-27T15:05:04.000Z | 2019-08-07T04:23:47.000Z | tigercontrol/utils/optimizers/tests/test_adam.py | MinRegret/tigercontrol | b1ca0617cbb2198f9d5cb37f725f3d7accbab08f | [
"Apache-2.0"
]
| 3 | 2020-09-30T17:06:50.000Z | 2021-04-12T22:39:34.000Z | import tigercontrol
from tigercontrol.utils.optimizers.adam import Adam
from tigercontrol.utils.optimizers.losses import mse
import matplotlib.pyplot as plt
def test_adam(show=False):
environment = tigercontrol.environment('LDS')
x = environment.reset(p=2,q=0)
controller = tigercontrol.controllers('LSTM')
controller.initialize(n=1, m=1, l=3, h=10, optimizer=Adam) # initialize with class
controller.predict(1.0) # call controllers to verify it works
controller.update(1.0)
optimizer = Adam(learning_rate=0.1)
controller = tigercontrol.controllers('LSTM')
controller.initialize(n=1, m=1, l=3, h=10, optimizer=optimizer) # reinitialize with instance
loss = []
for t in range(1000):
y_pred = controller.predict(x)
y_true = environment.step()
loss.append(mse(y_pred, y_true))
controller.update(y_true)
x = y_true
if show:
plt.plot(loss)
plt.show(block=False)
plt.pause(3)
plt.close()
print("test_adam passed")
if __name__ == "__main__":
test_adam(show=True) | 28.736842 | 96 | 0.677656 |
8300c0efef839deac1a813a2f856985648869a86 | 632 | py | Python | DataAnalysis_FangSpider.py | Beolus/FangSpider | 4fc130085e36f8338041fef075015c3786a9ccf4 | [
"Apache-2.0"
]
| 1 | 2018-04-03T07:00:14.000Z | 2018-04-03T07:00:14.000Z | DataAnalysis_FangSpider.py | Beolus/FangSpider | 4fc130085e36f8338041fef075015c3786a9ccf4 | [
"Apache-2.0"
]
| null | null | null | DataAnalysis_FangSpider.py | Beolus/FangSpider | 4fc130085e36f8338041fef075015c3786a9ccf4 | [
"Apache-2.0"
]
| null | null | null | #_*_coding:utf8_*_
import pymongo
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
client = pymongo.MongoClient('192.168.1.123',27017)
fangdb = client['fangdb']
newfanginfo = fangdb['20171121140104']
df = pd.DataFrame(list(newfanginfo.find()))
df1 = df.loc[:,['EstateArea','RefPrice','AvePrice','HuXing']]
areas = [u'北京',u'上海',u'深圳',u'广州',u'杭州',u'东莞',u'惠州',u'佛山',u'珠海']
areasmeans = []
for i in range(len(areas)):  # range() instead of xrange() so the script also runs on Python 3
areasdf = df1.loc[df['EstateArea'].isin([areas[i]])]
areasave = areasdf['RefPrice']*10000/areasdf['HuXing']
areasmean = areasave.mean()
areasmeans.append(areasmean)
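# Illustrative follow-up (editor's addition, not in the original spider script):
# print each area next to its mean price per room computed above.
for area, mean_price in zip(areas, areasmeans):
    print("%s: %.2f" % (area, mean_price))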
| 20.387097 | 63 | 0.678797 |
a393db40e46cb8ee364e3b222f7489e83760175a | 1,287 | py | Python | envoy/tests/common.py | glasser/integrations-core | 1dd515d49b1690a1369ee5195713605b1b072b1f | [
"BSD-3-Clause"
]
| 2 | 2019-05-28T03:48:29.000Z | 2019-07-05T07:05:58.000Z | envoy/tests/common.py | glasser/integrations-core | 1dd515d49b1690a1369ee5195713605b1b072b1f | [
"BSD-3-Clause"
]
| 4 | 2019-07-03T02:53:19.000Z | 2019-07-10T14:52:14.000Z | envoy/tests/common.py | glasser/integrations-core | 1dd515d49b1690a1369ee5195713605b1b072b1f | [
"BSD-3-Clause"
]
| 1 | 2019-12-23T13:35:17.000Z | 2019-12-23T13:35:17.000Z | import os
from datadog_checks.utils.common import get_docker_hostname
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
HERE = os.path.dirname(os.path.abspath(__file__))
FIXTURE_DIR = os.path.join(HERE, 'fixtures')
HOST = get_docker_hostname()
PORT = '8001'
INSTANCES = {
'main': {'stats_url': 'http://{}:{}/stats'.format(HOST, PORT)},
'whitelist': {'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), 'metric_whitelist': [r'envoy\.cluster\..*']},
'blacklist': {'stats_url': 'http://{}:{}/stats'.format(HOST, PORT), 'metric_blacklist': [r'envoy\.cluster\..*']},
'whitelist_blacklist': {
'stats_url': 'http://{}:{}/stats'.format(HOST, PORT),
'metric_whitelist': [r'envoy\.cluster\.'],
'metric_blacklist': [r'envoy\.cluster\.out'],
},
}
class MockResponse:
def __init__(self, content, status_code):
self.content = content
self.status_code = status_code
@lru_cache(maxsize=None)
def response(kind):
file_path = os.path.join(FIXTURE_DIR, kind)
if os.path.isfile(file_path):
with open(file_path, 'rb') as f:
return MockResponse(f.read(), 200)
else:
raise IOError('File `{}` does not exist.'.format(file_path))
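# Illustrative usage (editor's addition, not part of the check's test helpers):
# 'some_fixture' is a hypothetical fixture file name used only for this sketch.
if __name__ == '__main__':
    try:
        mocked = response('some_fixture')
        print(mocked.status_code, len(mocked.content))
    except IOError as exc:
        print(exc)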
| 30.642857 | 117 | 0.65035 |
e114798ab808293d0633d2c2ec8c46e2d636881f | 5,993 | py | Python | remove.py | afnanhaq/political-data-builder | a79f62b31f11652c066882170d4392f86022f94b | [
"MIT"
]
| null | null | null | remove.py | afnanhaq/political-data-builder | a79f62b31f11652c066882170d4392f86022f94b | [
"MIT"
]
| null | null | null | remove.py | afnanhaq/political-data-builder | a79f62b31f11652c066882170d4392f86022f94b | [
"MIT"
]
| 3 | 2021-02-12T18:48:16.000Z | 2021-02-13T18:47:23.000Z | # -*- coding: utf-8 -*-
"""
This module provides helpers for data anonymization and for dropping columns
with over 75% NaN values. It implements privacy protection by dropping
columns containing personally identifiable information.
"""
# remove_null(df) reads in a dataset, drops columns whose values are
# entirely NaN (how='all'), and then returns the modified
# dataframe
# df: dataset for a state, pandas.DataFrame
# returns df, without columns that are primarily filled with nulls
def remove_null(df):
df.dropna(axis=1, inplace=True, how='all')
return df
# remove_null_all(place_dic) reads in all data from the global variable
# 'place_dic' and drops columns that have over 75% of their
# values as NaN, and then returns the modified dictionary
# place_dic: dictionary, must be the global dictionary in 'main'
# returns place_dic, without columns that are primarily filled with nulls
def remove_null_all(place_dic):
for key in place_dic.keys():
        # keep only columns with at least 25% non-NaN values, i.e. drop columns
        # that are more than 75% NaN; dropna is not in-place here, so reassign
        threshold = int(place_dic[key].shape[0] * 0.25)
        place_dic[key] = place_dic[key].dropna(axis=1, thresh=threshold)
return place_dic
# drop_private(df) reads in a dataset and drops columns
# that have identifiable data. it then returns the modified dataframe
# df: dataset for a state, pandas.DataFrame
# returns df, without columns that have personally identifiable info
def drop_private(df):
# list of all columns that contain private information
private_cols = ['VoterTelephones_Landline7Digit'
,'VoterTelephones_LandlineUnformatted'
,'VoterTelephones_CellPhoneFormatted'
,'VoterTelephones_CellPhoneUnformatted'
,'Voters_FirstName'
,'Voters_MiddleName'
,'Voters_LastName'
,'Voters_NameSuffix'
,'Residence_Addresses_AddressLine'
,'Residence_Addresses_ExtraAddressLine'
,'Residence_Addresses_ZipPlus4'
,'Residence_Addresses_HouseNumber'
,'Residence_Addresses_PrefixDirection'
,'Residence_Addresses_StreetName'
,'Residence_Addresses_Designator'
,'Residence_Addresses_SuffixDirection'
,'Residence_Addresses_ApartmentNum'
,'Residence_Families_FamilyID'
,'Mailing_Addresses_AddressLine'
,'Mailing_Addresses_ExtraAddressLine'
,'Mailing_Addresses_HouseNumber'
,'Mailing_Addresses_PrefixDirection'
,'Mailing_Addresses_StreetName'
,'Mailing_Addresses_Designator'
,'Mailing_Addresses_SuffixDirection'
,'Mailing_Addresses_ApartmentNum'
,'Mailing_Families_FamilyID'
,'Voters_BirthDate'
,'Residence_Addresses_City'
,'DateConfidence_Description'
,'Precinct'
,'VoterTelephones_LandlineAreaCode'
,'Residence_Addresses_State'
,'Residence_Addresses_LatLongAccuracy'
,'Mailing_Addresses_ZipPlus4',
'Voters_StateVoterID',
'VoterTelephones_LandlineFormatted']
# getting the indices for columns containing private information
df.drop(private_cols, axis = 1, inplace = True, errors='ignore')
return df
# drop_private_all(place_dic) reads in all data from the global variable
# 'place_dic' and drops columns that have identifiable data.
# it then returns the modified dictionary
# place_dic: dictionary, must be the global dictionary in 'main'
# returns place_dic, without columns that have personally identifiable info
def drop_private_all(place_dic):
# list of all columns that contain private information
private_cols = ['VoterTelephones_Landline7Digit'
,'VoterTelephones_LandlineUnformatted'
,'VoterTelephones_CellPhoneFormatted'
,'VoterTelephones_CellPhoneUnformatted'
,'Voters_FirstName'
,'Voters_MiddleName'
,'Voters_LastName'
,'Voters_NameSuffix'
,'Residence_Addresses_AddressLine'
,'Residence_Addresses_ExtraAddressLine'
,'Residence_Addresses_ZipPlus4'
,'Residence_Addresses_HouseNumber'
,'Residence_Addresses_PrefixDirection'
,'Residence_Addresses_StreetName'
,'Residence_Addresses_Designator'
,'Residence_Addresses_SuffixDirection'
,'Residence_Addresses_ApartmentNum'
,'Residence_Families_FamilyID'
,'Mailing_Addresses_AddressLine'
,'Mailing_Addresses_ExtraAddressLine'
,'Mailing_Addresses_HouseNumber'
,'Mailing_Addresses_PrefixDirection'
,'Mailing_Addresses_StreetName'
,'Mailing_Addresses_Designator'
,'Mailing_Addresses_SuffixDirection'
,'Mailing_Addresses_ApartmentNum'
,'Mailing_Families_FamilyID'
,'Voters_BirthDate'
,'Residence_Addresses_City'
,'DateConfidence_Description'
,'Precinct'
,'VoterTelephones_LandlineAreaCode'
,'Residence_Addresses_State'
,'Residence_Addresses_LatLongAccuracy'
,'Mailing_Addresses_ZipPlus4'
,'Voters_StateVoterID']
# dropping columns with the names listed in 'private_cols'
for df in place_dic.values():
df.drop(private_cols, axis = 1, inplace = True)
return place_dic
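# --- Illustrative usage (editor's addition, not part of the original module) ---
# A toy DataFrame run through the helpers above. 'Parties_Description' and
# 'Empty_Column' are made-up example columns; 'Voters_FirstName' comes from the
# private-column lists defined above.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({
        'Voters_FirstName': ['A', 'B'],         # personally identifiable -> dropped
        'Parties_Description': ['DEM', 'REP'],  # kept
        'Empty_Column': [np.nan, np.nan],       # all NaN -> dropped by remove_null
    })
    cleaned = drop_private(remove_null(toy))
    print(list(cleaned.columns))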
| 46.457364 | 74 | 0.615051 |
cb2de45b308eed70c30fec58294a7a89fb8f91a3 | 562 | py | Python | fate/logReader.py | Huangxy-Minel/Flare | 3c091567bfaedfdf0f0d41b00f3e3d501d572515 | [
"Apache-2.0"
]
| 1 | 2021-11-04T10:28:09.000Z | 2021-11-04T10:28:09.000Z | fate/logReader.py | Huangxy-Minel/flare | 3c091567bfaedfdf0f0d41b00f3e3d501d572515 | [
"Apache-2.0"
]
| null | null | null | fate/logReader.py | Huangxy-Minel/flare | 3c091567bfaedfdf0f0d41b00f3e3d501d572515 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
filepath = "/home/xinyang/fate_cluster_1.6.1/flare/fate/confs-10000/shared_dir/fate_flow_logs/202111241042473462280/guest/10000/INFO.log"
match_str = "Encrypt half_d costs: "
num_list = []
with open(filepath, 'r') as f:
for line in f.readlines():
idx = line.find(match_str)
if idx > -1:
temp_str = line[idx + len(match_str) :]
time = temp_str.split(' ')[0]
try:
num_list.append(float(time))
except ValueError:
pass
print(np.mean(num_list)) | 31.222222 | 137 | 0.606762 |
0844ae3b861d556ae03f04453ef61ff25e9ad5e0 | 1,781 | py | Python | src/commercetools/ml/client/by_project_key_request_builder.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
]
| 1 | 2021-04-07T20:01:30.000Z | 2021-04-07T20:01:30.000Z | src/commercetools/ml/client/by_project_key_request_builder.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
]
| null | null | null | src/commercetools/ml/client/by_project_key_request_builder.py | lime-green/commercetools-python-sdk | 63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6 | [
"MIT"
]
| null | null | null | # Generated file, please do not change!!!
import typing
from .image_search.by_project_key_image_search_request_builder import (
ByProjectKeyImageSearchRequestBuilder,
)
from .missing_data.by_project_key_missing_data_request_builder import (
ByProjectKeyMissingDataRequestBuilder,
)
from .recommendations.by_project_key_recommendations_request_builder import (
ByProjectKeyRecommendationsRequestBuilder,
)
from .similarities.by_project_key_similarities_request_builder import (
ByProjectKeySimilaritiesRequestBuilder,
)
if typing.TYPE_CHECKING:
from ..base_client import BaseClient
class ByProjectKeyRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def image_search(self) -> ByProjectKeyImageSearchRequestBuilder:
"""Search for similar products using an image as search input."""
return ByProjectKeyImageSearchRequestBuilder(
project_key=self._project_key,
client=self._client,
)
def recommendations(self) -> ByProjectKeyRecommendationsRequestBuilder:
return ByProjectKeyRecommendationsRequestBuilder(
project_key=self._project_key,
client=self._client,
)
def missing_data(self) -> ByProjectKeyMissingDataRequestBuilder:
return ByProjectKeyMissingDataRequestBuilder(
project_key=self._project_key,
client=self._client,
)
def similarities(self) -> ByProjectKeySimilaritiesRequestBuilder:
return ByProjectKeySimilaritiesRequestBuilder(
project_key=self._project_key,
client=self._client,
)
| 30.706897 | 77 | 0.729366 |
06570cdc7d959e09c31e847eb2270df7f6845782 | 635 | py | Python | backend/wallet/api/v1/urls.py | crowdbotics-apps/ledger-wallet-29295 | d96542a71685ce6d335882c10cf840355c8252f7 | [
"FTL",
"AML",
"RSA-MD"
]
| null | null | null | backend/wallet/api/v1/urls.py | crowdbotics-apps/ledger-wallet-29295 | d96542a71685ce6d335882c10cf840355c8252f7 | [
"FTL",
"AML",
"RSA-MD"
]
| null | null | null | backend/wallet/api/v1/urls.py | crowdbotics-apps/ledger-wallet-29295 | d96542a71685ce6d335882c10cf840355c8252f7 | [
"FTL",
"AML",
"RSA-MD"
]
| null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
PaymentTransactionViewSet,
TaskerPaymentAccountViewSet,
TaskerWalletViewSet,
PaymentMethodViewSet,
CustomerWalletViewSet,
)
router = DefaultRouter()
router.register("customerwallet", CustomerWalletViewSet)
router.register("taskerwallet", TaskerWalletViewSet)
router.register("taskerpaymentaccount", TaskerPaymentAccountViewSet)
router.register("paymenttransaction", PaymentTransactionViewSet)
router.register("paymentmethod", PaymentMethodViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 30.238095 | 68 | 0.811024 |
cb4bfdb19d3fafac29848032ad0f722b749b81df | 2,090 | py | Python | scripts/artifacts/accounts_de.py | deagler4n6/ALEAPP | c25692dac34e2382d1a8d51e5fbb2a39998dc638 | [
"MIT"
]
| 187 | 2020-02-22T23:35:32.000Z | 2022-03-31T13:46:24.000Z | scripts/artifacts/accounts_de.py | deagler4n6/ALEAPP | c25692dac34e2382d1a8d51e5fbb2a39998dc638 | [
"MIT"
]
| 65 | 2020-02-25T18:22:47.000Z | 2022-03-27T21:41:21.000Z | scripts/artifacts/accounts_de.py | deagler4n6/ALEAPP | c25692dac34e2382d1a8d51e5fbb2a39998dc638 | [
"MIT"
]
| 47 | 2020-02-24T22:33:35.000Z | 2022-03-11T05:19:42.000Z | import glob
import json
import os
import shutil
import sqlite3
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_accounts_de(files_found, report_folder, seeker, wrap_text):
slash = '\\' if is_platform_windows() else '/'
# Filter for path xxx/yyy/system_ce/0
for file_found in files_found:
file_found = str(file_found)
parts = file_found.split(slash)
uid = parts[-2]
try:
uid_int = int(uid)
# Skip sbin/.magisk/mirror/data/system_de/0 , it should be duplicate data??
if file_found.find('{0}mirror{0}'.format(slash)) >= 0:
continue
process_accounts_de(file_found, uid, report_folder)
except ValueError:
pass # uid was not a number
def process_accounts_de(folder, uid, report_folder):
#Query to create report
db = open_sqlite_db_readonly(folder)
cursor = db.cursor()
#Query to create report
cursor.execute('''
SELECT
datetime(last_password_entry_time_millis_epoch / 1000, 'unixepoch') as 'last pass entry',
name,
type
FROM
accounts
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Accounts_de')
report.start_artifact_report(report_folder, f'accounts_de_{uid}')
report.add_script()
data_headers = ('Last password entry','Name','Type')
data_list = []
for row in all_rows:
data_list.append((row[0], row[1], row[2]))
report.write_artifact_data_table(data_headers, data_list, folder)
report.end_artifact_report()
tsvname = f'accounts de {uid}'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'Accounts DE {uid}'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc(f'No accounts_de_{uid} data available')
db.close() | 33.174603 | 98 | 0.647847 |
d198456c6a97cb442b9f40e4c8995f805462d089 | 4,541 | py | Python | example/algs.py | jessicagainesbmi203/example | 94aac309e2e8f217edd1d45ffde10b586db0a6a7 | [
"Apache-2.0"
]
| null | null | null | example/algs.py | jessicagainesbmi203/example | 94aac309e2e8f217edd1d45ffde10b586db0a6a7 | [
"Apache-2.0"
]
| null | null | null | example/algs.py | jessicagainesbmi203/example | 94aac309e2e8f217edd1d45ffde10b586db0a6a7 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
import collections
def is_sorted(x):
for i in range(0,x.size-1,1):
if x[i] > x[i+1]:
return False
return True
def pointless_sort(x):
"""
This function always returns the same values to show how testing
works, check out the `test/test_alg.py` file to see.
"""
return np.array([1,2,3])
def bubblesort(x):
"""
Make a copy of x. Iterate through the elements of x, comparing each element
to the element ahead of it. If the first element is greater than the second,
swap the elements. Repeat the iteration until no swaps occur in an iteration.`
"""
conditionals = 0
assignments = 0
copy = np.copy(x)
sorted = False
while not sorted:
swap_this_round = False
for i in range(0,len(copy)-1,1):
assignments = assignments + 1 # assign counter
conditionals = conditionals + 1
if (copy[i] > copy[i+1]):
assignments = assignments + 1
temp = copy[i+1]
assignments = assignments + 1
copy[i+1] = copy[i]
assignments = assignments + 1
copy[i] = temp
swap_this_round = True
if not swap_this_round:
sorted = True
assert len(copy) == len(x)
assert set(copy) == set(x)
assert is_sorted(copy)
return {'sorted':copy, 'c':conditionals, 'a':assignments}
def quicksort(x):
"""
Pick a pivot element from the copy and move all elements less than the pivot
element to a left partition, and all elements greater than the pivot element
to a right partition. Recursively quicksort the left and right sub arrays.
Then concatenate the left partition, pivot, and right partition in that order.
"""
conditionals = 0
assignments = 0
if x.size <= 1:
return {'sorted' : x, 'c':0, 'a':0}
copy = np.copy(x)
pivot = copy[0]
left = np.array([])
right = np.array([])
equal = np.array([])
for item in copy[1:]:
conditionals = conditionals + 1
if item > pivot:
assignments = assignments + 1
right = np.append(right,item)
conditionals = conditionals + 1
if item < pivot:
assignments = assignments + 1
left = np.append(left,item)
conditionals = conditionals + 1
if item == pivot:
assignments = assignments + 1
equal = np.append(equal,item)
assignments = assignments + 1
equal = np.append(equal,pivot)
assignments = assignments + x.size
left_data = quicksort(left)
conditionals = conditionals + left_data['c']
assignments = assignments + left_data['a']
temp = np.append(left_data['sorted'],pivot)
right_data = quicksort(right)
conditionals = conditionals + right_data['c']
assignments = assignments + right_data['a']
sorted = np.append(temp,right_data['sorted'])
print(len(sorted))
print(len(x))
#assert len(sorted) == len(x)
#assert set(sorted) == set(x)
#assert is_sorted(sorted)
return {'sorted' : sorted, 'c' : conditionals, 'a' : assignments}
def insertionsort(x):
"""
Iterate through the elements of x. Add the first element to an empty array.
Then add each subsequent element of x to its sorted place in the new array
by iterating through the sorted array until reaching the first element
greater than the x element. Place the x element just before this element in the
new array. If an element greater than the x element is not found, place the x
element at the end of the new array. Repeat for all elements of x.
"""
conditionals = 0
assignments = 0
if x.size <= 1:
return {'sorted':x, 'c':0, 'a':0}
copy = np.copy(x)
sorted = np.array([])
assignments = assignments + 1
sorted = np.append(sorted,copy[0])
for item in copy[1:]:
item_placed = False
for j in range(0,len(sorted),1):
conditionals = conditionals + 1
if sorted[j] > item:
assignments = assignments + 1
sorted = np.insert(sorted,j,item)
item_placed = True
break
if not item_placed:
assignments = assignments + 1
sorted = np.insert(sorted,len(sorted),item)
assert len(sorted) == len(x)
assert set(sorted) == set(x)
assert is_sorted(sorted)
return {'sorted':sorted, 'c':conditionals, 'a':assignments}
| 34.664122 | 84 | 0.600749 |
930e111fb51336c0b115c993aa563cec891027b6 | 219 | py | Python | src/lesson3/samples/keyInputBasic.py | saji-ryu/pyxel-study | b10ef781a86cfea4dad28efee89f851195189560 | [
"MIT"
]
| null | null | null | src/lesson3/samples/keyInputBasic.py | saji-ryu/pyxel-study | b10ef781a86cfea4dad28efee89f851195189560 | [
"MIT"
]
| null | null | null | src/lesson3/samples/keyInputBasic.py | saji-ryu/pyxel-study | b10ef781a86cfea4dad28efee89f851195189560 | [
"MIT"
]
| null | null | null | import pyxel
pyxel.init(200, 200)
a = 0
def update():
global a
if pyxel.btn(pyxel.KEY_SPACE):
a += 1
def draw():
global a
pyxel.cls(7)
pyxel.circ(a, a, 10, 0)
pyxel.run(update, draw)
| 10.428571 | 34 | 0.570776 |
afeb2199b2290f87e1cab148227a5318b676c4a8 | 1,199 | py | Python | test/test_html_ssrf_detection_result.py | Cloudmersive/Cloudmersive.APIClient.Python.Validate | 894a3f578c3860db41b3eed179dcc52e02f565a0 | [
"Apache-2.0"
]
| 3 | 2018-06-23T21:37:21.000Z | 2020-04-20T23:07:36.000Z | test/test_html_ssrf_detection_result.py | Cloudmersive/Cloudmersive.APIClient.Python.Validate | 894a3f578c3860db41b3eed179dcc52e02f565a0 | [
"Apache-2.0"
]
| 1 | 2019-02-04T17:03:35.000Z | 2019-03-02T20:16:52.000Z | test/test_html_ssrf_detection_result.py | Cloudmersive/Cloudmersive.APIClient.Python.Validate | 894a3f578c3860db41b3eed179dcc52e02f565a0 | [
"Apache-2.0"
]
| 2 | 2019-03-21T15:54:15.000Z | 2020-05-27T17:30:43.000Z | # coding: utf-8
"""
validateapi
The validation APIs help you validate data. Check if an E-mail address is real. Check if a domain is real. Check up on an IP address, and even where it is located. All this and much more is available in the validation API. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_validate_api_client
from cloudmersive_validate_api_client.models.html_ssrf_detection_result import HtmlSsrfDetectionResult # noqa: E501
from cloudmersive_validate_api_client.rest import ApiException
class TestHtmlSsrfDetectionResult(unittest.TestCase):
"""HtmlSsrfDetectionResult unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHtmlSsrfDetectionResult(self):
"""Test HtmlSsrfDetectionResult"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_validate_api_client.models.html_ssrf_detection_result.HtmlSsrfDetectionResult() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 29.243902 | 240 | 0.748957 |
e062c94bad94c0d995da1a508a5a451bccbd0c25 | 2,086 | py | Python | tests/tests/correctness/EPLAnalytics/Streaming_Calculations/FFT/fft_cor_010/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
]
| 3 | 2019-09-02T18:21:22.000Z | 2020-04-17T16:34:57.000Z | tests/tests/correctness/EPLAnalytics/Streaming_Calculations/FFT/fft_cor_010/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
]
| null | null | null | tests/tests/correctness/EPLAnalytics/Streaming_Calculations/FFT/fft_cor_010/run.py | rpeach-sag/apama-industry-analytics-kit | a3f6039915501d41251b6f7ec41b0cb8111baf7b | [
"Apache-2.0"
]
| null | null | null | # $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest()
self.injectAnalytic(correlator)
self.injectFFTAnalysis(correlator)
self.ready(correlator)
correlator.receive(filename='FFT.evt', channels=['Output1'])
correlator.injectMonitorscript(['test.mon'], self.input)
self.waitForSignal('correlator.out',
expr='Analytic FFT started for inputDataNames',
condition='==1',
timeout=5)
correlator.sendLiteral('com.industry.analytics.streaming_calculations.FFTAnalysis_cor_010.StartDataGenerator()')
self.waitForSignal('FFT.evt', expr='com.industry.analytics\.Data', condition='==27', timeout=10)
def validate(self):
# Make sure there were no errors in the logs
self.checkSanity()
# Make sure that the we got the right number of Data events called
self.assertLineCount('FFT.evt', expr='com.industry.analytics\.Data', condition='==27')
self.assertLineCount('FFT.evt', expr='com.industry.analytics\.Data\("Output1","c","sourceId",0,230,"",.*,0,0,{}\)', condition='==9')
self.assertLineCount('FFT.evt', expr='com.industry.analytics\.Data\("Output1","c","sourceId",0,120,"",.*,0,0,{}\)', condition='==9')
self.assertLineCount('FFT.evt', expr='com.industry.analytics\.Data\("Output1","c","sourceId",0,50,"",.*,0,0,{}\)', condition='==9')
# Make sure that the Data events were as expected
self.assertDiff('FFT.evt', 'FFT.evt')
| 48.511628 | 343 | 0.704698 |
26d1e4e17ad7b065abe2a53d7352b9aff2efd025 | 10,590 | py | Python | zero/common/train.py | Wesley-Jzy/ColossalAI-Benchmark | 0b8b737297316c898c9b3ed6d7eaebaee4e3692c | [
"Apache-2.0"
]
| 21 | 2022-01-18T07:37:55.000Z | 2022-03-27T12:54:13.000Z | zero/common/train.py | Wesley-Jzy/ColossalAI-Benchmark | 0b8b737297316c898c9b3ed6d7eaebaee4e3692c | [
"Apache-2.0"
]
| 12 | 2022-01-18T06:09:57.000Z | 2022-03-23T07:48:02.000Z | zero/common/train.py | Wesley-Jzy/ColossalAI-Benchmark | 0b8b737297316c898c9b3ed6d7eaebaee4e3692c | [
"Apache-2.0"
]
| 21 | 2022-01-18T05:52:55.000Z | 2022-03-25T06:05:57.000Z | import math
import time
import torch
from torch.distributed import all_reduce, get_rank, get_world_size
from tqdm import tqdm
from zero.common.utils import CONFIG, AsyncMemoryMonitor, print_log, get_tflops
def _train(epoch, rank, world_size, train_dataloader, model, criterion, optimizer, lr_scheduler, scaler, mem_monitor):
use_optimizer_backward = CONFIG['method'] in ['colossalai']
use_integrated_backward = CONFIG['method'] in ['deepspeed', 'patrickstar']
use_integrated_step = CONFIG['method'] in ['deepspeed']
use_autocast = CONFIG['method'] in ['torch', 'colossalai'] and \
'fp16' in CONFIG and CONFIG['fp16'].get('enabled', True)
clip_grad_norm = CONFIG.get('gradient_clipping', 0.)
use_integraded_clip_grad = CONFIG['method'] in ['fairscale']
use_colossalai_zero_v1 = CONFIG['method'] == 'colossalai' and CONFIG.get('sharded_model_version', 2) == 1
model.train()
num_steps = len(train_dataloader)
if 'steps_per_epoch' in CONFIG['hyperparameter'] and CONFIG['hyperparameter']['steps_per_epoch'] < num_steps:
num_steps = CONFIG['hyperparameter']['steps_per_epoch']
progress = range(num_steps)
if rank == 0:
progress = tqdm(progress, desc=f"[Epoch {epoch} / Train]")
train_loss = torch.zeros(()).to(torch.float).to(rank)
used_time = 0.
num_steps = 0
num_samples = torch.zeros(()).to(torch.int).to(rank)
num_tokens = torch.zeros(()).to(torch.int).to(rank)
data_iter = iter(train_dataloader)
if mem_monitor is not None:
mem_monitor.start()
for _ in progress:
fwd_start = time.time()
optimizer.zero_grad()
if use_colossalai_zero_v1:
model.zero_grad(set_to_none=True)
batch = next(data_iter)
labels = batch.pop('labels')
batch_size = None
batch_tokens = None
if isinstance(labels, torch.Tensor):
labels = labels.to(rank)
batch_size = labels.size(0)
batch_tokens = labels.numel()
else:
for k, v in labels.items():
labels[k] = v.to(rank)
if batch_size is None:
batch_size = v.size(0)
if batch_tokens is None:
batch_tokens = v.numel()
for k, v in batch.items():
batch[k] = v.to(rank)
if use_autocast:
with torch.cuda.amp.autocast():
outputs = model(**batch)
else:
outputs = model(**batch)
loss = criterion(outputs, labels)
train_loss += loss
fwd_end = time.time()
bwd_start = time.time()
if use_colossalai_zero_v1:
loss.backward()
optimizer.step()
lr_scheduler.step()
elif use_integrated_backward: # deepspeed & patrickstar style
model.backward(loss)
if use_integrated_step:
model.step() # deepspeed style
else:
optimizer.step() # patrickstar style
lr_scheduler.step()
elif use_optimizer_backward: # colossalai style
optimizer.backward(loss)
if clip_grad_norm > 0:
optimizer.clip_grad_norm(model, clip_grad_norm)
optimizer.step()
lr_scheduler.step()
elif scaler is not None: # torch & fairscale amp style
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
if clip_grad_norm > 0:
if use_integraded_clip_grad: # fairscale style
model.clip_grad_norm_(clip_grad_norm)
else: # torch style
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_norm)
scaler.step(optimizer)
scaler.update()
lr_scheduler.step()
else: # torch & fairscale normal style
loss.backward()
if clip_grad_norm > 0:
if use_integraded_clip_grad: # fairscale style
model.clip_grad_norm_(clip_grad_norm)
else: # torch style
torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_norm)
optimizer.step()
lr_scheduler.step()
bwd_end = time.time()
num_steps += 1
num_samples += batch_size
num_tokens += batch_tokens
fwd_time = fwd_end - fwd_start
bwd_time = bwd_end - bwd_start
batch_time = fwd_time + bwd_time
used_time += batch_time
if rank == 0:
progress.set_postfix(loss=loss.item(),
lr=lr_scheduler.get_last_lr()[0],
time_forward=fwd_time,
time_backward=bwd_time,
throughput=batch_size * world_size / (batch_time + 1e-12),
tflops=get_tflops(batch_time, batch_tokens * world_size))
peak_mem = None
if mem_monitor is not None:
peak_mem = max(mem_monitor.finish())
all_reduce(train_loss)
all_reduce(num_samples)
all_reduce(num_tokens)
msg = f'[Epoch {epoch} / Train]: Loss = {train_loss.item() / (world_size * num_steps):.3f}'
msg += f' | Throughput = {num_samples.item() / (used_time + 1e-12):.3f} samples/sec'
msg += f' | TFLOPS = {get_tflops(used_time, num_tokens.item()):.3f}'
if peak_mem is not None:
msg += f' | Peak memory = {peak_mem / 1024:.3f} GB.'
print_log(msg)
def _test(epoch, rank, world_size, test_dataloader, model, criterion, mem_monitor):
use_autocast = CONFIG['method'] in ['torch', 'colossalai'] and \
'fp16' in CONFIG and CONFIG['fp16'].get('enabled', True)
evaluation = CONFIG['model']['evaluation']
model.eval()
num_steps = len(test_dataloader)
if 'steps_per_epoch' in CONFIG['hyperparameter'] and CONFIG['hyperparameter']['steps_per_epoch'] < num_steps:
num_steps = CONFIG['hyperparameter']['steps_per_epoch']
progress = range(num_steps)
if rank == 0:
progress = tqdm(progress, desc=f"[Epoch {epoch} / Test]")
test_loss = torch.zeros(()).to(torch.float).to(rank)
used_time = 0.
num_steps = 0
num_samples = torch.zeros(()).to(torch.int).to(rank)
num_tokens = torch.zeros(()).to(torch.int).to(rank)
correct = torch.zeros(()).to(torch.int).to(rank)
data_iter = iter(test_dataloader)
if mem_monitor is not None:
mem_monitor.start()
with torch.no_grad():
for _ in progress:
batch_start = time.time()
batch = next(data_iter)
labels = batch.pop('labels')
batch_size = None
batch_tokens = None
if isinstance(labels, torch.Tensor):
labels = labels.to(rank)
batch_size = labels.size(0)
batch_tokens = labels.numel()
else:
for k, v in labels.items():
labels[k] = v.to(rank)
if batch_size is None:
batch_size = v.size(0)
if batch_tokens is None:
batch_tokens = v.numel()
for k, v in batch.items():
batch[k] = v.to(rank)
if use_autocast:
with torch.cuda.amp.autocast():
outputs = model(**batch)
else:
outputs = model(**batch)
loss = criterion(outputs, labels)
test_loss += loss
batch_end = time.time()
num_steps += 1
num_samples += batch_size
num_tokens += batch_tokens
batch_time = batch_end - batch_start
used_time += batch_time
if rank == 0:
metrics = dict(loss=loss.item(),
step_time=batch_time,
throughput=batch_size * world_size / (batch_time + 1e-12),
tflops=get_tflops(batch_time, batch_tokens * world_size))
if evaluation == 'ppl':
metrics['perplexity'] = math.exp(loss.item())
elif evaluation == 'acc':
if not isinstance(labels, torch.Tensor):
labels = labels['targets_a']
batch_correct = torch.sum(labels == torch.argmax(outputs, dim=-1)).item()
metrics['accuracy'] = batch_correct / batch_size
correct += batch_correct
else:
raise ValueError(f'Invalid evaluation method {evaluation}')
progress.set_postfix(**metrics)
peak_mem = None
if mem_monitor is not None:
peak_mem = max(mem_monitor.finish())
all_reduce(test_loss)
reduced_loss = test_loss.item() / (world_size * num_steps)
all_reduce(num_samples)
all_reduce(num_tokens)
if evaluation == 'acc':
all_reduce(correct)
msg = f'[Epoch {epoch} / Test]: Loss = {reduced_loss:.3f}'
if evaluation == 'ppl':
msg += f' | Perplexity = {math.exp(reduced_loss):.3f}'
else:
msg += f' | Accuracy = {correct.item() * 100 / num_samples.item():.3f} %'
msg += f' | Throughput = {num_samples.item() / (used_time + 1e-12):.3f} samples/sec'
msg += f' | TFLOPS = {get_tflops(used_time, num_tokens.item()):.3f}'
if peak_mem is not None:
msg += f' | Peak memory = {peak_mem / 1024:.3f} GB.'
print_log(msg)
def train(model, train_data, test_data, criterion, optimizer, scaler, lr_scheduler):
rank = get_rank()
world_size = get_world_size()
mem_monitor = None
if CONFIG.get('use_mem_monitor'):
mem_monitor = AsyncMemoryMonitor(rank)
numel = CONFIG['model']['numel']
if numel < 1e9:
msg = f'{numel / 1e6:.3f} M'
else:
msg = f'{numel / 1e9:.3f} B'
print_log(f'Model is built (parameter size = {msg}).')
print_log('Benchmark start.')
for epoch in range(CONFIG['hyperparameter']['num_epochs']):
_train(epoch, rank, world_size, train_data, model, criterion, optimizer, lr_scheduler, scaler, mem_monitor)
_test(epoch, rank, world_size, test_data, model, criterion, mem_monitor)
print_log('Benchmark complete.')
| 36.770833 | 119 | 0.560246 |
bbc1d6e0a55056cbe4b8ffb3c3cc4cc9eb80fe3c | 7,572 | py | Python | homemonitoring/homemonitoring/solaredge.py | BigCrunsh/home-monitoring | c82e02462cbd040e5814464b926321bdb84ed446 | [
"MIT"
]
| 2 | 2020-06-25T20:13:33.000Z | 2020-08-06T09:08:26.000Z | homemonitoring/homemonitoring/solaredge.py | BigCrunsh/home-monitoring | c82e02462cbd040e5814464b926321bdb84ed446 | [
"MIT"
]
| 1 | 2021-06-17T09:50:33.000Z | 2021-06-17T09:50:33.000Z | homemonitoring/homemonitoring/solaredge.py | BigCrunsh/home-monitoring | c82e02462cbd040e5814464b926321bdb84ed446 | [
"MIT"
]
| null | null | null | """The module contains a wrapper of the Solaredge API with additional functionality."""
import solaredge
import datetime
import pytz
import pandas as pd
import numpy as np
class Solaredge(solaredge.Solaredge):
"""Solaredge is a wrapper around the Solaredge API with additional functionality.
The Solaredge API is queried with date ranges. Date ranges are relative to the location
of the photovoltaic system. The wrapper sets reasonable default values, normalizes dates
to be compliant with the API call, and splits the call by month ranges.
Args:
site_token(string): api key
"""
def __init__(self, *args, **kwargs):
super(Solaredge, self).__init__(*args, **kwargs)
self.meta = None
def get_meta(self):
"""Returns solaredge meta data.
Queries list end point of API. The call is cached it will return
the same result when called twice.
Returns:
dict: meta data
"""
if self.meta is None:
self.meta = self.get_list()['sites']['site'][0]
return self.meta
def get_site_id(self):
"""Returns solaredge site id.
Queries list end point of API. The call is cached it will return
the same result when called twice.
Returns:
int: site id
"""
return self.get_meta()['id']
def get_installation_date(self):
"""Returns the installation date of the photovoltaic system.
Queries list end point of API. The call is cached it will return
the same result when called twice.
Returns:
datetime.datetime: installation date (localized)
"""
return self.get_tz().localize(
datetime.datetime.fromisoformat(self.get_meta()['installationDate'])
)
def get_tz(self):
"""Returns time zone.
Queries list end point of API. The call is cached it will return
the same result when called twice.
Returns:
pytz.timezone: system time zone
"""
return pytz.timezone(self.get_meta()['location']['timeZone'])
@staticmethod
def _get_date_ranges(start_datetime, end_datetime):
"""Split date range by months.
The SolarEdge API is limited to one-month period, i.e., period between endTime and
startTime should not exceed one month. This function splits the date range into
chunks by months.
Args:
start_datetime (datetime.datetime): localized start datetime of range
end_datetime (datetime.datetime): localized end datetime of range
Returns:
zip: list of start and end dates by months
"""
end_dates = np.array(list(map(
lambda d: d.replace(hour=23, minute=59, second=59),
pd.date_range(start_datetime, end_datetime, freq='M', normalize=False).to_pydatetime()
)))
start_dates = end_dates + datetime.timedelta(seconds=1)
if start_datetime <= end_datetime:
end_dates = np.append(end_dates, end_datetime)
start_dates = np.append(start_datetime, start_dates)
return zip(start_dates, end_dates)
def _normalize_date(self, datetime):
"""Normalizes datetime for SolarEdge API call.
Normalizes `datetime` to be used in solaredge API, i.e.,
- timezone is converted to time zone of system location
- time zone info and microseconds are removed (API fails otherwise)
Args:
datetime (datetime.datetime): localized datetime to be normalized
Raises:
AssertionError: if datime misses tzinfo
Returns:
datetime.datetime: datetime normalized for solaredge API call
"""
assert datetime.tzinfo is not None, "dates are expected to be localized"
return datetime.astimezone(self.get_tz()).replace(microsecond=0).replace(tzinfo=None)
def get_power_details(self, start_time=None, end_time=None):
"""Returns power details.
Calls `powerDetails` endpoint of SolarEdge API. The parameters are not
limited to one-month period. The first data point returned is the `start_time`
rounded down to the full quarter of an hour. The last data point returned is the
`end_time` of the last (not equal) full quarter of an hour. The last available data point
might change until the next quarter of the hour.
Example:
self.get_power_details(
start_time=datetime.datetime(2020, 4, 21, 20, 55),
end_time=datetime.datetime(2020, 4, 21, 21, 0)
)
# returns data for '2020-04-21 20:45:00'
self.get_power_details(
start_time=datetime.datetime(2020, 4, 21, 20, 55),
end_time=datetime.datetime(2020, 4, 21, 21, 1)
)
# returns data for '2020-04-21 20:45:00' and '2020-04-21 21:00:00'
Args:
start_time (datetime.datetime): start datetime of range;
default: installation date (based on solaredge meta data)
end_time (datetime.datetime): end datetime of range
default: current timestamp
Returns:
dict: response
"""
start_time = self._normalize_date(
start_time or self.get_installation_date()
)
end_time = self._normalize_date(
end_time or datetime.datetime.now(self.get_tz())
)
return [
super(Solaredge, self).get_power_details(
site_id=self.get_site_id(), start_time=s, end_time=e
)
for s, e in self._get_date_ranges(start_time, end_time)
]
def get_energy_details(self, start_time=None, end_time=None):
"""Returns energy details.
Calls `energyDetails` endpoint of SolarEdge API. The parameters are not
limited to one-month period. The first data point returned is the `start_time`
rounded down to the full quarter of an hour. The last data point returned is the
`end_time` of the last (not equal) full quarter of an hour. The last available data point
might change until the next quarter of the hour.
Example:
self.get_power_details(
start_time=datetime.datetime(2020, 4, 21, 20, 55),
end_time=datetime.datetime(2020, 4, 21, 21, 0)
)
# returns data for '2020-04-21 20:45:00'
self.get_power_details(
start_time=datetime.datetime(2020, 4, 21, 20, 55),
end_time=datetime.datetime(2020, 4, 21, 21, 1)
)
# returns data for '2020-04-21 20:45:00' and '2020-04-21 21:00:00'
Args:
start_time (datetime.datetime): start datetime of range;
default: installation date (based on solaredge meta data)
end_time (datetime.datetime): end datetime of range
default: current timestamp
Returns:
dict: response
"""
start_time = self._normalize_date(
start_time or self.get_installation_date()
)
end_time = self._normalize_date(
end_time or datetime.datetime.now(self.get_tz())
)
return [
super(Solaredge, self).get_energy_details(
site_id=self.get_site_id(), start_time=s, end_time=e, time_unit='QUARTER_OF_AN_HOUR'
)
for s, e in self._get_date_ranges(start_time, end_time)
]
| 36.57971 | 100 | 0.623217 |
2ab675eae82058adddc28ece463f504e9b3fd706 | 6,211 | py | Python | sugaroid/brain/convert.py | vardaan-raj/sugaroid | d0476fb9c44a73fee2e0de45162f2b1ac86452aa | [
"MIT"
]
| 4 | 2020-09-28T13:52:40.000Z | 2020-10-30T15:24:50.000Z | sugaroid/brain/convert.py | sreyasaju/sugaroid | d58e06fb664daa16fda1bf23cc73068efcd5634c | [
"MIT"
]
| null | null | null | sugaroid/brain/convert.py | sreyasaju/sugaroid | d58e06fb664daa16fda1bf23cc73068efcd5634c | [
"MIT"
]
| null | null | null | """
MIT License
Sugaroid Artificial Intelligence
Chatbot Core
Copyright (c) 2020-2021 Srevin Saju
Copyright (c) 2021 The Sugaroid Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from chatterbot.logic import LogicAdapter
from currency_converter import CurrencyConverter
from sugaroid.sugaroid import SugaroidStatement
from sugaroid.brain.ooo import Emotion
class SugaroidCurrency:
def __init__(self):
self.currency_api = CurrencyConverter()
def convert(self, src: str, dest: str, amount: float):
if (src in self.currency_api.currencies) and (
dest in self.currency_api.currencies
):
return self.currency_api.convert(amount, src, dest)
else:
if src not in self.currency_api.currencies:
bad_cur = src
else:
bad_cur = dest
return "Hmm. Seems like {} is not a recognized currency.".format(bad_cur)
class CurrencyAdapter(LogicAdapter):
"""
Gives a random response, because Sugaroid tries not to say I don't know
"""
def __init__(self, chatbot, **kwargs):
super().__init__(chatbot, **kwargs)
self.currencies_src_ord = None
self.currencies_dest = None
self.currencies_src = None
self.tokenized = None
def can_process(self, statement):
self.tokenized = self.chatbot.lp.tokenize(
str(statement)
.replace("$", " USD ")
.replace("₹", " INR ")
.replace("€", " EUR ")
.replace("£", " GBP ")
)
self.currencies_dest = []
self.currencies_src = None
if len(self.tokenized) >= 3:
for i in range(len(self.tokenized) - 1):
if self.tokenized[i].tag_ == "TO":
dst = str(self.tokenized[i + 1].text).upper()
if len(dst) < 4:
self.currencies_dest.append(dst)
try:
if len(self.tokenized[i - 1].lower_) < 4:
self.currencies_src = str(
self.tokenized[i - 1].text
).upper()
except IndexError:
pass
elif self.tokenized[i].lower_ == "is":
for j in range(i + 1, len(self.tokenized)):
if self.tokenized[j].tag_ == "IN":
dst = str(self.tokenized[j + 1].text).upper()
if len(dst) < 4:
self.currencies_dest.append(dst)
try:
src = str(self.tokenized[j - 1].text).upper()
if len(src) < 4:
self.currencies_src = src
except IndexError:
pass
if self.currencies_dest and self.currencies_src:
return True
else:
return False
elif self.tokenized[i].tag_ == "IN":
dst = str(self.tokenized[i + 1].text).upper()
if len(dst) < 4:
self.currencies_dest.append(dst)
if self.currencies_dest and self.currencies_src:
logging.info(
"CurrencyAdapter: Recognized source and destination currency types. src: {} and dest: {}".format(
self.currencies_src, self.currencies_dest
)
)
return True
else:
return False
def process(self, statement, additional_response_selection_parameters=None):
emotion = Emotion.rich
confidence = 0.9
response = None
converted = []
for i in self.tokenized:
if i.tag_ in ["LS", "CD"]:
self.currencies_src_ord = i.text
if self.currencies_src_ord:
try:
self.currencies_src_ord = float(self.currencies_src_ord)
sg_currency = SugaroidCurrency()
for destination in self.currencies_dest:
converted.append(
"{} {}".format(
sg_currency.convert(
self.currencies_src.upper(),
destination.upper(),
self.currencies_src_ord,
),
destination.upper(),
)
)
response = " ".join(converted)
except ValueError:
response = "Seems like I cannot process {}. Maybe try a numerical value for me to understand better".format(
self.currencies_src_ord
)
else:
response = "Seems like you forgot the important part of your currency conversion statement. The number!"
selected_statement = SugaroidStatement(response, chatbot=True)
selected_statement.confidence = confidence
selected_statement.emotion = emotion
return selected_statement
| 39.310127 | 124 | 0.5545 |
749c2f169a498db20130c5e07b8f6e4474e468e0 | 12,087 | py | Python | karolos/agents/sac.py | tmdt-buw/karolos | 33039b9a318b4d7ab4b399e2a8737655eccbc80b | [
"MIT"
]
| 12 | 2021-01-18T15:00:23.000Z | 2022-03-04T13:42:14.000Z | karolos/agents/sac.py | tmdt-buw/karolos | 33039b9a318b4d7ab4b399e2a8737655eccbc80b | [
"MIT"
]
| 4 | 2021-03-12T15:53:34.000Z | 2021-11-18T08:38:48.000Z | karolos/agents/sac.py | tmdt-buw/karolos | 33039b9a318b4d7ab4b399e2a8737655eccbc80b | [
"MIT"
]
| null | null | null | """
https://spinningup.openai.com/en/latest/algorithms/sac.html
"""
import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal
from . import Agent
from .utils.nn import NeuralNetwork, Clamp, init_xavier_uniform
class Policy(NeuralNetwork):
def __init__(self, state_dims, action_dim, network_structure,
log_std_min=-20, log_std_max=2):
in_dim = int(
np.sum([np.product(state_dim) for state_dim in state_dims]))
out_dim = int(np.product(action_dim)) * 2
super(Policy, self).__init__(in_dim, network_structure)
dummy = super(Policy, self).forward(torch.zeros((1, in_dim)))
self.operators.append(nn.Linear(dummy.shape[1], out_dim))
self.operators.apply(init_xavier_uniform)
self.std_clamp = Clamp(log_std_min, log_std_max)
def forward(self, *state_args, deterministic=True):
x = super(Policy, self).forward(*state_args)
mean, log_std = torch.split(x, x.shape[1] // 2, dim=1)
log_std = self.std_clamp(log_std)
if deterministic:
action = torch.tanh(mean)
log_prob = torch.zeros(log_std.shape[0]).unsqueeze_(-1)
else:
std = log_std.exp()
normal = MultivariateNormal(mean, torch.diag_embed(std.pow(2)))
action_base = normal.rsample()
log_prob = normal.log_prob(action_base)
log_prob.unsqueeze_(-1)
action = torch.tanh(action_base)
action_bound_compensation = torch.log(
1. - action.pow(2) + np.finfo(float).eps).sum(dim=1,
keepdim=True)
log_prob.sub_(action_bound_compensation)
return action, log_prob
class Critic(NeuralNetwork):
def __init__(self, state_dims, action_dim, network_structure):
in_dim = int(
np.sum([np.product(arg) for arg in state_dims]) + np.product(
action_dim))
super(Critic, self).__init__(in_dim, network_structure)
dummy = super(Critic, self).forward(torch.zeros((1, in_dim)))
self.operators.append(nn.Linear(dummy.shape[1], 1))
self.operators.apply(init_xavier_uniform)
def forward(self, *args):
return super(Critic, self).forward(*args)
class AgentSAC(Agent):
def __init__(self, config, observation_space, action_space,
reward_function, experiment_dir=None):
super(AgentSAC, self).__init__(config, observation_space, action_space,
reward_function, experiment_dir)
self.learning_rate_critic = config.get("learning_rate_critic", 5e-4)
self.learning_rate_policy = config.get("learning_rate_policy", 5e-4)
self.learning_rate_entropy_regularization = config.get(
"learning_rate_entropy_regularization", 5e-5)
self.weight_decay = config.get("weight_decay", 1e-4)
self.tau = config.get('tau', 2.5e-3)
self.entropy_regularization = config.get("entropy_regularization", 1)
self.automatic_entropy_regularization = config.get(
'automatic_entropy_regularization', True)
self.policy_structure = config.get('policy_structure', [])
self.critic_structure = config.get('critic_structure', [])
self.target_entropy = -1 * self.action_dim[0]
# generate networks
self.critic_1 = Critic(self.state_dim, self.action_dim,
self.critic_structure).to(self.device)
self.critic_2 = Critic(self.state_dim, self.action_dim,
self.critic_structure).to(self.device)
self.target_critic_1 = Critic(self.state_dim, self.action_dim,
self.critic_structure).to(self.device)
self.target_critic_2 = Critic(self.state_dim, self.action_dim,
self.critic_structure).to(self.device)
self.policy = Policy(self.state_dim, self.action_dim,
self.policy_structure).to(self.device)
self.log_entropy_regularization = torch.tensor(
[np.log(self.entropy_regularization)], dtype=torch.float,
requires_grad=True, device=self.device)
self.optimizer_critic_1 = torch.optim.AdamW(self.critic_1.parameters(),
lr=self.learning_rate_critic,
weight_decay=self.weight_decay)
self.optimizer_critic_2 = torch.optim.AdamW(self.critic_2.parameters(),
lr=self.learning_rate_critic,
weight_decay=self.weight_decay)
self.optimizer_policy = torch.optim.AdamW(self.policy.parameters(),
lr=self.learning_rate_policy,
weight_decay=self.weight_decay)
self.optimizer_entropy_regularization = torch.optim.AdamW(
[self.log_entropy_regularization],
lr=self.learning_rate_entropy_regularization,
weight_decay=self.weight_decay)
self.update_target(self.critic_1, self.target_critic_1, 1.)
self.update_target(self.critic_2, self.target_critic_2, 1.)
self.criterion_critic_1 = nn.MSELoss()
self.criterion_critic_2 = nn.MSELoss()
def learn(self):
self.policy.train()
self.critic_1.train()
self.critic_2.train()
self.target_critic_1.train()
self.target_critic_2.train()
experiences, indices = self.memory.sample(self.batch_size)
states, actions, rewards, next_states, dones = experiences
states = torch.FloatTensor(states).to(self.device)
actions = torch.FloatTensor(actions).to(self.device)
rewards = torch.FloatTensor(rewards).unsqueeze(1).to(self.device)
next_states = torch.FloatTensor(next_states).to(self.device)
dones = torch.FloatTensor(np.float32(dones)).unsqueeze(1).to(
self.device)
rewards *= self.reward_scale
predicted_value_1 = self.critic_1(states, actions)
predicted_value_2 = self.critic_2(states, actions)
predicted_action, log_prob = self.policy(states,
deterministic=False)
predicted_next_action, next_log_prob = self.policy(next_states,
deterministic=False)
if self.automatic_entropy_regularization is True:
entropy_regularization_loss = -(self.log_entropy_regularization * (
log_prob + self.target_entropy).detach()).mean()
self.optimizer_entropy_regularization.zero_grad()
entropy_regularization_loss.backward()
self.optimizer_entropy_regularization.step()
self.entropy_regularization = self.log_entropy_regularization.exp()
else:
self.entropy_regularization = 1.
# Train critic
target_critic_min = torch.min(
self.target_critic_1(next_states, predicted_next_action),
self.target_critic_2(next_states, predicted_next_action))
target_critic_min.sub_(self.entropy_regularization * next_log_prob)
target_q_value = rewards + (
1 - dones) * self.reward_discount * target_critic_min
q_val_loss_1 = self.criterion_critic_1(predicted_value_1,
target_q_value.detach())
q_val_loss_2 = self.criterion_critic_2(predicted_value_2,
target_q_value.detach())
self.optimizer_critic_1.zero_grad()
self.optimizer_critic_2.zero_grad()
q_val_loss_1.backward()
q_val_loss_2.backward()
self.optimizer_critic_1.step()
self.optimizer_critic_2.step()
# Training policy
predicted_new_q_val = torch.min(
self.critic_1(states, predicted_action),
self.critic_2(states, predicted_action))
loss_policy = (
self.entropy_regularization * log_prob - predicted_new_q_val).mean()
self.optimizer_policy.zero_grad()
loss_policy.backward()
self.optimizer_policy.step()
predicted_value_avg = (predicted_value_1 + predicted_value_2) / 2
self.update_priorities(indices, predicted_value_avg, target_q_value)
# Update target
self.update_target(self.critic_1, self.target_critic_1, self.tau)
self.update_target(self.critic_2, self.target_critic_2, self.tau)
if self.writer:
self.writer.add_scalar('entropy_regularization', self.entropy_regularization, self.learning_step)
self.writer.add_histogram('predicted_value_1', predicted_value_1, self.learning_step)
self.writer.add_histogram('predicted_value_2', predicted_value_2, self.learning_step)
self.writer.add_histogram('rewards', rewards, self.learning_step)
try:
self.writer.add_histogram('target_critic_min_1', target_critic_min,
self.learning_step)
except:
raise
self.writer.add_histogram('target_critic_min_2', target_critic_min,
self.learning_step)
self.writer.add_histogram('target_q_value', target_q_value, self.learning_step)
self.writer.add_scalar('q_val_loss1', q_val_loss_1.item(), self.learning_step)
self.writer.add_scalar('q_val_loss2', q_val_loss_2.item(), self.learning_step)
self.learning_step += self.sample_training_ratio
def save(self, path):
if not osp.exists(path):
os.makedirs(path)
torch.save(self.policy.state_dict(), osp.join(path, "policy.pt"))
torch.save(self.critic_1.state_dict(), osp.join(path, "critic_1.pt"))
torch.save(self.critic_2.state_dict(), osp.join(path, "critic_2.pt"))
torch.save(self.target_critic_1.state_dict(),
osp.join(path, "target_critic_1.pt"))
torch.save(self.target_critic_2.state_dict(),
osp.join(path, "target_critic_2.pt"))
torch.save(self.optimizer_policy.state_dict(),
osp.join(path, "optimizer_policy.pt"))
torch.save(self.optimizer_critic_1.state_dict(),
osp.join(path, "optimizer_critic_1.pt"))
torch.save(self.optimizer_critic_2.state_dict(),
osp.join(path, "optimizer_critic_2.pt"))
def load(self, path):
self.policy.load_state_dict(
torch.load(osp.join(path, "policy.pt")))
self.critic_1.load_state_dict(
torch.load(osp.join(path, "critic_1.pt")))
self.critic_2.load_state_dict(
torch.load(osp.join(path, "critic_2.pt")))
self.target_critic_1.load_state_dict(
torch.load(osp.join(path, "target_critic_1.pt")))
self.target_critic_2.load_state_dict(
torch.load(osp.join(path, "target_critic_2.pt")))
self.optimizer_policy.load_state_dict(
torch.load(osp.join(path, "optimizer_policy.pt")))
self.optimizer_critic_1.load_state_dict(
torch.load(osp.join(path, "optimizer_critic_1.pt")))
self.optimizer_critic_2.load_state_dict(
torch.load(osp.join(path, "optimizer_critic_2.pt")))
def predict(self, states, deterministic=True):
self.policy.eval()
states = torch.tensor(states, dtype=torch.float).to(self.device)
action, _ = self.policy(states, deterministic=deterministic)
action = action.detach().cpu().numpy()
action = action.clip(self.action_space.low, self.action_space.high)
return action
def set_target_entropy(self, target_entropy):
self.target_entropy = target_entropy
| 40.424749 | 109 | 0.627037 |
467fd0839130985051b26afada913ce8eff80d11 | 1,082 | py | Python | experiment/tools/test_measure.py | Shitaibin/tweets-spam-clustering | 52148c63dd1d1ad97ea7595503e3172ca9268a75 | [
"MIT"
]
| null | null | null | experiment/tools/test_measure.py | Shitaibin/tweets-spam-clustering | 52148c63dd1d1ad97ea7595503e3172ca9268a75 | [
"MIT"
]
| null | null | null | experiment/tools/test_measure.py | Shitaibin/tweets-spam-clustering | 52148c63dd1d1ad97ea7595503e3172ca9268a75 | [
"MIT"
]
| null | null | null | from unittest import TestCase
from measure import cos_similarity
class MeasureTestCase(TestCase):
"""
Unittest for tools.py
"""
##################################################
# Unittest for Visulize fucntions
def test_cos_similarity(self):
"""
Unittest for function CosSimilirity.
The input parameters is normalize vector/list. So, cos_similarity
just caculate the dot product. In this test, we just need
to test dot product is enough.
"""
self.assertEqual(8, cos_similarity([1, 2], [2, 3]), msg="The cosine\
similarity of (1,2) and (2,3) should be \
{}".format(8))
self.assertEqual(0, cos_similarity([0, 0], [2, 3]), msg="The cosine\
similarity of (0,0) and (2,3) should be \
{}".format(0))
self.assertEqual(4, cos_similarity([-1, 2], [2, 3]), msg="The cosine\
similarity of (-1,2) and (2,3) should be \
{}".format(4))
| 31.823529 | 77 | 0.511091 |
b8d4fd9cd0b7e8d4b845e72743a3adaa7f7dc2c3 | 313 | py | Python | pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/test/gluesemantics_malt_fixt.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/test/gluesemantics_malt_fixt.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | pkgs/nltk-3.2-py27_0/lib/python2.7/site-packages/nltk/test/gluesemantics_malt_fixt.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
def setup_module(module):
from nose import SkipTest
from nltk.parse.malt import MaltParser
try:
depparser = MaltParser('maltparser-1.7.2')
except LookupError:
raise SkipTest("MaltParser is not available")
| 26.083333 | 54 | 0.670927 |
179e68939b02779715c289f7af4d34c763cefff6 | 1,012 | py | Python | django_boilerplate/user/migrations/0003_rightssupport.py | eugen1j/django-boilerplate | 952df758141acded74dc762ccfd6d0eb7de5bf43 | [
"MIT"
]
| null | null | null | django_boilerplate/user/migrations/0003_rightssupport.py | eugen1j/django-boilerplate | 952df758141acded74dc762ccfd6d0eb7de5bf43 | [
"MIT"
]
| null | null | null | django_boilerplate/user/migrations/0003_rightssupport.py | eugen1j/django-boilerplate | 952df758141acded74dc762ccfd6d0eb7de5bf43 | [
"MIT"
]
| null | null | null | # Generated by Django 3.1.7 on 2021-04-08 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("user", "0002_auto_20210329_0713"),
]
operations = [
migrations.CreateModel(
name="RightsSupport",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
options={
"permissions": [
("MANAGE_USERS", "Manage Users"),
("VIEW_USERS", "Read Users"),
("MANAGE_BLOG", "Manage Blog"),
("MANAGE_TASKS", "Manage Tasks"),
],
"managed": False,
"default_permissions": (),
},
),
]
| 26.631579 | 53 | 0.400198 |
45dd63b5923f8ab25a55d2afa3aa5014fddaad7d | 7,468 | py | Python | pennylane/templates/layers/particle_conserving_u2.py | aglitoiu/pennylane | fd99be754d55bbb919aadbbbdff70e40fbe3bcbf | [
"Apache-2.0"
]
| null | null | null | pennylane/templates/layers/particle_conserving_u2.py | aglitoiu/pennylane | fd99be754d55bbb919aadbbbdff70e40fbe3bcbf | [
"Apache-2.0"
]
| null | null | null | pennylane/templates/layers/particle_conserving_u2.py | aglitoiu/pennylane | fd99be754d55bbb919aadbbbdff70e40fbe3bcbf | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the hardware efficient ``ParticleConservingU2`` template.
"""
import pennylane as qml
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
from pennylane.templates.decorator import template
from pennylane.ops import CNOT, CRX, RZ
from pennylane.wires import Wires
def _preprocess(weights, wires, init_state):
"""Validate and pre-process inputs as follows:
* Check that the weights tensor has the correct shape.
* Extract a wire list for the subroutines of this template.
* Cast initial state to a numpy array.
Args:
weights (tensor_like): trainable parameters of the template
wires (Wires): wires that template acts on
init_state (tensor_like): shape ``(len(wires),)`` tensor
Returns:
int, list[Wires], array: number of times that the ansatz is repeated, wires pattern,
and preprocessed initial state
"""
if len(wires) < 2:
raise ValueError(
"This template requires the number of qubits to be greater than one;"
"got a wire sequence with {} elements".format(len(wires))
)
shape = qml.math.shape(weights)
if len(shape) != 2:
raise ValueError(f"Weights tensor must be 2-dimensional; got shape {shape}")
if shape[1] != 2 * len(wires) - 1:
raise ValueError(
f"Weights tensor must have a second dimension of length {2 * len(wires) - 1}; got {shape[1]}"
)
repeat = shape[0]
nm_wires = [wires.subset([l, l + 1]) for l in range(0, len(wires) - 1, 2)]
nm_wires += [wires.subset([l, l + 1]) for l in range(1, len(wires) - 1, 2)]
# we can extract the numpy representation here
# since init_state can never be differentiable
init_state = qml.math.toarray(init_state)
return repeat, nm_wires, init_state
def u2_ex_gate(phi, wires=None):
r"""Implements the two-qubit exchange gate :math:`U_{2,\mathrm{ex}}` proposed in
`arXiv:1805.04340 <https://arxiv.org/abs/1805.04340>`_ to build particle-conserving VQE ansatze
for Quantum Chemistry simulations.
The unitary matrix :math:`U_{2, \mathrm{ex}}` acts on the Hilbert space of two qubits
.. math::
U_{2, \mathrm{ex}}(\phi) = \left(\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & \mathrm{cos}(\phi) & -i\;\mathrm{sin}(\phi) & 0 \\
0 & -i\;\mathrm{sin}(\phi) & \mathrm{cos}(\phi) & 0 \\
0 & 0 & 0 & 1 \\
\end{array}\right).
Args:
phi (float): angle entering the controlled-RX operator :math:`CRX(2\phi)`
wires (list[Wires]): the two wires ``n`` and ``m`` the circuit acts on
"""
CNOT(wires=wires)
CRX(2 * phi, wires=wires[::-1])
CNOT(wires=wires)
@template
def ParticleConservingU2(weights, wires, init_state=None):
r"""Implements the heuristic VQE ansatz for Quantum Chemistry simulations using the
particle-conserving entangler :math:`U_\mathrm{ent}(\vec{\theta}, \vec{\phi})` proposed in
`arXiv:1805.04340 <https://arxiv.org/abs/1805.04340>`_.
This template prepares :math:`N`-qubit trial states by applying :math:`D` layers of the entangler
block :math:`U_\mathrm{ent}(\vec{\theta}, \vec{\phi})` to the Hartree-Fock state
.. math::
\vert \Psi(\vec{\theta}, \vec{\phi}) \rangle = \hat{U}^{(D)}_\mathrm{ent}(\vec{\theta}_D,
\vec{\phi}_D) \dots \hat{U}^{(2)}_\mathrm{ent}(\vec{\theta}_2, \vec{\phi}_2)
\hat{U}^{(1)}_\mathrm{ent}(\vec{\theta}_1, \vec{\phi}_1) \vert \mathrm{HF}\rangle,
where :math:`\hat{U}^{(i)}_\mathrm{ent}(\vec{\theta}_i, \vec{\phi}_i) =
\hat{R}_\mathrm{z}(\vec{\theta}_i) \hat{U}_\mathrm{2,\mathrm{ex}}(\vec{\phi}_i)`.
The circuit implementing the entangler blocks is shown in the figure below:
|
.. figure:: ../../_static/templates/layers/particle_conserving_u2.png
:align: center
:width: 60%
:target: javascript:void(0);
|
Each layer contains :math:`N` rotation gates :math:`R_\mathrm{z}(\vec{\theta})` and
:math:`N-1` particle-conserving exchange gates :math:`U_{2,\mathrm{ex}}(\phi)`
    that act on pairs of nearest-neighbor qubits. The repeated units across several qubits are
shown in dotted boxes. The unitary matrix representing :math:`U_{2,\mathrm{ex}}(\phi)`
(`arXiv:1805.04340 <https://arxiv.org/abs/1805.04340>`_) is decomposed into its elementary
gates and implemented in the :func:`~.u2_ex_gate` function using PennyLane quantum operations.
|
.. figure:: ../../_static/templates/layers/u2_decomposition.png
:align: center
:width: 60%
:target: javascript:void(0);
|
Args:
weights (tensor_like): Weight tensor of shape ``(D, M)`` where ``D`` is the number of
layers and ``M`` = ``2N-1`` is the total number of rotation ``(N)`` and exchange
``(N-1)`` gates per layer.
wires (Iterable or Wires): Wires that the template acts on. Accepts an iterable of numbers
or strings, or a Wires object.
init_state (tensor_like): shape ``(len(wires),)`` tensor representing the Hartree-Fock state
used to initialize the wires.
Raises:
ValueError: if inputs do not have the correct format
.. UsageDetails::
#. The number of wires has to be equal to the number of spin orbitals included in
the active space.
#. The number of trainable parameters scales with the number of layers :math:`D` as
:math:`D(2N-1)`.
An example of how to use this template is shown below:
.. code-block:: python
import pennylane as qml
from pennylane.templates import ParticleConservingU2
from functools import partial
# Build the electronic Hamiltonian from a local .xyz file
h, qubits = qml.qchem.molecular_hamiltonian("h2", "h2.xyz")
# Define the HF state
ref_state = qml.qchem.hf_state(2, qubits)
# Define the device
dev = qml.device('default.qubit', wires=qubits)
# Define the ansatz
ansatz = partial(ParticleConservingU2, init_state=ref_state)
# Define the cost function
cost_fn = qml.ExpvalCost(ansatz, h, dev)
# Compute the expectation value of 'h' for a given set of parameters
layers = 1
params = qml.init.particle_conserving_u2_normal(layers, qubits)
print(cost_fn(params))
"""
wires = Wires(wires)
repeat, nm_wires, init_state = _preprocess(weights, wires, init_state)
qml.BasisState(init_state, wires=wires)
for l in range(repeat):
for j, _ in enumerate(wires):
RZ(weights[l, j], wires=wires[j])
for i, wires_ in enumerate(nm_wires):
u2_ex_gate(weights[l, len(wires) + i], wires=wires_)
| 36.607843 | 105 | 0.641537 |
0d2c790ad119c6f5f84e0d70ef9be1411d800cf4 | 6,232 | py | Python | lectures/nonlinear_equations/nonlinear_algorithms.py | carolinalvarez/ose-course-scientific-computing | 4b816fa81320c88fc5f35b203f0541e0a1a00939 | [
"MIT"
]
| null | null | null | lectures/nonlinear_equations/nonlinear_algorithms.py | carolinalvarez/ose-course-scientific-computing | 4b816fa81320c88fc5f35b203f0541e0a1a00939 | [
"MIT"
]
| null | null | null | lectures/nonlinear_equations/nonlinear_algorithms.py | carolinalvarez/ose-course-scientific-computing | 4b816fa81320c88fc5f35b203f0541e0a1a00939 | [
"MIT"
]
| null | null | null | """Algorithms for lecture on nonlinear equations.
The materials follow Miranda and Fackler (2004, :cite:`miranda2004applied`) (Chapter 3).
The python code draws on Romero-Aguilar (2020, :cite:`CompEcon`).
"""
from functools import partial
import numpy as np
from scipy import optimize
def bisect(f, a, b, tolerance=1.5e-8):
"""Apply bisect method to root finding problem.
    Iterative procedure to find the root of a continuous, real-valued function :math:`f(x)` defined
    on a bounded interval of the real line. Define an interval :math:`[a, b]` that is known to
    bracket the root of :math:`f` (i.e. the signs of :math:`f(a)` and :math:`f(b)` must differ).
    The interval :math:`[a, b]` is then repeatedly bisected into subintervals of equal length.
    In each iteration, the subinterval whose endpoints have different signs (and thus contains
    the root of :math:`f`) is bisected again until the size of the subinterval containing the
    root falls below a specified convergence tolerance.
Parameters
----------
f : callable
Continuous, real-valued, univariate function :math:`f(x)`.
a : int or float
Lower bound :math:`a` for :math:`x \\in [a,b]`.
b : int or float
        Upper bound :math:`b` for :math:`x \\in [a,b]`. Select :math:`a` and :math:`b` so
that :math:`f(b)` has different sign than :math:`f(a)`.
tolerance : float
Convergence tolerance.
Returns
-------
    x : float
        Solution to the root finding problem within specified tolerance.
    xvals : numpy.ndarray
        History of the iterates produced by the bisection steps.
Examples
--------
>>> x = bisect(f=lambda x : x ** 3 - 2, a=1, b=2)[0]
>>> round(x, 4)
1.2599
"""
# Get sign for f(a).
s = np.sign(f(a))
# Get staring values for x and interval length.
x = (a + b) / 2
d = (b - a) / 2
# Continue operation as long as d is above the convergence tolerance threshold.
# Update x by adding or subtracting value of d depending on sign of f.
xvals = [x]
while d > tolerance:
d = d / 2
if s == np.sign(f(x)):
x += d
else:
x -= d
xvals.append(x)
return x, np.array(xvals)
def fixpoint(f, x0, tolerance=10e-5):
"""Compute fixed point using function iteration.
Parameters
----------
f : callable
Function :math:`f(x)`.
x0 : float
Initial guess for fixed point (starting value for function iteration).
tolerance : float
Convergence tolerance (tolerance < 1).
Returns
-------
    x : float
        Approximate fixed point of :math:`f` within the specified tolerance.
    xvals : numpy.ndarray
        History of the iterates produced by the function iteration.
Examples
--------
>>> import numpy as np
>>> x = fixpoint(f=lambda x : x**0.5, x0=0.4, tolerance=1e-10)[0]
>>> np.allclose(x, 1)
True
"""
e = 1
xvals = [x0]
while e > tolerance:
# Fixed point equation.
x = f(x0)
# Error at the current step.
e = np.linalg.norm(x0 - x)
x0 = x
xvals.append(x0)
return x, np.array(xvals)
def newton_method(f, x0, tolerance=1.5e-8):
"""Apply Newton's method to solving nonlinear equation.
Solve equation using successive linearization, which replaces the nonlinear problem
by a sequence of linear problems whose solutions converge to the solution of the nonlinear
problem.
Parameters
----------
f : callable
        (Univariate) function returning the tuple :math:`(f(x), f'(x))`, i.e. its value
        and first derivative at :math:`x`.
x0 : float
Initial guess for the root of :math:`f`.
tolerance : float
Convergence tolerance.
Returns
-------
    xn : numpy.ndarray
        Solution to the root finding problem within the specified tolerance.
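    Examples
    --------
    A minimal sketch (added for illustration), assuming ``f`` returns the
    tuple ``(f(x), f'(x))`` as described above:
    >>> x = newton_method(f=lambda x: (x ** 2 - 2, 2 * x), x0=1.5)
    >>> round(float(x[0]), 4)
    1.4142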
"""
x0 = np.atleast_1d(x0)
# This is tailored to the univariate case.
assert x0.shape[0] == 1
xn = x0.copy()
while True:
fxn, gxn = f(xn)
if np.linalg.norm(fxn) < tolerance:
return xn
else:
xn = xn - fxn / gxn
def mcp_minmax(f, x0, a, b):
"""Apply minmax root finding formulation to mixed complementarity problem.
Function utilizes Broyden's method for solution using the function
:func:`scipy.optimize.root`.
Parameters
----------
f : callable
        Function returning a tuple whose first element is the value :math:`f(x)`.
x0 : float
Initial guess to root finding problem.
a : float
Lower bound :math:`a`.
b : float
Upper bound :math:`b`.
Returns
-------
    rslt : scipy.optimize.OptimizeResult
        Root finding result returned by :func:`scipy.optimize.root`.
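    Examples
    --------
    A minimal sketch (added for illustration), assuming ``f`` returns a tuple
    whose first element is the residual :math:`f(x)` (any remaining elements
    are not used):
    >>> import numpy as np
    >>> rslt = mcp_minmax(lambda x: (3.0 - x, None), x0=np.array([1.0]),
    ...                   a=np.array([0.0]), b=np.array([10.0]))
    >>> abs(float(rslt.x[0]) - 3.0) < 1e-4
    True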
"""
# Define minmax formulation.
def wrapper(f, a, b, x):
fval = f(x)[0]
return np.fmin(np.fmax(fval, a - x), b - x)
# Apply partial function to minmax wrapper to fix all arguments but x0.
wrapper_p = partial(wrapper, f, a, b)
# Apply scipy function to find root using Broyden's method.
rslt = optimize.root(wrapper_p, x0, method="broyden1", options={"maxiter": 500})
return rslt
def fischer(u, v, sign):
"""Define Fischer's function.
.. math::
\\phi_{i}^{\\pm}(u, v) = u_{i} + v_{i} \\pm \\sqrt{u_{i}^{2} + v_{i}^{2}}
Parameters
----------
u : float
v : float
sign : float or int
Gives sign of equation. Should be either 1 or -1.
Returns
-------
    float or numpy.ndarray
        Value of Fischer's function :math:`\\phi^{\\pm}(u, v)`.
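    Examples
    --------
    A quick numerical check (added for illustration) of the formula above:
    >>> float(fischer(3.0, 4.0, sign=1))
    12.0
    >>> float(fischer(3.0, 4.0, sign=-1))
    2.0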
"""
return u + v + sign * np.sqrt(u ** 2 + v ** 2)
def mcp_fischer(f, x0, a, b):
"""Apply Fischer's function :func:`fischer` to mixed complementarity Problem.
Parameters
----------
f : callable
        Function returning a tuple whose first element is the value :math:`f(x)`.
x0 : float
Initial guess to root finding problem.
a : float
Lower bound :math:`a`.
b : float
Upper bound :math:`b`.
Returns
-------
rslt : float
"""
def wrapper(f, a, b, x):
        # The Fischer formulation is quite sensitive to unbounded intervals, so
        # cap infinite upper bounds at a large finite value (a good exercise to
        # explore in class).
        b[b == np.inf] = 1000
u_inner, v_inner, sign_inner = f(x)[0], a - x, +1.0
u_outer, v_outer, sign_outer = fischer(u_inner, v_inner, sign_inner), b - x, -1.0
return fischer(u_outer, v_outer, sign_outer)
# Apply partial function to minmax wrapper to fix all arguments but x0.
wrapper_p = partial(wrapper, f, a, b)
# Apply scipy function to find root using Broyden's method.
rslt = optimize.root(wrapper_p, x0, method="broyden1", options={"maxiter": 500})
return rslt
| 26.519149 | 100 | 0.59018 |
be70cd7ccae184bc5cc83e6cd43ddc535c9219a7 | 6,096 | py | Python | src/crawler/test_crawler.py | gbarkway/hypnospace-sitemap | fce9ed296971fd409bdd4d6da5101a7f7f6afcc9 | [
"MIT"
]
| 4 | 2021-04-18T16:01:54.000Z | 2022-02-02T17:23:01.000Z | src/crawler/test_crawler.py | gbarkway/hypnospace-sitemap | fce9ed296971fd409bdd4d6da5101a7f7f6afcc9 | [
"MIT"
]
| null | null | null | src/crawler/test_crawler.py | gbarkway/hypnospace-sitemap | fce9ed296971fd409bdd4d6da5101a7f7f6afcc9 | [
"MIT"
]
| null | null | null | import unittest
import crawler
from pathlib import Path
class TestCrawler(unittest.TestCase):
def test_readHypnospace(self):
hs = crawler.readHypnospace('./test_data')
self.assertEqual(len(hs.captures), 1,
'Hypnospace should have expected # captures')
self.assertEqual(len(hs.adLinks), 2,
'Hypnospace should have expected # ad links')
self.assertEqual(len(hs.mailLinks), 1,
'Hypnospace should have expected # mail links')
def test_readCapture(self):
capture = crawler.readCapture('./test_data/hs')
self.assertEqual('1999-11-05', capture.date)
page = [
page for page in capture.pages if 'dead_link_test' in page.path
][0]
self.assertIn(r'00_test\tags_no_description.hsp', page.linksTo,
'non-dead link should be kept')
self.assertNotIn(r'00_test\dead_link.hsp', page.linksTo,
'dead link should be removed')
self.assertEqual(len(capture.pages), 14)
def test_readZone(self):
pages = crawler.readZone('test_data/hs/00_test')
self.assertEqual(
len(pages),
len([
path for path in Path('test_data/hs/00_test').iterdir()
if '.hsp' in path.suffix
]))
implicit_links = [
str(path.relative_to('test_data/hs')).replace('/', '\\')
for path in Path('test_data/hs/00_test').iterdir()
if '~' not in str(path) and not 'zone.hsp' == path.name
]
explicit_links = [r'00_test\explicit_zone_link.hsp']
zonePage = [p for p in pages if r'00_test\zone.hsp' == p.path][0]
self.assertEqual('00_test', zonePage.zone)
self.assertTrue(zonePage.isZoneHome)
self.assertEqual(len(zonePage.linksTo),
len(implicit_links) + len(explicit_links))
for link in implicit_links + explicit_links:
self.assertIn(link, zonePage.linksTo)
def test_readPage_basics(self):
page = crawler.readPage('test_data/hs/00_test/page.hsp')
self.assertEqual(r'00_test\page.hsp', page.path)
self.assertEqual('Citizen name', page.citizenName)
self.assertEqual('Page name', page.name)
self.assertEqual('00_test', page.zone)
self.assertFalse(page.isZoneHome)
def test_readPage_zone(self):
page = crawler.readPage('test_data/hs/00_test/zone.hsp')
self.assertTrue(page.isZoneHome)
def test_readPage_finds_links_from_all_captures(self):
links = crawler.readPage('test_data/hs/00_test/page.hsp').linksTo
self.assertIn(r'00_test\~10th_el_hs_prefix.hsp', links,
'link with hs prefix should be found')
self.assertIn(r'00_test\~10th_el_hsa_prefix.hsp', links,
'link with hsa prefix should be found')
self.assertIn(r'00_test\~10th_el_hsb_prefix.hsp', links,
'link with hsb prefix should be found')
self.assertIn(r'00_test\10th_el_hsc_prefix.hsp', links,
'link with hsc prefix should be found')
def test_readPage_finds_links_from_10th_or_11th_element(self):
links = crawler.readPage('test_data/hs/00_test/page.hsp').linksTo
self.assertIn(r'00_test\~10th_el_hs_prefix.hsp', links,
'link in 10th position should be found')
self.assertIn(r'00_test\11th_el.hsp', links,
'link in 11th position should be found')
def test_readPage_finds_complex_link(self):
links = crawler.readPage('test_data/hs/00_test/page.hsp').linksTo
self.assertIn(r'00_test\~complex_link.hsp', links,
'complex link should be found')
def test_readPage_has_right_link_count(self):
links = crawler.readPage('test_data/hs/00_test/page.hsp').linksTo
        self.assertEqual(len(links), 6, 'should find expected number of links')
def test_readPage_no_duplicate_links(self):
links = crawler.readPage('test_data/hs/00_test/page.hsp').linksTo
self.assertCountEqual(links, set(links),
'should have no duplicate links')
def test_readPage_no_links_to_self(self):
links = crawler.readPage('test_data/hs/00_test/page.hsp').linksTo
self.assertNotIn('00_test/page.hsp', links,
'should have no links to self')
def test_readPage_empty_list_if_no_links(self):
links = crawler.readPage('test_data/hs/00_test/no_links.hsp').linksTo
self.assertEqual(len(links), 0,
'page with no links should have empty linksTo')
def test_readPage_tags_and_description(self):
page = crawler.readPage(
'test_data/hs/00_test/tags_and_description.hsp')
self.assertIn('tag1', page.tags)
self.assertIn('tag2', page.tags)
self.assertEqual(len(page.tags), 2)
self.assertEqual('Test description', page.description)
def test_readPage_tags_and_no_description(self):
page = crawler.readPage('test_data/hs/00_test/tags_no_description.hsp')
self.assertIn('tag1', page.tags)
self.assertIn('tag2', page.tags)
self.assertEqual(len(page.tags), 2)
self.assertEqual(None, page.description)
def test_readPage_no_tags_and_description(self):
page = crawler.readPage('test_data/hs/00_test/no_tags_description.hsp')
self.assertEqual(len(page.tags), 0)
self.assertEqual('Test description', page.description)
def test_readPage_no_tags_no_description(self):
page = crawler.readPage(
'test_data/hs/00_test/no_tags_no_description.hsp')
self.assertEqual(len(page.tags), 0)
self.assertEqual(None, page.description)
def test_readPage_empty_citizen_name_is_none(self):
page = crawler.readPage(
'test_data/hs/00_test/no_citizen_name.hsp')
self.assertEqual(None, page.citizenName)
if __name__ == '__main__':
unittest.main()
| 42.929577 | 79 | 0.64042 |
704759eedb76612148bda5a7c5d4b877cdb52443 | 2,661 | py | Python | data/p4VQE/R4/benchmark/startQiskit_QC695.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
]
| null | null | null | data/p4VQE/R4/benchmark/startQiskit_QC695.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
]
| null | null | null | data/p4VQE/R4/benchmark/startQiskit_QC695.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
]
| null | null | null | # qubit number=3
# total number=14
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.cx(input_qubit[3],input_qubit[0]) # number=11
prog.z(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[0]) # number=13
prog.z(input_qubit[1]) # number=8
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[3],input_qubit[0]) # number=5
prog.swap(input_qubit[3],input_qubit[0]) # number=6
prog.x(input_qubit[3]) # number=9
prog.x(input_qubit[3]) # number=10
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC695.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 28.010526 | 118 | 0.637354 |
87d02725f50cb901eb3cee04d59a902f776f6909 | 3,423 | py | Python | shared/sentry/external/crashpad/util/mach/mig_gen.py | Eeems-Org/oxide | d3bfa47e60bf311feb7768234dfe95a15adeb9da | [
"MIT"
]
| 18 | 2022-01-11T17:24:50.000Z | 2022-03-30T04:35:25.000Z | shared/sentry/external/crashpad/util/mach/mig_gen.py | Eeems-Org/oxide | d3bfa47e60bf311feb7768234dfe95a15adeb9da | [
"MIT"
]
| 21 | 2022-01-07T19:20:04.000Z | 2022-03-24T14:32:28.000Z | shared/sentry/external/crashpad/util/mach/mig_gen.py | Eeems-Org/oxide | d3bfa47e60bf311feb7768234dfe95a15adeb9da | [
"MIT"
]
| 2 | 2022-01-15T16:45:34.000Z | 2022-03-01T22:37:48.000Z | #!/usr/bin/env python3
# Copyright 2019 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import os
import subprocess
import sys
MigInterface = collections.namedtuple(
'MigInterface', ['user_c', 'server_c', 'user_h', 'server_h'])
def generate_interface(defs,
interface,
includes=[],
sdk=None,
clang_path=None,
mig_path=None,
migcom_path=None,
arch=None):
if mig_path is None:
mig_path = 'mig'
# yapf: disable
command = [
mig_path,
'-user', interface.user_c,
'-server', interface.server_c,
'-header', interface.user_h,
'-sheader', interface.server_h,
]
# yapf: enable
if clang_path is not None:
os.environ['MIGCC'] = clang_path
if migcom_path is not None:
os.environ['MIGCOM'] = migcom_path
if arch is not None:
command.extend(['-arch', arch])
if sdk is not None:
command.extend(['-isysroot', sdk])
for include in includes:
command.extend(['-I' + include])
command.append(defs)
subprocess.check_call(command)
def parse_args(args, multiple_arch=False):
parser = argparse.ArgumentParser()
parser.add_argument('--clang-path', help='Path to clang')
parser.add_argument('--mig-path', help='Path to mig')
parser.add_argument('--migcom-path', help='Path to migcom')
if not multiple_arch:
parser.add_argument('--arch', help='Target architecture')
else:
parser.add_argument(
'--arch',
default=[],
action='append',
help='Target architecture (may appear multiple times)')
parser.add_argument('--sdk', help='Path to SDK')
parser.add_argument(
'--include',
default=[],
action='append',
help='Additional include directory (may appear multiple times)')
parser.add_argument('defs')
parser.add_argument('user_c')
parser.add_argument('server_c')
parser.add_argument('user_h')
parser.add_argument('server_h')
# This is a HACK to parse arch from env when cmake is configured to use xcode
parsed = parser.parse_args(args)
if multiple_arch and len(parsed.arch) == 1 and parsed.arch[0] == "FROM_ENV":
parsed.arch = os.environ.get("ARCHS", "").split(" ")
return parsed
def main(args):
parsed = parse_args(args)
interface = MigInterface(parsed.user_c, parsed.server_c, parsed.user_h,
parsed.server_h)
generate_interface(parsed.defs, interface, parsed.include, parsed.sdk,
parsed.clang_path, parsed.mig_path, parsed.migcom_path,
parsed.arch)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 32.6 | 81 | 0.62752 |
6b2c5bb85d0c5f15a8750916042d66240cd89dfa | 1,921 | py | Python | examples/background_tasks.py | CoderLeague/research_aiohttp | 01113d3edb09388e25fd4a0af73fb7cc01e4aa74 | [
"Apache-2.0"
]
| 1 | 2019-05-09T08:59:46.000Z | 2019-05-09T08:59:46.000Z | examples/background_tasks.py | snjypl/aiohttp | 1473f2722b2a804269d4fdbff4880f9ff9da0c3f | [
"Apache-2.0"
]
| null | null | null | examples/background_tasks.py | snjypl/aiohttp | 1473f2722b2a804269d4fdbff4880f9ff9da0c3f | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
"""Example of aiohttp.web.Application.on_startup signal handler"""
import asyncio
import aioredis
from aiohttp.web import Application, WebSocketResponse, run_app
async def websocket_handler(request):
ws = WebSocketResponse()
await ws.prepare(request)
request.app['websockets'].append(ws)
try:
async for msg in ws:
print(msg)
await asyncio.sleep(1)
finally:
request.app['websockets'].remove(ws)
return ws
async def on_shutdown(app):
for ws in app['websockets']:
await ws.close(code=999, message='Server shutdown')
async def listen_to_redis(app):
try:
sub = await aioredis.create_redis(('localhost', 6379), loop=app.loop)
ch, *_ = await sub.subscribe('news')
async for msg in ch.iter(encoding='utf-8'):
# Forward message to all connected websockets:
for ws in app['websockets']:
await ws.send_str('{}: {}'.format(ch.name, msg))
print("message in {}: {}".format(ch.name, msg))
except asyncio.CancelledError:
pass
finally:
print('Cancel Redis listener: close connection...')
await sub.unsubscribe(ch.name)
await sub.quit()
print('Redis connection closed.')
async def start_background_tasks(app):
app['redis_listener'] = app.loop.create_task(listen_to_redis(app))
async def cleanup_background_tasks(app):
print('cleanup background tasks...')
app['redis_listener'].cancel()
await app['redis_listener']
async def init(loop):
app = Application()
app['websockets'] = []
app.router.add_get('/news', websocket_handler)
app.on_startup.append(start_background_tasks)
app.on_cleanup.append(cleanup_background_tasks)
app.on_shutdown.append(on_shutdown)
return app
loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(loop))
run_app(app)
| 28.671642 | 77 | 0.665279 |
18fdd813f66637068fc484a9d2cf7ba0f5c590c2 | 105,442 | py | Python | lib/galaxy/tools/__init__.py | igorhollaender/sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
]
| 2 | 2018-10-14T16:42:39.000Z | 2018-10-14T16:42:41.000Z | lib/galaxy/tools/__init__.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
]
| null | null | null | lib/galaxy/tools/__init__.py | igorhollaender/OBSOLETE_sirv_dashboard | 85aec60b80ef6f561d89398e3da5963d3d0f2aa4 | [
"CC-BY-3.0"
]
| null | null | null | """
Classes encapsulating galaxy tools and tool configuration.
"""
import glob
import json
import logging
import os
import re
import tarfile
import tempfile
import threading
import urllib
from datetime import datetime
from cgi import FieldStorage
from xml.etree import ElementTree
from mako.template import Template
from paste import httpexceptions
from six import string_types
from galaxy.version import VERSION_MAJOR
from galaxy import model
from galaxy.managers import histories
from galaxy.datatypes.metadata import JobExternalOutputMetadataWrapper
from galaxy import exceptions
from galaxy.tools.actions import DefaultToolAction
from galaxy.tools.actions.upload import UploadToolAction
from galaxy.tools.actions.data_source import DataSourceToolAction
from galaxy.tools.actions.data_manager import DataManagerToolAction
from galaxy.tools.parameters import params_to_incoming, check_param, params_from_strings, params_to_strings, visit_input_values
from galaxy.tools.parameters import output_collect
from galaxy.tools.parameters.basic import (BaseURLToolParameter,
DataToolParameter, DataCollectionToolParameter, HiddenToolParameter,
SelectToolParameter, ToolParameter)
from galaxy.tools.parameters.grouping import Conditional, ConditionalWhen, Repeat, Section, UploadDataset
from galaxy.tools.parameters.input_translation import ToolInputTranslator
from galaxy.tools.test import parse_tests
from galaxy.tools.parser import get_tool_source
from galaxy.tools.parser.xml import XmlPageSource
from galaxy.tools.parser import ToolOutputCollectionPart
from galaxy.tools.toolbox import BaseGalaxyToolBox
from galaxy.util import rst_to_html, string_as_bool
from galaxy.util import ExecutionTimer
from galaxy.util import listify
from galaxy.util import unicodify
from galaxy.tools.parameters.meta import expand_meta_parameters
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
from galaxy.util.json import json_fix
from galaxy.util.odict import odict
from galaxy.util.template import fill_template
from galaxy.web import url_for
from galaxy.web.form_builder import SelectField
from galaxy.util.dictifiable import Dictifiable
from galaxy.work.context import WorkRequestContext
from tool_shed.util import common_util
from tool_shed.util import shed_util_common as suc
from .loader import template_macro_params, raw_tool_xml_tree, imported_macro_paths
from .execute import execute as execute_job
import galaxy.jobs
log = logging.getLogger( __name__ )
HELP_UNINITIALIZED = threading.Lock()
class ToolErrorLog:
def __init__(self):
self.error_stack = []
self.max_errors = 100
def add_error(self, file, phase, exception):
self.error_stack.insert(0, {
"file": file,
"time": str(datetime.now()),
"phase": phase,
"error": str(exception)
} )
if len(self.error_stack) > self.max_errors:
self.error_stack.pop()
global_tool_errors = ToolErrorLog()
class ToolNotFoundException( Exception ):
pass
class ToolBox( BaseGalaxyToolBox ):
""" A derivative of AbstractToolBox with knowledge about Tool internals -
how to construct them, action types, dependency management, etc....
"""
def __init__( self, config_filenames, tool_root_dir, app ):
super( ToolBox, self ).__init__(
config_filenames=config_filenames,
tool_root_dir=tool_root_dir,
app=app,
)
@property
def tools_by_id( self ):
# Deprecated method, TODO - eliminate calls to this in test/.
return self._tools_by_id
def create_tool( self, config_file, repository_id=None, guid=None, **kwds ):
try:
tool_source = get_tool_source( config_file, getattr( self.app.config, "enable_beta_tool_formats", False ) )
except Exception, e:
# capture and log parsing errors
global_tool_errors.add_error(config_file, "Tool XML parsing", e)
raise e
# Allow specifying a different tool subclass to instantiate
tool_module = tool_source.parse_tool_module()
if tool_module is not None:
module, cls = tool_module
mod = __import__( module, globals(), locals(), [cls] )
ToolClass = getattr( mod, cls )
elif tool_source.parse_tool_type():
tool_type = tool_source.parse_tool_type()
ToolClass = tool_types.get( tool_type )
else:
# Normal tool
root = getattr( tool_source, 'root', None )
ToolClass = Tool
tool = ToolClass( config_file, tool_source, self.app, guid=guid, repository_id=repository_id, **kwds )
return tool
def handle_datatypes_changed( self ):
""" Refresh upload tools when new datatypes are added. """
for tool_id in self._tools_by_id:
tool = self._tools_by_id[ tool_id ]
if isinstance( tool.tool_action, UploadToolAction ):
self.reload_tool_by_id( tool_id )
def get_tool_components( self, tool_id, tool_version=None, get_loaded_tools_by_lineage=False, set_selected=False ):
"""
Retrieve all loaded versions of a tool from the toolbox and return a select list enabling
selection of a different version, the list of the tool's loaded versions, and the specified tool.
"""
toolbox = self
tool_version_select_field = None
tools = []
tool = None
# Backwards compatibility for datasource tools that have default tool_id configured, but which
# are now using only GALAXY_URL.
tool_ids = listify( tool_id )
for tool_id in tool_ids:
if get_loaded_tools_by_lineage:
tools = toolbox.get_loaded_tools_by_lineage( tool_id )
else:
tools = toolbox.get_tool( tool_id, tool_version=tool_version, get_all_versions=True )
if tools:
tool = toolbox.get_tool( tool_id, tool_version=tool_version, get_all_versions=False )
if len( tools ) > 1:
tool_version_select_field = self.__build_tool_version_select_field( tools, tool.id, set_selected )
break
return tool_version_select_field, tools, tool
def _get_tool_shed_repository( self, tool_shed, name, owner, installed_changeset_revision ):
# Abstract toolbox doesn't have a dependency on the the database, so
# override _get_tool_shed_repository here to provide this information.
return suc.get_installed_repository(
self.app,
tool_shed=tool_shed,
name=name,
owner=owner,
installed_changeset_revision=installed_changeset_revision
)
def __build_tool_version_select_field( self, tools, tool_id, set_selected ):
"""Build a SelectField whose options are the ids for the received list of tools."""
options = []
refresh_on_change_values = []
for tool in tools:
options.insert( 0, ( tool.version, tool.id ) )
refresh_on_change_values.append( tool.id )
select_field = SelectField( name='tool_id', refresh_on_change=True, refresh_on_change_values=refresh_on_change_values )
for option_tup in options:
selected = set_selected and option_tup[ 1 ] == tool_id
if selected:
select_field.add_option( 'version %s' % option_tup[ 0 ], option_tup[ 1 ], selected=True )
else:
select_field.add_option( 'version %s' % option_tup[ 0 ], option_tup[ 1 ] )
return select_field
class DefaultToolState( object ):
"""
Keeps track of the state of a users interaction with a tool between
requests.
"""
def __init__( self ):
self.page = 0
self.rerun_remap_job_id = None
self.inputs = None
def encode( self, tool, app ):
"""
Convert the data to a string
"""
# Convert parameters to a dictionary of strings, and save curent
# page in that dict
value = params_to_strings( tool.inputs, self.inputs, app )
value["__page__"] = self.page
value["__rerun_remap_job_id__"] = self.rerun_remap_job_id
return json.dumps( value )
def decode( self, value, tool, app ):
"""
Restore the state from a string
"""
values = json_fix( json.loads( value ) )
self.page = values.pop( "__page__" )
if '__rerun_remap_job_id__' in values:
self.rerun_remap_job_id = values.pop( "__rerun_remap_job_id__" )
else:
self.rerun_remap_job_id = None
self.inputs = params_from_strings( tool.inputs, values, app, ignore_errors=True )
def copy( self ):
"""
Shallow copy of the state
"""
new_state = DefaultToolState()
new_state.page = self.page
new_state.rerun_remap_job_id = self.rerun_remap_job_id
new_state.inputs = self.inputs
return new_state
class Tool( object, Dictifiable ):
"""
Represents a computational tool that can be executed through Galaxy.
"""
tool_type = 'default'
requires_setting_metadata = True
default_tool_action = DefaultToolAction
dict_collection_visible_keys = ( 'id', 'name', 'version', 'description', 'labels' )
def __init__( self, config_file, tool_source, app, guid=None, repository_id=None, allow_code_files=True ):
"""Load a tool from the config named by `config_file`"""
# Determine the full path of the directory where the tool config is
self.config_file = config_file
self.tool_dir = os.path.dirname( config_file )
self.app = app
self.repository_id = repository_id
self._allow_code_files = allow_code_files
# setup initial attribute values
self.inputs = odict()
self.stdio_exit_codes = list()
self.stdio_regexes = list()
self.inputs_by_page = list()
self.display_by_page = list()
self.action = '/tool_runner/index'
self.target = 'galaxy_main'
self.method = 'post'
self.labels = []
self.check_values = True
self.nginx_upload = False
self.input_required = False
self.display_interface = True
self.require_login = False
self.rerun = False
        # Define a place to keep track of all input parameters. These
# differ from the inputs dictionary in that inputs can be page
# elements like conditionals, but input_params are basic form
# parameters like SelectField objects. This enables us to more
# easily ensure that parameter dependencies like index files or
# tool_data_table_conf.xml entries exist.
self.input_params = []
# Attributes of tools installed from Galaxy tool sheds.
self.tool_shed = None
self.repository_name = None
self.repository_owner = None
self.changeset_revision = None
self.installed_changeset_revision = None
# The tool.id value will be the value of guid, but we'll keep the
# guid attribute since it is useful to have.
self.guid = guid
self.old_id = None
self.version = None
# Enable easy access to this tool's version lineage.
self.lineage_ids = []
# populate toolshed repository info, if available
self.populate_tool_shed_info()
# add tool resource parameters
self.populate_resource_parameters( tool_source )
# Parse XML element containing configuration
try:
self.parse( tool_source, guid=guid )
except Exception, e:
global_tool_errors.add_error(config_file, "Tool Loading", e)
raise e
self.history_manager = histories.HistoryManager( app )
@property
def sa_session( self ):
"""Returns a SQLAlchemy session"""
return self.app.model.context
@property
def tool_version( self ):
"""Return a ToolVersion if one exists for our id"""
return self.app.install_model.context.query( self.app.install_model.ToolVersion ) \
.filter( self.app.install_model.ToolVersion.table.c.tool_id == self.id ) \
.first()
@property
def tool_versions( self ):
# If we have versions, return them.
tool_version = self.tool_version
if tool_version:
return tool_version.get_versions( self.app )
return []
@property
def tool_shed_repository( self ):
# If this tool is included in an installed tool shed repository, return it.
if self.tool_shed:
return suc.get_installed_repository( self.app,
tool_shed=self.tool_shed,
name=self.repository_name,
owner=self.repository_owner,
installed_changeset_revision=self.installed_changeset_revision )
return None
@property
def produces_collections_of_unknown_type( self ):
def output_is_dynamic_collection(output):
if not output.collection:
return False
if output.structure.collection_type:
return False
return True
return any( map( output_is_dynamic_collection, self.outputs.values() ) )
@property
def produces_collections_with_unknown_structure( self ):
def output_is_dynamic(output):
if not output.collection:
return False
return output.dynamic_structure
return any( map( output_is_dynamic, self.outputs.values() ) )
def __get_job_tool_configuration(self, job_params=None):
"""Generalized method for getting this tool's job configuration.
:type job_params: dict or None
:returns: `galaxy.jobs.JobToolConfiguration` -- JobToolConfiguration that matches this `Tool` and the given `job_params`
"""
rval = None
if len(self.job_tool_configurations) == 1:
# If there's only one config, use it rather than wasting time on comparisons
rval = self.job_tool_configurations[0]
elif job_params is None:
for job_tool_config in self.job_tool_configurations:
if not job_tool_config.params:
rval = job_tool_config
break
else:
for job_tool_config in self.job_tool_configurations:
if job_tool_config.params:
# There are job params and this config has params defined
for param, value in job_params.items():
if param not in job_tool_config.params or job_tool_config.params[param] != job_params[param]:
break
else:
# All params match, use this config
rval = job_tool_config
break
else:
rval = job_tool_config
assert rval is not None, 'Could not get a job tool configuration for Tool %s with job_params %s, this is a bug' % (self.id, job_params)
return rval
def get_job_handler(self, job_params=None):
"""Get a suitable job handler for this `Tool` given the provided `job_params`. If multiple handlers are valid for combination of `Tool` and `job_params` (e.g. the defined handler is a handler tag), one will be selected at random.
:param job_params: Any params specific to this job (e.g. the job source)
:type job_params: dict or None
:returns: str -- The id of a job handler for a job run of this `Tool`
"""
# convert tag to ID if necessary
return self.app.job_config.get_handler(self.__get_job_tool_configuration(job_params=job_params).handler)
def get_job_destination(self, job_params=None):
"""
:returns: galaxy.jobs.JobDestination -- The destination definition and runner parameters.
"""
return self.app.job_config.get_destination(self.__get_job_tool_configuration(job_params=job_params).destination)
def get_panel_section( self ):
return self.app.toolbox.get_integrated_section_for_tool( self )
def allow_user_access( self, user, attempting_access=True ):
"""
:returns: bool -- Whether the user is allowed to access the tool.
"""
if self.require_login and user is None:
return False
return True
def parse( self, tool_source, guid=None ):
"""
Read tool configuration from the element `root` and fill in `self`.
"""
self.profile = float( tool_source.parse_profile() )
# Get the UNIQUE id for the tool
self.old_id = tool_source.parse_id()
if guid is None:
self.id = self.old_id
else:
self.id = guid
if not self.id:
raise Exception( "Missing tool 'id' for tool at '%s'" % tool_source )
if self.profile >= 16.04 and VERSION_MAJOR < self.profile:
template = "The tool %s targets version %s of Galaxy, you should upgrade Galaxy to ensure proper functioning of this tool."
message = template % (self.id, self.profile)
log.warn(message)
# Get the (user visible) name of the tool
self.name = tool_source.parse_name()
if not self.name:
raise Exception( "Missing tool 'name' for tool with id '%s' at '%s'" % (self.id, tool_source) )
self.version = tool_source.parse_version()
if not self.version:
if self.profile < 16.04:
# For backward compatibility, some tools may not have versions yet.
self.version = "1.0.0"
else:
raise Exception( "Missing tool 'version' for tool with id '%s' at '%s'" % (self.id, tool_source) )
# Support multi-byte tools
self.is_multi_byte = tool_source.parse_is_multi_byte()
# Legacy feature, ignored by UI.
self.force_history_refresh = False
self.display_interface = tool_source.parse_display_interface( default=self.display_interface )
self.require_login = tool_source.parse_require_login( self.require_login )
request_param_translation_elem = tool_source.parse_request_param_translation_elem()
if request_param_translation_elem is not None:
# Load input translator, used by datasource tools to change names/values of incoming parameters
self.input_translator = ToolInputTranslator.from_element( request_param_translation_elem )
else:
self.input_translator = None
self.parse_command( tool_source )
self.environment_variables = self.parse_environment_variables( tool_source )
# Parameters used to build URL for redirection to external app
redirect_url_params = tool_source.parse_redirect_url_params_elem()
if redirect_url_params is not None and redirect_url_params.text is not None:
# get rid of leading / trailing white space
redirect_url_params = redirect_url_params.text.strip()
# Replace remaining white space with something we can safely split on later
# when we are building the params
self.redirect_url_params = redirect_url_params.replace( ' ', '**^**' )
else:
self.redirect_url_params = ''
# Short description of the tool
self.description = tool_source.parse_description()
# Versioning for tools
self.version_string_cmd = None
version_command = tool_source.parse_version_command()
if version_command is not None:
self.version_string_cmd = version_command.strip()
version_cmd_interpreter = tool_source.parse_version_command_interpreter()
if version_cmd_interpreter:
executable = self.version_string_cmd.split()[0]
abs_executable = os.path.abspath(os.path.join(self.tool_dir, executable))
command_line = self.version_string_cmd.replace(executable, abs_executable, 1)
self.version_string_cmd = version_cmd_interpreter + " " + command_line
# Parallelism for tasks, read from tool config.
self.parallelism = tool_source.parse_parallelism()
# Get JobToolConfiguration(s) valid for this particular Tool. At least
# a 'default' will be provided that uses the 'default' handler and
# 'default' destination. I thought about moving this to the
# job_config, but it makes more sense to store here. -nate
self_ids = [ self.id.lower() ]
if self.old_id != self.id:
# Handle toolshed guids
self_ids = [ self.id.lower(), self.id.lower().rsplit('/', 1)[0], self.old_id.lower() ]
self.all_ids = self_ids
# In the toolshed context, there is no job config.
if hasattr( self.app, 'job_config' ):
self.job_tool_configurations = self.app.job_config.get_job_tool_configurations(self_ids)
# Is this a 'hidden' tool (hidden in tool menu)
self.hidden = tool_source.parse_hidden()
self.__parse_legacy_features(tool_source)
# Load any tool specific options (optional)
self.options = dict(
sanitize=tool_source.parse_sanitize(),
refresh=tool_source.parse_refresh(),
)
self.options = Bunch(** self.options)
# Parse tool inputs (if there are any required)
self.parse_inputs( tool_source )
# Parse tool help
self.parse_help( tool_source )
# Description of outputs produced by an invocation of the tool
self.parse_outputs( tool_source )
# Parse result handling for tool exit codes and stdout/stderr messages:
self.parse_stdio( tool_source )
self.strict_shell = tool_source.parse_strict_shell()
# Any extra generated config files for the tool
self.__parse_config_files(tool_source)
# Action
action = tool_source.parse_action_module()
if action is None:
self.tool_action = self.default_tool_action()
else:
module, cls = action
mod = __import__( module, globals(), locals(), [cls])
self.tool_action = getattr( mod, cls )()
# Tests
self.__parse_tests(tool_source)
# Requirements (dependencies)
requirements, containers = tool_source.parse_requirements_and_containers()
self.requirements = requirements
self.containers = containers
self.citations = self._parse_citations( tool_source )
# Determine if this tool can be used in workflows
self.is_workflow_compatible = self.check_workflow_compatible(tool_source)
self.__parse_trackster_conf( tool_source )
def __parse_legacy_features(self, tool_source):
self.code_namespace = dict()
self.hook_map = {}
self.uihints = {}
if not hasattr(tool_source, 'root'):
return
# TODO: Move following logic into XmlToolSource.
root = tool_source.root
# Load any tool specific code (optional) Edit: INS 5/29/2007,
# allow code files to have access to the individual tool's
# "module" if it has one. Allows us to reuse code files, etc.
if self._allow_code_files:
for code_elem in root.findall("code"):
for hook_elem in code_elem.findall("hook"):
for key, value in hook_elem.items():
# map hook to function
self.hook_map[key] = value
file_name = code_elem.get("file")
code_path = os.path.join( self.tool_dir, file_name )
execfile( code_path, self.code_namespace )
# User interface hints
uihints_elem = root.find( "uihints" )
if uihints_elem is not None:
for key, value in uihints_elem.attrib.iteritems():
self.uihints[ key ] = value
def __parse_tests(self, tool_source):
self.__tests_source = tool_source
self.__tests_populated = False
def __parse_config_files(self, tool_source):
self.config_files = []
if not hasattr(tool_source, 'root'):
return
root = tool_source.root
conf_parent_elem = root.find("configfiles")
if conf_parent_elem is not None:
inputs_elem = conf_parent_elem.find( "inputs" )
if inputs_elem is not None:
name = inputs_elem.get( "name" )
filename = inputs_elem.get( "filename", None )
format = inputs_elem.get("format", "json")
content = dict(format=format)
self.config_files.append( ( name, filename, content ) )
for conf_elem in conf_parent_elem.findall( "configfile" ):
name = conf_elem.get( "name" )
filename = conf_elem.get( "filename", None )
content = conf_elem.text
self.config_files.append( ( name, filename, content ) )
def __parse_trackster_conf(self, tool_source):
self.trackster_conf = None
if not hasattr(tool_source, 'root'):
return
# Trackster configuration.
trackster_conf = tool_source.root.find( "trackster_conf" )
if trackster_conf is not None:
self.trackster_conf = TracksterConfig.parse( trackster_conf )
@property
def tests( self ):
if not self.__tests_populated:
tests_source = self.__tests_source
if tests_source:
try:
self.__tests = parse_tests( self, tests_source )
except:
self.__tests = None
log.exception( "Failed to parse tool tests" )
else:
self.__tests = None
self.__tests_populated = True
return self.__tests
def parse_command( self, tool_source ):
"""
"""
# Command line (template). Optional for tools that do not invoke a local program
command = tool_source.parse_command()
if command is not None:
self.command = command.lstrip() # get rid of leading whitespace
# Must pre-pend this AFTER processing the cheetah command template
self.interpreter = tool_source.parse_interpreter()
else:
self.command = ''
self.interpreter = None
def parse_environment_variables( self, tool_source ):
return tool_source.parse_environment_variables()
def parse_inputs( self, tool_source ):
"""
Parse the "<inputs>" element and create appropriate `ToolParameter`s.
This implementation supports multiple pages and grouping constructs.
"""
# Load parameters (optional)
pages = tool_source.parse_input_pages()
enctypes = set()
if pages.inputs_defined:
if hasattr(pages, "input_elem"):
input_elem = pages.input_elem
# Handle properties of the input form
self.check_values = string_as_bool( input_elem.get("check_values", self.check_values ) )
self.nginx_upload = string_as_bool( input_elem.get( "nginx_upload", self.nginx_upload ) )
self.action = input_elem.get( 'action', self.action )
# If we have an nginx upload, save the action as a tuple instead of
# a string. The actual action needs to get url_for run to add any
# prefixes, and we want to avoid adding the prefix to the
# nginx_upload_path. This logic is handled in the tool_form.mako
# template.
if self.nginx_upload and self.app.config.nginx_upload_path:
if '?' in urllib.unquote_plus( self.action ):
raise Exception( 'URL parameters in a non-default tool action can not be used '
'in conjunction with nginx upload. Please convert them to '
'hidden POST parameters' )
self.action = (self.app.config.nginx_upload_path + '?nginx_redir=',
urllib.unquote_plus(self.action))
self.target = input_elem.get( "target", self.target )
self.method = input_elem.get( "method", self.method )
# Parse the actual parameters
# Handle multiple page case
for page_source in pages.page_sources:
inputs = self.parse_input_elem( page_source, enctypes )
display = page_source.parse_display()
self.inputs_by_page.append( inputs )
self.inputs.update( inputs )
self.display_by_page.append( display )
else:
self.inputs_by_page.append( self.inputs )
self.display_by_page.append( None )
self.display = self.display_by_page[0]
self.npages = len( self.inputs_by_page )
self.last_page = len( self.inputs_by_page ) - 1
self.has_multiple_pages = bool( self.last_page )
# Determine the needed enctype for the form
if len( enctypes ) == 0:
self.enctype = "application/x-www-form-urlencoded"
elif len( enctypes ) == 1:
self.enctype = enctypes.pop()
else:
raise Exception( "Conflicting required enctypes: %s" % str( enctypes ) )
# Check if the tool either has no parameters or only hidden (and
        # thus hardcoded) parameters. FIXME: hidden parameters aren't
# parameters at all really, and should be passed in a different
# way, making this check easier.
template_macros = {}
if hasattr(tool_source, 'root'):
template_macros = template_macro_params(tool_source.root)
self.template_macro_params = template_macros
for param in self.inputs.values():
if not isinstance( param, ( HiddenToolParameter, BaseURLToolParameter ) ):
self.input_required = True
break
def parse_help( self, tool_source ):
"""
Parse the help text for the tool. Formatted in reStructuredText, but
stored as Mako to allow for dynamic image paths.
This implementation supports multiple pages.
"""
# TODO: Allow raw HTML or an external link.
self.__help = HELP_UNINITIALIZED
self.__help_by_page = HELP_UNINITIALIZED
self.__help_source = tool_source
def parse_outputs( self, tool_source ):
"""
Parse <outputs> elements and fill in self.outputs (keyed by name)
"""
self.outputs, self.output_collections = tool_source.parse_outputs(self)
# TODO: Include the tool's name in any parsing warnings.
def parse_stdio( self, tool_source ):
"""
        Parse <stdio> element(s) and fill in self.stdio_exit_codes and
        self.stdio_regexes. Exit codes have a range
and an error type (fault or warning). Stderr and stdout rules have
a regular expression and an error level (fault or warning).
"""
exit_codes, regexes = tool_source.parse_stdio()
self.stdio_exit_codes = exit_codes
self.stdio_regexes = regexes
def _parse_citations( self, tool_source ):
# TODO: Move following logic into ToolSource abstraction.
if not hasattr(tool_source, 'root'):
return []
root = tool_source.root
citations = []
citations_elem = root.find("citations")
if citations_elem is None:
return citations
for citation_elem in citations_elem:
if citation_elem.tag != "citation":
                continue
citation = self.app.citations_manager.parse_citation( citation_elem, self.tool_dir )
if citation:
citations.append( citation )
return citations
def parse_input_elem( self, page_source, enctypes, context=None ):
"""
Parse a parent element whose children are inputs -- these could be
groups (repeat, conditional) or param elements. Groups will be parsed
recursively.
"""
rval = odict()
context = ExpressionContext( rval, context )
for input_source in page_source.parse_input_sources():
# Repeat group
input_type = input_source.parse_input_type()
if input_type == "repeat":
group = Repeat()
group.name = input_source.get( "name" )
group.title = input_source.get( "title" )
group.help = input_source.get( "help", None )
page_source = input_source.parse_nested_inputs_source()
group.inputs = self.parse_input_elem( page_source, enctypes, context )
group.default = int( input_source.get( "default", 0 ) )
group.min = int( input_source.get( "min", 0 ) )
# Use float instead of int so that 'inf' can be used for no max
group.max = float( input_source.get( "max", "inf" ) )
assert group.min <= group.max, \
ValueError( "Min repeat count must be less-than-or-equal to the max." )
# Force default to be within min-max range
group.default = min( max( group.default, group.min ), group.max )
rval[group.name] = group
elif input_type == "conditional":
group = Conditional()
group.name = input_source.get( "name" )
group.value_ref = input_source.get( 'value_ref', None )
group.value_ref_in_group = input_source.get_bool( 'value_ref_in_group', True )
value_from = input_source.get("value_from", None)
if value_from:
value_from = value_from.split( ':' )
group.value_from = locals().get( value_from[0] )
group.test_param = rval[ group.value_ref ]
group.test_param.refresh_on_change = True
for attr in value_from[1].split( '.' ):
group.value_from = getattr( group.value_from, attr )
for case_value, case_inputs in group.value_from( context, group, self ).iteritems():
case = ConditionalWhen()
case.value = case_value
if case_inputs:
page_source = XmlPageSource( ElementTree.XML( "<when>%s</when>" % case_inputs ) )
case.inputs = self.parse_input_elem( page_source, enctypes, context )
else:
case.inputs = odict()
group.cases.append( case )
else:
# Should have one child "input" which determines the case
test_param_input_source = input_source.parse_test_input_source()
group.test_param = self.parse_param_elem( test_param_input_source, enctypes, context )
if group.test_param.optional:
log.warning("Tool with id %s declares a conditional test parameter as optional, this is invalid and will be ignored." % self.id)
group.test_param.optional = False
possible_cases = list( group.test_param.legal_values ) # store possible cases, undefined whens will have no inputs
# Must refresh when test_param changes
group.test_param.refresh_on_change = True
# And a set of possible cases
for (value, case_inputs_source) in input_source.parse_when_input_sources():
case = ConditionalWhen()
case.value = value
case.inputs = self.parse_input_elem( case_inputs_source, enctypes, context )
group.cases.append( case )
try:
possible_cases.remove( case.value )
except:
log.warning( "Tool %s: a when tag has been defined for '%s (%s) --> %s', but does not appear to be selectable." %
( self.id, group.name, group.test_param.name, case.value ) )
for unspecified_case in possible_cases:
log.warning( "Tool %s: a when tag has not been defined for '%s (%s) --> %s', assuming empty inputs." %
( self.id, group.name, group.test_param.name, unspecified_case ) )
case = ConditionalWhen()
case.value = unspecified_case
case.inputs = odict()
group.cases.append( case )
rval[group.name] = group
elif input_type == "section":
group = Section()
group.name = input_source.get( "name" )
group.title = input_source.get( "title" )
group.help = input_source.get( "help", None )
group.expanded = input_source.get_bool( "expanded", False )
page_source = input_source.parse_nested_inputs_source()
group.inputs = self.parse_input_elem( page_source, enctypes, context )
rval[group.name] = group
elif input_type == "upload_dataset":
elem = input_source.elem()
group = UploadDataset()
group.name = elem.get( "name" )
group.title = elem.get( "title" )
group.file_type_name = elem.get( 'file_type_name', group.file_type_name )
group.default_file_type = elem.get( 'default_file_type', group.default_file_type )
group.metadata_ref = elem.get( 'metadata_ref', group.metadata_ref )
rval[ group.file_type_name ].refresh_on_change = True
rval[ group.file_type_name ].refresh_on_change_values = \
self.app.datatypes_registry.get_composite_extensions()
group_page_source = XmlPageSource(elem)
group.inputs = self.parse_input_elem( group_page_source, enctypes, context )
rval[ group.name ] = group
elif input_type == "param":
param = self.parse_param_elem( input_source, enctypes, context )
rval[param.name] = param
if hasattr( param, 'data_ref' ):
param.ref_input = context[ param.data_ref ]
self.input_params.append( param )
return rval
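    # Illustrative sketch (hypothetical tool, not taken from the codebase) of the
    # kind of XML this method walks; element and attribute names mirror the
    # branches above ("param", "conditional", "repeat"):
    #
    #   <inputs>
    #     <param name="input1" type="data" format="fastq" label="Reads"/>
    #     <conditional name="ref_source">
    #       <param name="source" type="select" label="Reference source">
    #         <option value="cached">Built-in index</option>
    #         <option value="history">From history</option>
    #       </param>
    #       <when value="cached">
    #         <param name="index" type="select" label="Genome"/>
    #       </when>
    #       <when value="history">
    #         <param name="ref_file" type="data" format="fasta"/>
    #       </when>
    #     </conditional>
    #     <repeat name="queries" title="Additional queries" min="0" max="inf">
    #       <param name="extra_input" type="data"/>
    #     </repeat>
    #   </inputs>
    #
    # parse_input_elem() would return an odict mapping "input1", "ref_source" and
    # "queries" to a ToolParameter, a Conditional and a Repeat, respectively.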
def parse_param_elem( self, input_source, enctypes, context ):
"""
Parse a single "<param>" element and return a ToolParameter instance.
Also, if the parameter has a 'required_enctype' add it to the set
enctypes.
"""
param = ToolParameter.build( self, input_source )
param_enctype = param.get_required_enctype()
if param_enctype:
enctypes.add( param_enctype )
        # If parameter depends on any other parameters, we must refresh the
        # form when it changes
for name in param.get_dependencies():
# Let it throw exception, but give some hint what the problem might be
if name not in context:
log.error("Could not find dependency '%s' of parameter '%s' in tool %s" % (name, param.name, self.name) )
context[ name ].refresh_on_change = True
return param
def populate_resource_parameters( self, tool_source ):
root = getattr( tool_source, 'root', None )
if root is not None and hasattr( self.app, 'job_config' ) and hasattr( self.app.job_config, 'get_tool_resource_xml' ):
resource_xml = self.app.job_config.get_tool_resource_xml( root.get( 'id' ), self.tool_type )
if resource_xml is not None:
inputs = root.find( 'inputs' )
if inputs is None:
inputs = ElementTree.fromstring( '<inputs/>' )
root.append( inputs )
inputs.append( resource_xml )
def populate_tool_shed_info( self ):
if self.repository_id is not None and self.app.name == 'galaxy':
repository_id = self.app.security.decode_id( self.repository_id )
tool_shed_repository = self.app.install_model.context.query( self.app.install_model.ToolShedRepository ).get( repository_id )
if tool_shed_repository:
self.tool_shed = tool_shed_repository.tool_shed
self.repository_name = tool_shed_repository.name
self.repository_owner = tool_shed_repository.owner
self.changeset_revision = tool_shed_repository.changeset_revision
self.installed_changeset_revision = tool_shed_repository.installed_changeset_revision
@property
def help(self):
if self.__help is HELP_UNINITIALIZED:
self.__ensure_help()
return self.__help
@property
def help_by_page(self):
if self.__help_by_page is HELP_UNINITIALIZED:
self.__ensure_help()
return self.__help_by_page
def __ensure_help(self):
with HELP_UNINITIALIZED:
if self.__help is HELP_UNINITIALIZED:
                self.__initialize_help()
    def __initialize_help(self):
tool_source = self.__help_source
self.__help = None
self.__help_by_page = []
help_header = ""
help_footer = ""
help_text = tool_source.parse_help()
if help_text is not None:
if self.repository_id and help_text.find( '.. image:: ' ) >= 0:
# Handle tool help image display for tools that are contained in repositories in the tool shed or installed into Galaxy.
try:
help_text = suc.set_image_paths( self.app, self.repository_id, help_text )
except Exception, e:
log.exception( "Exception in parse_help, so images may not be properly displayed:\n%s" % str( e ) )
try:
self.__help = Template( rst_to_html(help_text), input_encoding='utf-8',
output_encoding='utf-8', default_filters=[ 'decode.utf8' ],
encoding_errors='replace' )
except:
log.exception( "error in help for tool %s" % self.name )
# Handle deprecated multi-page help text in XML case.
if hasattr(tool_source, "root"):
help_elem = tool_source.root.find("help")
help_header = help_text
help_pages = help_elem.findall( "page" )
# Multiple help page case
if help_pages:
for help_page in help_pages:
self.__help_by_page.append( help_page.text )
help_footer = help_footer + help_page.tail
                # Each page has to be rendered all together because of backreferences allowed by rst
try:
self.__help_by_page = [ Template( rst_to_html( help_header + x + help_footer ),
input_encoding='utf-8', output_encoding='utf-8',
default_filters=[ 'decode.utf8' ],
encoding_errors='replace' )
for x in self.__help_by_page ]
except:
log.exception( "error in multi-page help for tool %s" % self.name )
# Pad out help pages to match npages ... could this be done better?
while len( self.__help_by_page ) < self.npages:
self.__help_by_page.append( self.__help )
def find_output_def( self, name ):
# name is JobToOutputDatasetAssociation name.
        # TODO: too defensive, just throw IndexError and catch somewhere
# up that stack.
if ToolOutputCollectionPart.is_named_collection_part_name( name ):
collection_name, part = ToolOutputCollectionPart.split_output_name( name )
collection_def = self.output_collections.get( collection_name, None )
if not collection_def:
return None
return collection_def.outputs.get( part, None )
else:
return self.outputs.get( name, None )
def check_workflow_compatible( self, tool_source ):
"""
Determine if a tool can be used in workflows. External tools and the
upload tool are currently not supported by workflows.
"""
# Multiple page tools are not supported -- we're eliminating most
# of these anyway
if self.has_multiple_pages:
return False
# This is probably the best bet for detecting external web tools
# right now
if self.tool_type.startswith( 'data_source' ):
return False
if self.produces_collections_of_unknown_type:
# Getting there...
return False
if hasattr( tool_source, "root"):
root = tool_source.root
if not string_as_bool( root.get( "workflow_compatible", "True" ) ):
return False
        # TODO: Any way to capture tools that dynamically change their own
# outputs?
return True
def new_state( self, trans ):
"""
Create a new `DefaultToolState` for this tool. It will be initialized
with default values for inputs.
"""
state = DefaultToolState()
state.inputs = {}
self.fill_in_new_state( trans, self.inputs, state.inputs )
return state
def fill_in_new_state( self, trans, inputs, state, context=None ):
"""
Fill in a tool state dictionary with default values for all parameters
in the dictionary `inputs`. Grouping elements are filled in recursively.
"""
context = ExpressionContext( state, context )
for input in inputs.itervalues():
state[ input.name ] = input.get_initial_value( trans, context )
def get_param( self, key ):
"""
Returns the parameter named `key` or None if there is no such
parameter.
"""
return self.inputs.get( key, None )
def get_hook(self, name):
"""
Returns an object from the code file referenced by `code_namespace`
(this will normally be a callable object)
"""
if self.code_namespace:
# Try to look up hook in self.hook_map, otherwise resort to default
if name in self.hook_map and self.hook_map[name] in self.code_namespace:
return self.code_namespace[self.hook_map[name]]
elif name in self.code_namespace:
return self.code_namespace[name]
return None
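    # Minimal sketch of the hook mechanism (hypothetical file and parameter
    # names): a tool whose XML declares <code file="my_hooks.py"/> has that
    # file loaded into self.code_namespace, so a function defined there as
    #
    #   def validate_input(trans, error_map, param_values, page_param_map):
    #       if int(param_values.get("threshold", 0)) < 0:
    #           error_map["threshold"] = "Threshold must be non-negative."
    #
    # is returned by self.get_hook('validate_input') and invoked with four
    # positional arguments from handle_input() below.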
def visit_inputs( self, values, callback ):
"""
Call the function `callback` on each parameter of this tool. Visits
grouping parameters recursively and constructs unique prefixes for
        each nested set of parameters. The callback method is then called as:
`callback( level_prefix, parameter, parameter_value )`
"""
# HACK: Yet another hack around check_values -- WHY HERE?
if self.check_values:
visit_input_values( self.inputs, values, callback )
def handle_input( self, trans, incoming, history=None ):
"""
Process incoming parameters for this tool from the dict `incoming`,
update the tool state (or create if none existed), and either return
to the form or execute the tool (only if 'execute' was clicked and
there were no errors).
"""
request_context = WorkRequestContext( app=trans.app, user=trans.user, history=history or trans.history )
rerun_remap_job_id = None
if 'rerun_remap_job_id' in incoming:
try:
rerun_remap_job_id = trans.app.security.decode_id( incoming[ 'rerun_remap_job_id' ] )
except Exception, exception:
log.error( str( exception ) )
raise exceptions.MessageException( 'Failure executing tool (attempting to rerun invalid job).' )
# Fixed set of input parameters may correspond to any number of jobs.
# Expand these out to individual parameters for given jobs (tool executions).
expanded_incomings, collection_info = expand_meta_parameters( trans, self, incoming )
if not expanded_incomings:
raise exceptions.MessageException( 'Tool execution failed, trying to run a tool over an empty collection.' )
# Remapping a single job to many jobs doesn't make sense, so disable
# remap if multi-runs of tools are being used.
if rerun_remap_job_id and len( expanded_incomings ) > 1:
raise exceptions.MessageException( 'Failure executing tool (cannot create multiple jobs when remapping existing job).' )
# Process incoming data
validation_timer = ExecutionTimer()
all_errors = []
all_params = []
for expanded_incoming in expanded_incomings:
params = {}
errors = {}
if self.input_translator:
self.input_translator.translate( expanded_incoming )
if not self.check_values:
# If `self.check_values` is false we don't do any checking or
                # processing on input parameters. This is used to pass raw values
# through to/from external sites.
params = expanded_incoming
else:
# Update state for all inputs on the current page taking new
# values from `incoming`.
self.populate_state( request_context, self.inputs, expanded_incoming, params, errors )
# If the tool provides a `validate_input` hook, call it.
validate_input = self.get_hook( 'validate_input' )
if validate_input:
validate_input( request_context, errors, params, self.inputs )
all_errors.append( errors )
all_params.append( params )
log.debug( 'Validated and populated state for tool request %s' % validation_timer )
# If there were errors, we stay on the same page and display them
if any( all_errors ):
raise exceptions.MessageException( ', '.join( [ msg for msg in all_errors[ 0 ].itervalues() ] ), err_data=all_errors[ 0 ] )
else:
execution_tracker = execute_job( trans, self, all_params, history=request_context.history, rerun_remap_job_id=rerun_remap_job_id, collection_info=collection_info )
if execution_tracker.successful_jobs:
return dict( out_data=execution_tracker.output_datasets,
num_jobs=len( execution_tracker.successful_jobs ),
job_errors=execution_tracker.execution_errors,
jobs=execution_tracker.successful_jobs,
output_collections=execution_tracker.output_collections,
implicit_collections=execution_tracker.implicit_collections )
else:
raise exceptions.MessageException( execution_tracker.execution_errors[ 0 ] )
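    # Sketch of the fan-out above (hypothetical scenario): when `incoming`
    # selects two datasets for a single data parameter in multi-run mode,
    # expand_meta_parameters() yields two expanded_incomings dicts, one per
    # dataset. Each dict is validated separately (all_params/all_errors) and
    # execute_job() then creates one job per dict, so a single form submission
    # can result in several tool executions.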
def handle_single_execution( self, trans, rerun_remap_job_id, params, history, mapping_over_collection, execution_cache=None ):
"""
Return a pair with whether execution is successful as well as either
resulting output data or an error message indicating the problem.
"""
try:
job, out_data = self.execute( trans, incoming=params, history=history, rerun_remap_job_id=rerun_remap_job_id, mapping_over_collection=mapping_over_collection, execution_cache=execution_cache )
except httpexceptions.HTTPFound, e:
# if it's a paste redirect exception, pass it up the stack
raise e
except Exception, e:
log.exception('Exception caught while attempting tool execution:')
message = 'Error executing tool: %s' % str(e)
return False, message
if isinstance( out_data, odict ):
return job, out_data.items()
else:
if isinstance( out_data, string_types ):
message = out_data
else:
message = 'Failure executing tool (invalid data returned from tool execution)'
return False, message
def find_fieldstorage( self, x ):
if isinstance( x, FieldStorage ):
raise InterruptedUpload( None )
elif isinstance(x, dict):
[ self.find_fieldstorage( y ) for y in x.values() ]
elif isinstance(x, list):
[ self.find_fieldstorage( y ) for y in x ]
@property
def params_with_missing_data_table_entry( self ):
"""
Return all parameters that are dynamically generated select lists whose
options require an entry not currently in the tool_data_table_conf.xml file.
"""
params = []
for input_param in self.input_params:
if isinstance( input_param, SelectToolParameter ) and input_param.is_dynamic:
options = input_param.options
if options and options.missing_tool_data_table_name and input_param not in params:
params.append( input_param )
return params
@property
def params_with_missing_index_file( self ):
"""
Return all parameters that are dynamically generated
select lists whose options refer to a missing .loc file.
"""
params = []
for input_param in self.input_params:
if isinstance( input_param, SelectToolParameter ) and input_param.is_dynamic:
options = input_param.options
if options and options.missing_index_file and input_param not in params:
params.append( input_param )
return params
def get_static_param_values( self, trans ):
"""
Returns a map of parameter names and values if the tool does not
require any user input. Will raise an exception if any parameter
does require input.
"""
args = dict()
for key, param in self.inputs.iteritems():
# BaseURLToolParameter is now a subclass of HiddenToolParameter, so
# we must check if param is a BaseURLToolParameter first
if isinstance( param, BaseURLToolParameter ):
args[key] = param.get_initial_value( trans, None )
elif isinstance( param, HiddenToolParameter ):
args[key] = model.User.expand_user_properties( trans.user, param.value )
else:
raise Exception( "Unexpected parameter type" )
return args
def execute( self, trans, incoming={}, set_output_hid=True, history=None, **kwargs ):
"""
Execute the tool using parameter values in `incoming`. This just
dispatches to the `ToolAction` instance specified by
`self.tool_action`. In general this will create a `Job` that
when run will build the tool's outputs, e.g. `DefaultToolAction`.
"""
return self.tool_action.execute( self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs )
def params_to_strings( self, params, app ):
return params_to_strings( self.inputs, params, app )
def params_from_strings( self, params, app, ignore_errors=False ):
return params_from_strings( self.inputs, params, app, ignore_errors )
def check_and_update_param_values( self, values, trans, update_values=True, workflow_building_mode=False ):
"""
Check that all parameters have values, and fill in with default
values where necessary. This could be called after loading values
from a database in case new parameters have been added.
"""
messages = {}
request_context = WorkRequestContext( app=trans.app, user=trans.user, history=trans.history, workflow_building_mode=workflow_building_mode )
def validate_inputs( input, value, error, parent, context, prefixed_name, prefixed_label, **kwargs ):
if not error:
value, error = check_param( request_context, input, value, context )
if error:
if update_values:
try:
value = input.get_initial_value( request_context, context )
if not prefixed_name.startswith( '__' ):
messages[ prefixed_name ] = '%s Using default: \'%s\'.' % ( error, value )
parent[ input.name ] = value
except:
messages[ prefixed_name ] = 'Attempt to replace invalid value for \'%s\' failed.' % ( prefixed_label )
else:
messages[ prefixed_name ] = error
visit_input_values( self.inputs, values, validate_inputs )
return messages
def build_dependency_shell_commands( self, job_directory=None ):
"""Return a list of commands to be run to populate the current environment to include this tools requirements."""
return self.app.toolbox.dependency_manager.dependency_shell_commands(
self.requirements,
installed_tool_dependencies=self.installed_tool_dependencies,
tool_dir=self.tool_dir,
job_directory=job_directory,
)
@property
def installed_tool_dependencies(self):
if self.tool_shed_repository:
installed_tool_dependencies = self.tool_shed_repository.tool_dependencies_installed_or_in_error
else:
installed_tool_dependencies = None
return installed_tool_dependencies
def build_redirect_url_params( self, param_dict ):
"""
Substitute parameter values into self.redirect_url_params
"""
if not self.redirect_url_params:
return
redirect_url_params = None
# Substituting parameter values into the url params
redirect_url_params = fill_template( self.redirect_url_params, context=param_dict )
# Remove newlines
redirect_url_params = redirect_url_params.replace( "\n", " " ).replace( "\r", " " )
return redirect_url_params
def parse_redirect_url( self, data, param_dict ):
"""
Parse the REDIRECT_URL tool param. Tools that send data to an external
application via a redirect must include the following 3 tool params:
1) REDIRECT_URL - the url to which the data is being sent
2) DATA_URL - the url to which the receiving application will send an
http post to retrieve the Galaxy data
3) GALAXY_URL - the url to which the external application may post
data as a response
"""
redirect_url = param_dict.get( 'REDIRECT_URL' )
redirect_url_params = self.build_redirect_url_params( param_dict )
# Add the parameters to the redirect url. We're splitting the param
# string on '**^**' because the self.parse() method replaced white
# space with that separator.
params = redirect_url_params.split( '**^**' )
rup_dict = {}
for param in params:
p_list = param.split( '=' )
p_name = p_list[0]
p_val = p_list[1]
rup_dict[ p_name ] = p_val
DATA_URL = param_dict.get( 'DATA_URL', None )
assert DATA_URL is not None, "DATA_URL parameter missing in tool config."
DATA_URL += "/%s/display" % str( data.id )
redirect_url += "?DATA_URL=%s" % DATA_URL
# Add the redirect_url_params to redirect_url
for p_name in rup_dict:
redirect_url += "&%s=%s" % ( p_name, rup_dict[ p_name ] )
# Add the current user email to redirect_url
if data.history.user:
USERNAME = str( data.history.user.email )
else:
USERNAME = 'Anonymous'
redirect_url += "&USERNAME=%s" % USERNAME
return redirect_url
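    # Worked sketch with hypothetical values (no real tool implied): given
    #   param_dict = {'REDIRECT_URL': 'https://viewer.example.org/load',
    #                 'DATA_URL': 'https://galaxy.example.org/datasets', ...}
    # and redirect_url_params that render to 'GENOME=hg19**^**TRACK=reads',
    # the returned URL for dataset id 42 owned by user@example.org is
    #   https://viewer.example.org/load?DATA_URL=https://galaxy.example.org/datasets/42/display&GENOME=hg19&TRACK=reads&USERNAME=user@example.org
    # falling back to USERNAME=Anonymous for anonymous histories.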
def call_hook( self, hook_name, *args, **kwargs ):
"""
Call the custom code hook function identified by 'hook_name' if any,
and return the results
"""
try:
code = self.get_hook( hook_name )
if code:
return code( *args, **kwargs )
except Exception, e:
original_message = ''
if len( e.args ):
original_message = e.args[0]
e.args = ( "Error in '%s' hook '%s', original message: %s" % ( self.name, hook_name, original_message ), )
raise
def exec_before_job( self, app, inp_data, out_data, param_dict={} ):
pass
def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ):
pass
def job_failed( self, job_wrapper, message, exception=False ):
"""
Called when a job has failed
"""
pass
def collect_child_datasets( self, output, job_working_directory ):
"""
Look for child dataset files, create HDA and attach to parent.
"""
children = {}
# Loop through output file names, looking for generated children in
# form of 'child_parentId_designation_visibility_extension'
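        # Hypothetical example of that naming scheme: a file named
        #   child_42_scores_visible_tabular
        # written for the output with id 42 is split on '_' below into
        # designation 'scores', visibility True and extension 'tabular'
        # (so a designation cannot itself contain underscores).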
for name, outdata in output.items():
filenames = []
if 'new_file_path' in self.app.config.collect_outputs_from:
filenames.extend( glob.glob(os.path.join(self.app.config.new_file_path, "child_%i_*" % outdata.id) ) )
if 'job_working_directory' in self.app.config.collect_outputs_from:
filenames.extend( glob.glob(os.path.join(job_working_directory, "child_%i_*" % outdata.id) ) )
for filename in filenames:
if name not in children:
children[name] = {}
fields = os.path.basename(filename).split("_")
designation = fields[2]
visible = fields[3].lower()
if visible == "visible":
visible = True
else:
visible = False
ext = fields[4].lower()
child_dataset = self.app.model.HistoryDatasetAssociation( extension=ext,
parent_id=outdata.id,
designation=designation,
visible=visible,
dbkey=outdata.dbkey,
create_dataset=True,
sa_session=self.sa_session )
self.app.security_agent.copy_dataset_permissions( outdata.dataset, child_dataset.dataset )
# Move data from temp location to dataset location
self.app.object_store.update_from_file(child_dataset.dataset, file_name=filename, create=True)
self.sa_session.add( child_dataset )
self.sa_session.flush()
child_dataset.set_size()
child_dataset.name = "Secondary Dataset (%s)" % ( designation )
child_dataset.init_meta()
child_dataset.set_meta()
child_dataset.set_peek()
# Associate new dataset with job
job = None
for assoc in outdata.creating_job_associations:
job = assoc.job
break
if job:
assoc = self.app.model.JobToOutputDatasetAssociation( '__new_child_file_%s|%s__' % ( name, designation ), child_dataset )
assoc.job = job
self.sa_session.add( assoc )
self.sa_session.flush()
child_dataset.state = outdata.state
self.sa_session.add( child_dataset )
self.sa_session.flush()
# Add child to return dict
children[name][designation] = child_dataset
                # Need to update all associated output hdas, i.e. the history may
                # have been shared while the job was running
for dataset in outdata.dataset.history_associations:
if outdata == dataset:
continue
# Create new child dataset
child_data = child_dataset.copy( parent_id=dataset.id )
self.sa_session.add( child_data )
self.sa_session.flush()
return children
def collect_primary_datasets( self, output, job_working_directory, input_ext, input_dbkey="?" ):
"""
Find any additional datasets generated by a tool and attach (for
cases where number of outputs is not known in advance).
"""
return output_collect.collect_primary_datasets( self, output, job_working_directory, input_ext, input_dbkey=input_dbkey )
def collect_dynamic_collections( self, output, **kwds ):
""" Find files corresponding to dynamically structured collections.
"""
return output_collect.collect_dynamic_collections( self, output, **kwds )
def to_archive(self):
tool = self
tarball_files = []
temp_files = []
tool_xml = open( os.path.abspath( tool.config_file ), 'r' ).read()
# Retrieve tool help images and rewrite the tool's xml into a temporary file with the path
# modified to be relative to the repository root.
image_found = False
if tool.help is not None:
tool_help = tool.help._source
# Check each line of the rendered tool help for an image tag that points to a location under static/
for help_line in tool_help.split( '\n' ):
image_regex = re.compile( 'img alt="[^"]+" src="\${static_path}/([^"]+)"' )
matches = re.search( image_regex, help_line )
if matches is not None:
tool_help_image = matches.group(1)
tarball_path = tool_help_image
filesystem_path = os.path.abspath( os.path.join( self.app.config.root, 'static', tool_help_image ) )
if os.path.exists( filesystem_path ):
tarball_files.append( ( filesystem_path, tarball_path ) )
image_found = True
tool_xml = tool_xml.replace( '${static_path}/%s' % tarball_path, tarball_path )
# If one or more tool help images were found, add the modified tool XML to the tarball instead of the original.
if image_found:
fd, new_tool_config = tempfile.mkstemp( suffix='.xml' )
os.close( fd )
open( new_tool_config, 'w' ).write( tool_xml )
tool_tup = ( os.path.abspath( new_tool_config ), os.path.split( tool.config_file )[-1] )
temp_files.append( os.path.abspath( new_tool_config ) )
else:
tool_tup = ( os.path.abspath( tool.config_file ), os.path.split( tool.config_file )[-1] )
tarball_files.append( tool_tup )
# TODO: This feels hacky.
tool_command = tool.command.strip().split()[0]
tool_path = os.path.dirname( os.path.abspath( tool.config_file ) )
        # If the first token of the tool's command is a script that lives alongside
        # the tool XML, add it to the tarball as well.
if os.path.exists( os.path.join( tool_path, tool_command ) ):
tarball_files.append( ( os.path.join( tool_path, tool_command ), tool_command ) )
# Find and add macros and code files.
for external_file in tool.get_externally_referenced_paths( os.path.abspath( tool.config_file ) ):
external_file_abspath = os.path.abspath( os.path.join( tool_path, external_file ) )
tarball_files.append( ( external_file_abspath, external_file ) )
if os.path.exists( os.path.join( tool_path, "Dockerfile" ) ):
tarball_files.append( ( os.path.join( tool_path, "Dockerfile" ), "Dockerfile" ) )
# Find tests, and check them for test data.
tests = tool.tests
if tests is not None:
for test in tests:
# Add input file tuples to the list.
for input in test.inputs:
for input_value in test.inputs[ input ]:
input_path = os.path.abspath( os.path.join( 'test-data', input_value ) )
if os.path.exists( input_path ):
td_tup = ( input_path, os.path.join( 'test-data', input_value ) )
tarball_files.append( td_tup )
# And add output file tuples to the list.
for label, filename, _ in test.outputs:
output_filepath = os.path.abspath( os.path.join( 'test-data', filename ) )
if os.path.exists( output_filepath ):
td_tup = ( output_filepath, os.path.join( 'test-data', filename ) )
tarball_files.append( td_tup )
for param in tool.input_params:
# Check for tool data table definitions.
if hasattr( param, 'options' ):
if hasattr( param.options, 'tool_data_table' ):
data_table = param.options.tool_data_table
if hasattr( data_table, 'filenames' ):
data_table_definitions = []
for data_table_filename in data_table.filenames:
# FIXME: from_shed_config seems to always be False.
if not data_table.filenames[ data_table_filename ][ 'from_shed_config' ]:
tar_file = data_table.filenames[ data_table_filename ][ 'filename' ] + '.sample'
sample_file = os.path.join( data_table.filenames[ data_table_filename ][ 'tool_data_path' ],
tar_file )
# Use the .sample file, if one exists. If not, skip this data table.
if os.path.exists( sample_file ):
tarfile_path, tarfile_name = os.path.split( tar_file )
tarfile_path = os.path.join( 'tool-data', tarfile_name )
tarball_files.append( ( sample_file, tarfile_path ) )
data_table_definitions.append( data_table.xml_string )
if len( data_table_definitions ) > 0:
# Put the data table definition XML in a temporary file.
table_definition = '<?xml version="1.0" encoding="utf-8"?>\n<tables>\n %s</tables>'
table_definition = table_definition % '\n'.join( data_table_definitions )
fd, table_conf = tempfile.mkstemp()
os.close( fd )
open( table_conf, 'w' ).write( table_definition )
tarball_files.append( ( table_conf, os.path.join( 'tool-data', 'tool_data_table_conf.xml.sample' ) ) )
temp_files.append( table_conf )
# Create the tarball.
fd, tarball_archive = tempfile.mkstemp( suffix='.tgz' )
os.close( fd )
tarball = tarfile.open( name=tarball_archive, mode='w:gz' )
# Add the files from the previously generated list.
for fspath, tarpath in tarball_files:
tarball.add( fspath, arcname=tarpath )
tarball.close()
# Delete any temporary files that were generated.
for temp_file in temp_files:
os.remove( temp_file )
return tarball_archive
def to_dict( self, trans, link_details=False, io_details=False ):
""" Returns dict of tool. """
# Basic information
tool_dict = super( Tool, self ).to_dict()
# Fill in ToolShedRepository info
if hasattr(self, 'tool_shed') and self.tool_shed:
tool_dict['tool_shed_repository'] = {
'name': self.repository_name,
'owner': self.repository_owner,
'changeset_revision': self.changeset_revision,
'tool_shed': self.tool_shed
}
# If an admin user, expose the path to the actual tool config XML file.
if trans.user_is_admin():
tool_dict[ 'config_file' ] = os.path.abspath( self.config_file )
# Add link details.
if link_details:
# Add details for creating a hyperlink to the tool.
if not isinstance( self, DataSourceTool ):
link = url_for( controller='tool_runner', tool_id=self.id )
else:
link = url_for( controller='tool_runner', action='data_source_redirect', tool_id=self.id )
# Basic information
tool_dict.update( { 'link': link,
'min_width': self.uihints.get( 'minwidth', -1 ),
'target': self.target } )
# Add input and output details.
if io_details:
tool_dict[ 'inputs' ] = [ input.to_dict( trans ) for input in self.inputs.values() ]
tool_dict[ 'outputs' ] = [ output.to_dict( app=self.app ) for output in self.outputs.values() ]
tool_dict[ 'panel_section_id' ], tool_dict[ 'panel_section_name' ] = self.get_panel_section()
return tool_dict
def to_json( self, trans, kwd={}, job=None, workflow_mode=False ):
"""
Recursively creates a tool dictionary containing repeats, dynamic options and updated states.
"""
history_id = kwd.get( 'history_id', None )
history = None
try:
if history_id is not None:
history = self.history_manager.get_owned( trans.security.decode_id( history_id ), trans.user, current_history=trans.history )
else:
history = trans.get_history()
if history is None:
raise exceptions.MessageException( 'History unavailable. Please specify a valid history id' )
except Exception, e:
raise exceptions.MessageException( '[history_id=%s] Failed to retrieve history. %s.' % ( history_id, str( e ) ) )
# build request context
request_context = WorkRequestContext( app=trans.app, user=trans.user, history=history, workflow_building_mode=workflow_mode )
# load job parameters into incoming
tool_message = ''
tool_warnings = ''
if job:
try:
job_params = job.get_param_values( self.app, ignore_errors=True )
tool_warnings = self.check_and_update_param_values( job_params, request_context, update_values=False )
self._map_source_to_history( request_context, self.inputs, job_params )
tool_message = self._compare_tool_version( job )
params_to_incoming( kwd, self.inputs, job_params, self.app )
except Exception as e:
raise exceptions.MessageException( str( e ) )
# create parameter object
params = galaxy.util.Params( kwd, sanitize=False )
# populates model from state
def populate_model( inputs, state_inputs, group_inputs, other_values=None ):
other_values = ExpressionContext( state_inputs, other_values )
for input_index, input in enumerate( inputs.itervalues() ):
tool_dict = None
group_state = state_inputs.get( input.name, {} )
if input.type == 'repeat':
tool_dict = input.to_dict( request_context )
group_cache = tool_dict[ 'cache' ] = {}
for i in range( len( group_state ) ):
group_cache[ i ] = {}
populate_model( input.inputs, group_state[ i ], group_cache[ i ], other_values )
elif input.type == 'conditional':
tool_dict = input.to_dict( request_context )
if 'test_param' in tool_dict:
test_param = tool_dict[ 'test_param' ]
test_param[ 'value' ] = input.test_param.value_to_basic( group_state.get( test_param[ 'name' ], input.test_param.get_initial_value( request_context, other_values ) ), self.app )
test_param[ 'text_value' ] = input.test_param.value_to_display_text( test_param[ 'value' ], self.app )
for i in range( len( tool_dict['cases'] ) ):
current_state = {}
if i == group_state.get( '__current_case__' ):
current_state = group_state
populate_model( input.cases[ i ].inputs, current_state, tool_dict[ 'cases' ][ i ][ 'inputs' ], other_values )
elif input.type == 'section':
tool_dict = input.to_dict( request_context )
populate_model( input.inputs, group_state, tool_dict[ 'inputs' ], other_values )
else:
try:
tool_dict = input.to_dict( request_context, other_values=other_values )
tool_dict[ 'value' ] = input.value_to_basic( state_inputs.get( input.name, input.get_initial_value( request_context, other_values ) ), self.app )
tool_dict[ 'text_value' ] = input.value_to_display_text( tool_dict[ 'value' ], self.app )
except Exception as e:
tool_dict = input.to_dict( request_context )
log.exception('tools::to_json() - Skipping parameter expansion \'%s\': %s.' % ( input.name, e ) )
pass
group_inputs[ input_index ] = tool_dict
# expand incoming parameters (parameters might trigger multiple tool executions,
# here we select the first execution only in order to resolve dynamic parameters)
expanded_incomings, _ = expand_meta_parameters( trans, self, params.__dict__ )
if expanded_incomings:
params.__dict__ = expanded_incomings[ 0 ]
# do param translation here, used by datasource tools
if self.input_translator:
self.input_translator.translate( params )
# create tool state
state_inputs = {}
state_errors = {}
self.populate_state( request_context, self.inputs, params.__dict__, state_inputs, state_errors )
# create tool model
tool_model = self.to_dict( request_context )
tool_model[ 'inputs' ] = {}
populate_model( self.inputs, state_inputs, tool_model[ 'inputs' ] )
# create tool help
tool_help = ''
if self.help:
tool_help = self.help.render( static_path=url_for( '/static' ), host_url=url_for( '/', qualified=True ) )
tool_help = unicodify( tool_help, 'utf-8' )
# create tool versions
tool_versions = []
tools = self.app.toolbox.get_loaded_tools_by_lineage( self.id )
for t in tools:
if t.version not in tool_versions:
tool_versions.append( t.version )
# update tool model
tool_model.update({
'id' : self.id,
'help' : tool_help,
'citations' : bool( self.citations ),
'biostar_url' : self.app.config.biostar_url,
'sharable_url' : self.tool_shed_repository.get_sharable_url( self.app ) if self.tool_shed_repository else None,
'message' : tool_message,
'warnings' : tool_warnings,
'versions' : tool_versions,
'requirements' : [ { 'name' : r.name, 'version' : r.version } for r in self.requirements ],
'errors' : state_errors,
'state_inputs' : params_to_strings( self.inputs, state_inputs, self.app ),
'job_id' : trans.security.encode_id( job.id ) if job else None,
'job_remap' : self._get_job_remap( job ),
'history_id' : trans.security.encode_id( history.id ),
'display' : self.display_interface,
'action' : url_for( self.action ),
'method' : self.method,
'enctype' : self.enctype
})
return tool_model
# populates state from incoming parameters
def populate_state( self, request_context, inputs, incoming, state, errors={}, prefix='', context=None ):
context = ExpressionContext( state, context )
for input in inputs.itervalues():
state[ input.name ] = input.get_initial_value( request_context, context )
key = prefix + input.name
group_state = state[ input.name ]
group_prefix = '%s|' % ( key )
if input.type == 'repeat':
rep_index = 0
del group_state[:]
while True:
rep_prefix = '%s_%d' % ( key, rep_index )
if not any( [ incoming_key.startswith( rep_prefix ) for incoming_key in incoming.keys() ] ) and rep_index >= input.min:
break
if rep_index < input.max:
new_state = { '__index__' : rep_index }
group_state.append( new_state )
self.populate_state( request_context, input.inputs, incoming, new_state, errors, prefix=rep_prefix + '|', context=context )
rep_index += 1
elif input.type == 'conditional':
if input.value_ref and not input.value_ref_in_group:
test_param_key = prefix + input.test_param.name
else:
test_param_key = group_prefix + input.test_param.name
test_param_value = incoming.get( test_param_key, group_state.get( input.test_param.name ) )
value, error = check_param( request_context, input.test_param, test_param_value, context )
if error:
errors[ test_param_key ] = error
else:
try:
current_case = input.get_current_case( value )
group_state = state[ input.name ] = {}
self.populate_state( request_context, input.cases[ current_case ].inputs, incoming, group_state, errors, prefix=group_prefix, context=context )
group_state[ '__current_case__' ] = current_case
except Exception:
errors[ test_param_key ] = 'The selected case is unavailable/invalid.'
pass
group_state[ input.test_param.name ] = value
elif input.type == 'section':
self.populate_state( request_context, input.inputs, incoming, group_state, errors, prefix=group_prefix, context=context )
elif input.type == 'upload_dataset':
d_type = input.get_datatype( request_context, context=context )
writable_files = d_type.writable_files
while len( group_state ) > len( writable_files ):
del group_state[ -1 ]
while len( writable_files ) > len( group_state ):
new_state = { '__index__' : len( group_state ) }
for upload_item in input.inputs.itervalues():
new_state[ upload_item.name ] = upload_item.get_initial_value( request_context, context )
group_state.append( new_state )
for i, rep_state in enumerate( group_state ):
rep_index = rep_state[ '__index__' ]
rep_prefix = '%s_%d|' % ( key, rep_index )
self.populate_state( request_context, input.inputs, incoming, rep_state, errors, prefix=rep_prefix, context=context )
else:
param_value = self._get_incoming_value( incoming, key, state.get( input.name ) )
value, error = check_param( request_context, input, param_value, context )
if error:
errors[ key ] = error
state[ input.name ] = value
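    # Illustrative sketch (hypothetical parameter names): for a repeat named
    # 'queries' and a conditional named 'ref_source' whose test parameter is
    # 'source', the flat `incoming` dict uses prefixed keys such as
    #
    #   {'queries_0|extra_input': '7', 'queries_1|extra_input': '9',
    #    'ref_source|source': 'history', 'ref_source|ref_file': '12'}
    #
    # and populate_state() rebuilds the nested `state` structure from them,
    # recording any validation problems in `errors` under the prefixed key.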
def _get_incoming_value( self, incoming, key, default ):
"""
Fetch value from incoming dict directly or check special nginx upload
created variants of this key.
"""
if '__' + key + '__is_composite' in incoming:
composite_keys = incoming[ '__' + key + '__keys' ].split()
value = dict()
for composite_key in composite_keys:
value[ composite_key ] = incoming[ key + '_' + composite_key ]
return value
else:
return incoming.get( key, default )
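    # Hypothetical example of the nginx-upload variant handled above: for
    # key 'files_0|file_data' and
    #   incoming = {'__files_0|file_data__is_composite': '1',
    #               '__files_0|file_data__keys': 'name path',
    #               'files_0|file_data_name': 'reads.fastq',
    #               'files_0|file_data_path': '/tmp/upload/0000012'}
    # this method returns {'name': 'reads.fastq', 'path': '/tmp/upload/0000012'}.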
def _get_job_remap( self, job):
if job:
if job.state == job.states.ERROR:
try:
if [ hda.dependent_jobs for hda in [ jtod.dataset for jtod in job.output_datasets ] if hda.dependent_jobs ]:
return True
except Exception, exception:
log.error( str( exception ) )
pass
return False
def _map_source_to_history( self, trans, tool_inputs, params ):
# Need to remap dataset parameters. Job parameters point to original
        # dataset used; parameter should be the analogous dataset in the
# current history.
history = trans.history
# Create index for hdas.
hda_source_dict = {}
for hda in history.datasets:
key = '%s_%s' % ( hda.hid, hda.dataset.id )
hda_source_dict[ hda.dataset.id ] = hda_source_dict[ key ] = hda
# Ditto for dataset collections.
hdca_source_dict = {}
for hdca in history.dataset_collections:
key = '%s_%s' % ( hdca.hid, hdca.collection.id )
hdca_source_dict[ hdca.collection.id ] = hdca_source_dict[ key ] = hdca
# Map dataset or collection to current history
def map_to_history( value ):
id = None
source = None
if isinstance( value, self.app.model.HistoryDatasetAssociation ):
id = value.dataset.id
source = hda_source_dict
elif isinstance( value, self.app.model.HistoryDatasetCollectionAssociation ):
id = value.collection.id
source = hdca_source_dict
else:
return None
key = '%s_%s' % ( value.hid, id )
if key in source:
return source[ key ]
elif id in source:
return source[ id ]
else:
return None
def mapping_callback( input, value, **kwargs ):
if isinstance( input, DataToolParameter ):
if isinstance(value, list):
values = []
for val in value:
new_val = map_to_history( val )
if new_val:
values.append( new_val )
else:
values.append( val )
return values
else:
return map_to_history( value )
elif isinstance( input, DataCollectionToolParameter ):
return map_to_history( value )
visit_input_values( tool_inputs, params, mapping_callback )
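    # Sketch with made-up ids: for a history holding an HDA with hid 3 over
    # dataset id 42, the index built above is roughly
    #   hda_source_dict == {42: <HDA hid=3>, '3_42': <HDA hid=3>}
    # so map_to_history() first tries the exact '<hid>_<id>' key and then
    # falls back to the dataset id alone when remapping job parameters.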
def _compare_tool_version( self, job ):
"""
Compares a tool version with the tool version from a job (from ToolRunner).
"""
tool_id = job.tool_id
tool_version = job.tool_version
message = ''
try:
select_field, tools, tool = self.app.toolbox.get_tool_components( tool_id, tool_version=tool_version, get_loaded_tools_by_lineage=False, set_selected=True )
if tool is None:
raise exceptions.MessageException( 'This dataset was created by an obsolete tool (%s). Can\'t re-run.' % tool_id )
if ( self.id != tool_id and self.old_id != tool_id ) or self.version != tool_version:
if self.id == tool_id:
if tool_version is None:
# for some reason jobs don't always keep track of the tool version.
message = ''
else:
message = 'This job was run with tool version "%s", which is not available. ' % tool_version
if len( tools ) > 1:
message += 'You can re-run the job with the selected tool or choose another version of the tool.'
else:
message += 'You can re-run the job with this tool version, which is a different version of the original tool.'
else:
new_tool_shed_url = '%s/%s/' % ( tool.tool_shed_repository.get_sharable_url( tool.app ), tool.tool_shed_repository.changeset_revision )
old_tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app, tool_id.split( '/repos/' )[ 0 ] )
old_tool_shed_url = '%s/view/%s/%s/' % ( old_tool_shed_url, tool.repository_owner, tool.repository_name )
message = 'This job was run with <a href=\"%s\" target=\"_blank\">tool id \"%s\"</a>, version "%s", which is not available. ' % ( old_tool_shed_url, tool_id, tool_version )
if len( tools ) > 1:
message += 'You can re-run the job with the selected <a href=\"%s\" target=\"_blank\">tool id \"%s\"</a> or choose another derivation of the tool.' % ( new_tool_shed_url, self.id )
else:
message += 'You can re-run the job with <a href=\"%s\" target=\"_blank\">tool id \"%s\"</a>, which is a derivation of the original tool.' % ( new_tool_shed_url, self.id )
except Exception as e:
raise exceptions.MessageException( str( e ) )
return message
def get_default_history_by_trans( self, trans, create=False ):
return trans.get_history( create=create )
@classmethod
def get_externally_referenced_paths( self, path ):
""" Return relative paths to externally referenced files by the tool
described by file at `path`. External components should not assume things
about the structure of tool xml files (this is the tool's responsibility).
"""
tree = raw_tool_xml_tree(path)
root = tree.getroot()
external_paths = []
for code_elem in root.findall( 'code' ):
external_path = code_elem.get( 'file' )
if external_path:
external_paths.append( external_path )
external_paths.extend( imported_macro_paths( root ) )
# May also need to load external citation files as well at some point.
return external_paths
class OutputParameterJSONTool( Tool ):
"""
Alternate implementation of Tool that provides parameters and other values
JSONified within the contents of an output dataset
"""
tool_type = 'output_parameter_json'
def _prepare_json_list( self, param_list ):
rval = []
for value in param_list:
if isinstance( value, dict ):
rval.append( self._prepare_json_param_dict( value ) )
elif isinstance( value, list ):
rval.append( self._prepare_json_list( value ) )
else:
rval.append( str( value ) )
return rval
def _prepare_json_param_dict( self, param_dict ):
rval = {}
for key, value in param_dict.iteritems():
if isinstance( value, dict ):
rval[ key ] = self._prepare_json_param_dict( value )
elif isinstance( value, list ):
rval[ key ] = self._prepare_json_list( value )
else:
rval[ key ] = str( value )
return rval
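    # Minimal example of the stringification above (hypothetical values):
    #   self._prepare_json_param_dict({'n': 3, 'opts': [1, {'flag': True}]})
    # returns
    #   {'n': '3', 'opts': ['1', {'flag': 'True'}]}
    # i.e. every leaf value is coerced with str() while the dict/list
    # structure is preserved.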
def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
if param_dict is None:
param_dict = {}
json_params = {}
json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
json_params[ 'output_data' ] = []
json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=galaxy.jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
json_filename = None
for i, ( out_name, data ) in enumerate( out_data.iteritems() ):
# use wrapped dataset to access certain values
wrapped_data = param_dict.get( out_name )
# allow multiple files to be created
file_name = str( wrapped_data )
extra_files_path = str( wrapped_data.files_path )
data_dict = dict( out_data_name=out_name,
ext=data.ext,
dataset_id=data.dataset.id,
hda_id=data.id,
file_name=file_name,
extra_files_path=extra_files_path )
json_params[ 'output_data' ].append( data_dict )
if json_filename is None:
json_filename = file_name
out = open( json_filename, 'w' )
out.write( json.dumps( json_params ) )
out.close()
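    # Shape of the JSON written above (all field values are hypothetical):
    #   {"param_dict": {"n": "3", ...},
    #    "output_data": [{"out_data_name": "output1", "ext": "tabular",
    #                     "dataset_id": 42, "hda_id": 7,
    #                     "file_name": "/galaxy/files/dataset_42.dat",
    #                     "extra_files_path": "/galaxy/files/dataset_42_files"}],
    #    "job_config": {"GALAXY_DATATYPES_CONF_FILE": "...",
    #                   "GALAXY_ROOT_DIR": "...",
    #                   "TOOL_PROVIDED_JOB_METADATA_FILE": "..."}}
    # Note that the JSON is written into the first output dataset's file so the
    # tool script can read its parameters back at runtime.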
class DataSourceTool( OutputParameterJSONTool ):
"""
Alternate implementation of Tool for data_source tools -- those that
allow the user to query and extract data from another web site.
"""
tool_type = 'data_source'
default_tool_action = DataSourceToolAction
def _build_GALAXY_URL_parameter( self ):
return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/tool_runner?tool_id=%s" />' % self.id ) )
def parse_inputs( self, tool_source ):
super( DataSourceTool, self ).parse_inputs( tool_source )
# Open all data_source tools in _top.
self.target = '_top'
if 'GALAXY_URL' not in self.inputs:
self.inputs[ 'GALAXY_URL' ] = self._build_GALAXY_URL_parameter()
self.inputs_by_page[0][ 'GALAXY_URL' ] = self.inputs[ 'GALAXY_URL' ]
def exec_before_job( self, app, inp_data, out_data, param_dict=None ):
if param_dict is None:
param_dict = {}
dbkey = param_dict.get( 'dbkey' )
info = param_dict.get( 'info' )
data_type = param_dict.get( 'data_type' )
name = param_dict.get( 'name' )
json_params = {}
json_params[ 'param_dict' ] = self._prepare_json_param_dict( param_dict ) # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
json_params[ 'output_data' ] = []
json_params[ 'job_config' ] = dict( GALAXY_DATATYPES_CONF_FILE=param_dict.get( 'GALAXY_DATATYPES_CONF_FILE' ), GALAXY_ROOT_DIR=param_dict.get( 'GALAXY_ROOT_DIR' ), TOOL_PROVIDED_JOB_METADATA_FILE=galaxy.jobs.TOOL_PROVIDED_JOB_METADATA_FILE )
json_filename = None
for i, ( out_name, data ) in enumerate( out_data.iteritems() ):
# use wrapped dataset to access certain values
wrapped_data = param_dict.get( out_name )
# allow multiple files to be created
cur_base_param_name = 'GALAXY|%s|' % out_name
cur_name = param_dict.get( cur_base_param_name + 'name', name )
cur_dbkey = param_dict.get( cur_base_param_name + 'dkey', dbkey )
cur_info = param_dict.get( cur_base_param_name + 'info', info )
cur_data_type = param_dict.get( cur_base_param_name + 'data_type', data_type )
if cur_name:
data.name = cur_name
if not data.info and cur_info:
data.info = cur_info
if cur_dbkey:
data.dbkey = cur_dbkey
if cur_data_type:
data.extension = cur_data_type
file_name = str( wrapped_data )
extra_files_path = str( wrapped_data.files_path )
data_dict = dict( out_data_name=out_name,
ext=data.ext,
dataset_id=data.dataset.id,
hda_id=data.id,
file_name=file_name,
extra_files_path=extra_files_path )
json_params[ 'output_data' ].append( data_dict )
if json_filename is None:
json_filename = file_name
out = open( json_filename, 'w' )
out.write( json.dumps( json_params ) )
out.close()
class AsyncDataSourceTool( DataSourceTool ):
tool_type = 'data_source_async'
def _build_GALAXY_URL_parameter( self ):
return ToolParameter.build( self, ElementTree.XML( '<param name="GALAXY_URL" type="baseurl" value="/async/%s" />' % self.id ) )
class DataDestinationTool( Tool ):
tool_type = 'data_destination'
class SetMetadataTool( Tool ):
"""
Tool implementation for special tool that sets metadata on an existing
dataset.
"""
tool_type = 'set_metadata'
requires_setting_metadata = False
def exec_after_process( self, app, inp_data, out_data, param_dict, job=None ):
for name, dataset in inp_data.iteritems():
external_metadata = JobExternalOutputMetadataWrapper( job )
if external_metadata.external_metadata_set_successfully( dataset, app.model.context ):
dataset.metadata.from_JSON_dict( external_metadata.get_output_filenames_by_dataset( dataset, app.model.context ).filename_out )
else:
dataset._state = model.Dataset.states.FAILED_METADATA
self.sa_session.add( dataset )
self.sa_session.flush()
return
# If setting external metadata has failed, how can we inform the
# user? For now, we'll leave the default metadata and set the state
# back to its original.
dataset.datatype.after_setting_metadata( dataset )
if job and job.tool_id == '1.0.0':
dataset.state = param_dict.get( '__ORIGINAL_DATASET_STATE__' )
else:
# Revert dataset.state to fall back to dataset.dataset.state
dataset._state = None
# Need to reset the peek, which may rely on metadata
dataset.set_peek()
self.sa_session.add( dataset )
self.sa_session.flush()
def job_failed( self, job_wrapper, message, exception=False ):
job = job_wrapper.sa_session.query( model.Job ).get( job_wrapper.job_id )
if job:
inp_data = {}
for dataset_assoc in job.input_datasets:
inp_data[dataset_assoc.name] = dataset_assoc.dataset
return self.exec_after_process( job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job=job )
class ExportHistoryTool( Tool ):
tool_type = 'export_history'
class ImportHistoryTool( Tool ):
tool_type = 'import_history'
class DataManagerTool( OutputParameterJSONTool ):
tool_type = 'manage_data'
default_tool_action = DataManagerToolAction
def __init__( self, config_file, root, app, guid=None, data_manager_id=None, **kwds ):
self.data_manager_id = data_manager_id
super( DataManagerTool, self ).__init__( config_file, root, app, guid=guid, **kwds )
if self.data_manager_id is None:
self.data_manager_id = self.id
def exec_after_process( self, app, inp_data, out_data, param_dict, job=None, **kwds ):
assert self.allow_user_access( job.user ), "You must be an admin to access this tool."
# run original exec_after_process
super( DataManagerTool, self ).exec_after_process( app, inp_data, out_data, param_dict, job=job, **kwds )
# process results of tool
if job and job.state == job.states.ERROR:
return
# Job state may now be 'running' instead of previous 'error', but datasets are still set to e.g. error
for dataset in out_data.itervalues():
if dataset.state != dataset.states.OK:
return
data_manager_id = job.data_manager_association.data_manager_id
data_manager = self.app.data_managers.get_manager( data_manager_id, None )
assert data_manager is not None, "Invalid data manager (%s) requested. It may have been removed before the job completed." % ( data_manager_id )
data_manager.process_result( out_data )
def get_default_history_by_trans( self, trans, create=False ):
def _create_data_manager_history( user ):
history = trans.app.model.History( name='Data Manager History (automatically created)', user=user )
data_manager_association = trans.app.model.DataManagerHistoryAssociation( user=user, history=history )
trans.sa_session.add_all( ( history, data_manager_association ) )
trans.sa_session.flush()
return history
user = trans.user
assert user, 'You must be logged in to use this tool.'
assert self.allow_user_access( user ), "You must be an admin to access this tool."
history = user.data_manager_histories
if not history:
# create
if create:
history = _create_data_manager_history( user )
else:
history = None
else:
for history in reversed( history ):
history = history.history
if not history.deleted:
break
if history.deleted:
if create:
history = _create_data_manager_history( user )
else:
history = None
return history
def allow_user_access( self, user, attempting_access=True ):
"""
:param user: model object representing user.
:type user: galaxy.model.User
:param attempting_access: is the user attempting to do something with the
the tool (set false for incidental checks like toolbox
listing)
:type attempting_access: bool
:returns: bool -- Whether the user is allowed to access the tool.
Data Manager tools are only accessible to admins.
"""
if super( DataManagerTool, self ).allow_user_access( user ) and self.app.config.is_admin_user( user ):
return True
# If this is just an incidental check - do not log the scary message
# about users attempting to do something problematic.
if attempting_access:
if user:
user = user.id
log.debug( "User (%s) attempted to access a data manager tool (%s), but is not an admin.", user, self.id )
return False
# Populate tool_type to ToolClass mappings
tool_types = {}
for tool_class in [ Tool, SetMetadataTool, OutputParameterJSONTool,
DataManagerTool, DataSourceTool, AsyncDataSourceTool,
DataDestinationTool ]:
tool_types[ tool_class.tool_type ] = tool_class
# ---- Utility classes to be factored out -----------------------------------
class TracksterConfig:
""" Trackster configuration encapsulation. """
def __init__( self, actions ):
self.actions = actions
@staticmethod
def parse( root ):
actions = []
for action_elt in root.findall( "action" ):
actions.append( SetParamAction.parse( action_elt ) )
return TracksterConfig( actions )
class SetParamAction:
""" Set parameter action. """
def __init__( self, name, output_name ):
self.name = name
self.output_name = output_name
@staticmethod
def parse( elt ):
""" Parse action from element. """
return SetParamAction( elt.get( "name" ), elt.get( "output_name" ) )
class BadValue( object ):
def __init__( self, value ):
self.value = value
class InterruptedUpload( Exception ):
pass
| 47.862914 | 249 | 0.602986 |
029abe629472399ca12593136eee3c058f346634 | 1,988 | py | Python | speecht/vocabulary.py | Mehdi-Habibi/MySpeechT | 00b07f4fff8b8b0cee1e01fbd07eb9032143eda9 | [
"Apache-2.0"
]
| 154 | 2016-11-21T19:31:40.000Z | 2020-07-30T12:18:56.000Z | speecht/vocabulary.py | sporiyano/speechT | e68dc77e3e0c62be6822963c521750dfcc4272f1 | [
"Apache-2.0"
]
| 23 | 2017-04-21T15:26:09.000Z | 2019-06-24T13:11:13.000Z | speecht/vocabulary.py | sporiyano/speechT | e68dc77e3e0c62be6822963c521750dfcc4272f1 | [
"Apache-2.0"
]
| 37 | 2016-11-03T09:37:34.000Z | 2020-03-02T07:41:17.000Z | # Copyright 2016 Louis Kirsch. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
APOSTROPHE = 26
SPACE_ID = 27
A_ASCII_CODE = ord('a')
SIZE = 28
def letter_to_id(letter):
"""
Converts `letter` to vocabulary id
Args:
letter: letter to convert, allowed is a-z, apostrophe and space
Returns: the vocabulary encoded letter
"""
if letter == ' ':
return SPACE_ID
if letter == '\'':
return APOSTROPHE
return ord(letter) - A_ASCII_CODE
def id_to_letter(identifier):
"""
Converts the vocabulary encoded letter `identifier` to its character representation
Args:
identifier: encoded letter to decode
Returns: the character letter
"""
if identifier == SPACE_ID:
return ' '
if identifier == APOSTROPHE:
return '\''
return chr(identifier + A_ASCII_CODE)
def sentence_to_ids(sentence):
"""
Convert a string `sentence` to its encoded representation
Args:
sentence: sentence of type string
Returns: list of ints (encoded characters)
"""
return [letter_to_id(letter) for letter in sentence.lower()]
def ids_to_sentence(identifiers):
"""
Convert a complete list of encoded characters `identifiers` to their character representation
Args:
identifiers: list of ints (encoded characters)
Returns: decoded sentence as string
"""
return ''.join(id_to_letter(identifier) for identifier in identifiers)
| 24.243902 | 96 | 0.694165 |
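A minimal round-trip sketch of the vocabulary mapping above; it assumes the module is importable as speecht.vocabulary, as the repository path suggests:

# Round-trip check for the 28-symbol vocabulary defined above
# (a-z -> 0-25, apostrophe -> 26, space -> 27).
from speecht import vocabulary

ids = vocabulary.sentence_to_ids("it's a test")
print(ids)                               # [8, 19, 26, 18, 27, 0, 27, 19, 4, 18, 19]
print(vocabulary.ids_to_sentence(ids))   # "it's a test"
assert vocabulary.ids_to_sentence(ids) == "it's a test"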
e42b9fc9668fde06e771f70503dc05ab143406a4 | 2,927 | py | Python | tests/test_MulticlassEvaluator.py | AlexeyVatolin/sentence-transformers | 084b80cd351b135a1d48211ec4f4aea37f01a641 | [
"Apache-2.0"
]
| null | null | null | tests/test_MulticlassEvaluator.py | AlexeyVatolin/sentence-transformers | 084b80cd351b135a1d48211ec4f4aea37f01a641 | [
"Apache-2.0"
]
| null | null | null | tests/test_MulticlassEvaluator.py | AlexeyVatolin/sentence-transformers | 084b80cd351b135a1d48211ec4f4aea37f01a641 | [
"Apache-2.0"
]
| null | null | null | """
Tests the correct computation of evaluation scores from MulticlassEvaluator and BinaryClassificationEvaluator
"""
from sentence_transformers import SentenceTransformer, evaluation, models, losses, InputExample
import unittest
import torch
from torch.utils.data import DataLoader
from sklearn.metrics import f1_score, accuracy_score
import numpy as np
from sentence_transformers.evaluation import MulticlassEvaluator
class MulticlassEvaluatorTest(unittest.TestCase):
def test_multiclass(self):
transformer = models.Transformer('prajjwal1/bert-tiny')
model = SentenceTransformer(modules=[
transformer,
models.Pooling(transformer.get_word_embedding_dimension())
])
softmax_loss = losses.SoftmaxLoss(model, transformer.get_word_embedding_dimension(), num_labels=3)
samples = [
InputExample(texts=["Hello Word, a first test sentence", "Hello Word, a other test sentence"], label=0),
InputExample(texts=["Hello Word, a second test sentence", "Hello Word, a other test sentence"], label=1),
InputExample(texts=["Hello Word, a third test sentence", "Hello Word, a other test sentence"], label=2)
]
dataloader = DataLoader(samples, batch_size=1)
evaluator = MulticlassEvaluator(dataloader, softmax_model=softmax_loss)
result = evaluator(model)
i = 0
# assert emb.shape == (transformer.get_word_embedding_dimension() * num_heads,)
#
# # Single sentence as list
# emb = model.encode(["Hello Word, a test sentence"])
# assert emb.shape == (1, transformer.get_word_embedding_dimension() * num_heads
def test_find_best_f1_and_threshold(self):
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
best_f1, best_precision, best_recall, threshold = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_find_best_accuracy_and_threshold(self):
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
max_acc, threshold = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(y_pred_cosine, y_true,
high_score_more_similar=True)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
| 50.465517 | 127 | 0.689443 |
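The threshold search exercised by test_find_best_f1_and_threshold above can be reproduced with a small brute-force sketch; this only illustrates the idea being tested and is not the library's implementation:

import numpy as np
from sklearn.metrics import f1_score

def brute_force_best_f1(scores, labels):
    # Try every observed score as a cut-off and keep the one with the best F1.
    best_f1, best_threshold = 0.0, 0.0
    for threshold in np.unique(scores):
        preds = (scores >= threshold).astype(int)
        score = f1_score(labels, preds)
        if score > best_f1:
            best_f1, best_threshold = score, threshold
    return best_f1, best_threshold

labels = np.random.randint(0, 2, 100)
scores = np.random.randn(100)
print(brute_force_best_f1(scores, labels))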
c671a638a900f177d5c4e011b79cb992537acbb9 | 11,997 | py | Python | reefbot_msgs/src/reefbot_msgs/msg/_ImageRegion.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
]
| null | null | null | reefbot_msgs/src/reefbot_msgs/msg/_ImageRegion.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
]
| null | null | null | reefbot_msgs/src/reefbot_msgs/msg/_ImageRegion.py | MRSD2018/reefbot-1 | a595ca718d0cda277726894a3105815cef000475 | [
"MIT"
]
| null | null | null | """autogenerated by genpy from reefbot_msgs/ImageRegion.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
import sensor_msgs.msg
class ImageRegion(genpy.Message):
_md5sum = "65cc1a85d539c02ff4e503921c8e033b"
_type = "reefbot_msgs/ImageRegion"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# Message that species a region in the image to look at to identify
# the species.
#
# Author: Mark Desnoyer ([email protected])
# Date: June 2010
# Bounding box in the image that specifies the region
sensor_msgs/RegionOfInterest bounding_box
# Binary mask image specifying where the species is in the bounding
# box. A pixel value of zero means to ignore the pixel. If this image
# is empty, then the entire box should be used, but if it's not empty,
# it must be the same size as bbox or else an error will be generated.
sensor_msgs/Image mask
================================================================================
MSG: sensor_msgs/RegionOfInterest
# This message is used to specify a region of interest within an image.
#
# When used to specify the ROI setting of the camera when the image was
# taken, the height and width fields should either match the height and
# width fields for the associated image; or height = width = 0
# indicates that the full resolution image was captured.
uint32 x_offset # Leftmost pixel of the ROI
# (0 if the ROI includes the left edge of the image)
uint32 y_offset # Topmost pixel of the ROI
# (0 if the ROI includes the top edge of the image)
uint32 height # Height of ROI
uint32 width # Width of ROI
# True if a distinct rectified ROI should be calculated from the "raw"
# ROI in this message. Typically this should be False if the full image
# is captured (ROI not used), and True if a subwindow is captured (ROI
# used).
bool do_rectify
================================================================================
MSG: sensor_msgs/Image
# This message contains an uncompressed image
# (0, 0) is at top-left corner of image
#
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of cameara
# +x should point to the right in the image
# +y should point down in the image
# +z should point into to plane of the image
# If the frame_id here and the frame_id of the CameraInfo
# message associated with the image conflict
# the behavior is undefined
uint32 height # image height, that is, number of rows
uint32 width # image width, that is, number of columns
# The legal values for encoding are in file src/image_encodings.cpp
# If you want to standardize a new string format, join
# [email protected] and send an email proposing a new encoding.
string encoding # Encoding of pixels -- channel meaning, ordering, size
# taken from the list of strings in src/image_encodings.cpp
uint8 is_bigendian # is this data bigendian?
uint32 step # Full row length in bytes
uint8[] data # actual matrix data, size is (step * rows)
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['bounding_box','mask']
_slot_types = ['sensor_msgs/RegionOfInterest','sensor_msgs/Image']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
bounding_box,mask
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ImageRegion, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.bounding_box is None:
self.bounding_box = sensor_msgs.msg.RegionOfInterest()
if self.mask is None:
self.mask = sensor_msgs.msg.Image()
else:
self.bounding_box = sensor_msgs.msg.RegionOfInterest()
self.mask = sensor_msgs.msg.Image()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_4IB3I.pack(_x.bounding_box.x_offset, _x.bounding_box.y_offset, _x.bounding_box.height, _x.bounding_box.width, _x.bounding_box.do_rectify, _x.mask.header.seq, _x.mask.header.stamp.secs, _x.mask.header.stamp.nsecs))
_x = self.mask.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.mask.height, _x.mask.width))
_x = self.mask.encoding
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_BI.pack(_x.mask.is_bigendian, _x.mask.step))
_x = self.mask.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.bounding_box is None:
self.bounding_box = sensor_msgs.msg.RegionOfInterest()
if self.mask is None:
self.mask = sensor_msgs.msg.Image()
end = 0
_x = self
start = end
end += 29
(_x.bounding_box.x_offset, _x.bounding_box.y_offset, _x.bounding_box.height, _x.bounding_box.width, _x.bounding_box.do_rectify, _x.mask.header.seq, _x.mask.header.stamp.secs, _x.mask.header.stamp.nsecs,) = _struct_4IB3I.unpack(str[start:end])
self.bounding_box.do_rectify = bool(self.bounding_box.do_rectify)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.mask.header.frame_id = str[start:end].decode('utf-8')
else:
self.mask.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.mask.height, _x.mask.width,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.mask.encoding = str[start:end].decode('utf-8')
else:
self.mask.encoding = str[start:end]
_x = self
start = end
end += 5
(_x.mask.is_bigendian, _x.mask.step,) = _struct_BI.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.mask.data = str[start:end].decode('utf-8')
else:
self.mask.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_4IB3I.pack(_x.bounding_box.x_offset, _x.bounding_box.y_offset, _x.bounding_box.height, _x.bounding_box.width, _x.bounding_box.do_rectify, _x.mask.header.seq, _x.mask.header.stamp.secs, _x.mask.header.stamp.nsecs))
_x = self.mask.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.mask.height, _x.mask.width))
_x = self.mask.encoding
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_BI.pack(_x.mask.is_bigendian, _x.mask.step))
_x = self.mask.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.bounding_box is None:
self.bounding_box = sensor_msgs.msg.RegionOfInterest()
if self.mask is None:
self.mask = sensor_msgs.msg.Image()
end = 0
_x = self
start = end
end += 29
(_x.bounding_box.x_offset, _x.bounding_box.y_offset, _x.bounding_box.height, _x.bounding_box.width, _x.bounding_box.do_rectify, _x.mask.header.seq, _x.mask.header.stamp.secs, _x.mask.header.stamp.nsecs,) = _struct_4IB3I.unpack(str[start:end])
self.bounding_box.do_rectify = bool(self.bounding_box.do_rectify)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.mask.header.frame_id = str[start:end].decode('utf-8')
else:
self.mask.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.mask.height, _x.mask.width,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.mask.encoding = str[start:end].decode('utf-8')
else:
self.mask.encoding = str[start:end]
_x = self
start = end
end += 5
(_x.mask.is_bigendian, _x.mask.step,) = _struct_BI.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.mask.data = str[start:end].decode('utf-8')
else:
self.mask.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_4IB3I = struct.Struct("<4IB3I")
_struct_2I = struct.Struct("<2I")
_struct_BI = struct.Struct("<BI")
| 37.726415 | 248 | 0.645411 |
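A hypothetical construction of the message defined above, with made-up box values; it assumes a ROS workspace in which reefbot_msgs and sensor_msgs are built and importable:

from io import BytesIO
from reefbot_msgs.msg import ImageRegion
from sensor_msgs.msg import RegionOfInterest

region = ImageRegion()
region.bounding_box = RegionOfInterest(x_offset=10, y_offset=20,
                                       height=64, width=48, do_rectify=False)
# Leaving `mask` empty means "use the whole bounding box", per the comment in
# the message definition above.

buff = BytesIO()
region.serialize(buff)
print(len(buff.getvalue()), "bytes serialized")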
494a05a78966b975a6f0acabab2821e49ff1ebb3 | 22,669 | py | Python | model_zoo/research/cv/centernet_det/src/utils.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
]
| 2 | 2021-07-08T13:10:42.000Z | 2021-11-08T02:48:57.000Z | model_zoo/research/cv/centernet_det/src/utils.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
]
| null | null | null | model_zoo/research/cv/centernet_det/src/utils.py | peixinhou/mindspore | fcb2ec2779b753e95c762cf292b23bd81d1f561b | [
"Apache-2.0"
]
| null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Functional Cells to be used.
"""
import math
import time
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.nn.learning_rate_schedule import LearningRateSchedule, PolynomialDecayLR, WarmUpLR
from mindspore.train.callback import Callback
clip_grad = ops.MultitypeFuncGraph("clip_grad")
@clip_grad.register("Number", "Tensor")
def _clip_grad(clip_value, grad):
"""
Clip gradients.
Inputs:
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
dt = ops.dtype(grad)
new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
return new_grad
class ClipByNorm(nn.Cell):
"""
Clip grads by gradient norm
Args:
clip_norm(float): The target norm of gradient clipping. Default: 1.0
Returns:
Tuple of Tensors, gradients after clip.
"""
def __init__(self, clip_norm=1.0):
super(ClipByNorm, self).__init__()
self.hyper_map = ops.HyperMap()
self.clip_norm = clip_norm
def construct(self, grads):
grads = self.hyper_map(ops.partial(clip_grad, self.clip_norm), grads)
return grads
reciprocal = ops.Reciprocal()
grad_scale = ops.MultitypeFuncGraph("grad_scale")
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
return grad * reciprocal(scale)
class GradScale(nn.Cell):
"""
Gradients scale
Args: None
Returns:
Tuple of Tensors, gradients after rescale.
"""
def __init__(self):
super(GradScale, self).__init__()
self.hyper_map = ops.HyperMap()
def construct(self, scale, grads):
grads = self.hyper_map(ops.partial(grad_scale, scale), grads)
return grads
class ClipByValue(nn.Cell):
"""
Clip tensor by value
Args: None
Returns:
Tensor, output after clip.
"""
def __init__(self):
super(ClipByValue, self).__init__()
self.min = ops.Minimum()
self.max = ops.Maximum()
def construct(self, x, clip_value_min, clip_value_max):
x_min = self.min(x, clip_value_max)
x_max = self.max(x_min, clip_value_min)
return x_max
class GatherFeature(nn.Cell):
"""
Gather feature at specified position
Args:
enable_cpu_gather (bool): Use the CPU operator GatherD to gather features or not, an adaptation for CPU. Default: True.
Returns:
Tensor, feature at the specified position
"""
def __init__(self, enable_cpu_gather=True):
super(GatherFeature, self).__init__()
self.tile = ops.Tile()
self.shape = ops.Shape()
self.concat = ops.Concat(axis=1)
self.reshape = ops.Reshape()
self.enable_cpu_gather = enable_cpu_gather
if self.enable_cpu_gather:
self.gather_nd = ops.GatherD()
self.expand_dims = ops.ExpandDims()
else:
self.gather_nd = ops.GatherNd()
def construct(self, feat, ind):
"""gather by specified index"""
if self.enable_cpu_gather:
_, _, c = self.shape(feat)
# (b, N, c)
index = self.expand_dims(ind, -1)
index = self.tile(index, (1, 1, c))
feat = self.gather_nd(feat, 1, index)
else:
# (b, N)->(b*N, 1)
b, N = self.shape(ind)
ind = self.reshape(ind, (-1, 1))
ind_b = nn.Range(0, b, 1)()
ind_b = self.reshape(ind_b, (-1, 1))
ind_b = self.tile(ind_b, (1, N))
ind_b = self.reshape(ind_b, (-1, 1))
index = self.concat((ind_b, ind))
# (b, N, 2)
index = self.reshape(index, (b, N, -1))
# (b, N, c)
feat = self.gather_nd(feat, index)
return feat
class TransposeGatherFeature(nn.Cell):
"""
Transpose and gather feature at specified position
Args: None
Returns:
Tensor, feature at the specified position
"""
def __init__(self):
super(TransposeGatherFeature, self).__init__()
self.shape = ops.Shape()
self.reshape = ops.Reshape()
self.transpose = ops.Transpose()
self.perm_list = (0, 2, 3, 1)
self.gather_feat = GatherFeature()
def construct(self, feat, ind):
# (b, c, h, w)->(b, h*w, c)
feat = self.transpose(feat, self.perm_list)
b, _, _, c = self.shape(feat)
feat = self.reshape(feat, (b, -1, c))
# (b, N, c)
feat = self.gather_feat(feat, ind)
return feat
class Sigmoid(nn.Cell):
"""
Sigmoid and then Clip by value
Args: None
Returns:
Tensor, feature after sigmoid and clip.
"""
def __init__(self):
super(Sigmoid, self).__init__()
self.cast = ops.Cast()
self.dtype = ops.DType()
self.sigmoid = nn.Sigmoid()
self.clip_by_value = ops.clip_by_value
def construct(self, x, min_value=1e-4, max_value=1-1e-4):
x = self.sigmoid(x)
dt = self.dtype(x)
x = self.clip_by_value(x, self.cast(ops.tuple_to_array((min_value,)), dt),
self.cast(ops.tuple_to_array((max_value,)), dt))
return x
class FocalLoss(nn.Cell):
"""
Wrapper for focal loss.
Args:
alpha(int): Hyperparameter in the focal loss that modulates the per-example loss weight. Default: 2.
beta(int): Hyperparameter in the focal loss that handles the imbalance between positive and negative samples. Default: 4.
Returns:
Tensor, focal loss.
"""
def __init__(self, alpha=2, beta=4):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.beta = beta
self.pow = ops.Pow()
self.log = ops.Log()
self.select = ops.Select()
self.equal = ops.Equal()
self.less = ops.Less()
self.cast = ops.Cast()
self.fill = ops.Fill()
self.dtype = ops.DType()
self.shape = ops.Shape()
self.reduce_sum = ops.ReduceSum()
def construct(self, out, target):
"""focal loss"""
pos_inds = self.cast(self.equal(target, 1.0), mstype.float32)
neg_inds = self.cast(self.less(target, 1.0), mstype.float32)
neg_weights = self.pow(1 - target, self.beta)
pos_loss = self.log(out) * self.pow(1 - out, self.alpha) * pos_inds
neg_loss = self.log(1 - out) * self.pow(out, self.alpha) * neg_weights * neg_inds
num_pos = self.reduce_sum(pos_inds, ())
num_pos = self.select(self.equal(num_pos, 0.0),
self.fill(self.dtype(num_pos), self.shape(num_pos), 1.0), num_pos)
pos_loss = self.reduce_sum(pos_loss, ())
neg_loss = self.reduce_sum(neg_loss, ())
loss = - (pos_loss + neg_loss) / num_pos
return loss
class GHMCLoss(nn.Cell):
"""
Wrapper for gradient harmonizing loss for classification.
Args:
bins(int): Number of bins. Default: 10.
momentum(float): Momentum for moving gradient density. Default: 0.0.
Returns:
Tensor, GHM loss for classification.
"""
def __init__(self, bins=10, momentum=0.0):
super(GHMCLoss, self).__init__()
self.bins = bins
self.momentum = momentum
edges_left = np.array([float(x) / bins for x in range(bins)], dtype=np.float32)
self.edges_left = Tensor(edges_left.reshape((bins, 1, 1, 1, 1)))
edges_right = np.array([float(x) / bins for x in range(1, bins + 1)], dtype=np.float32)
edges_right[-1] += 1e-4
self.edges_right = Tensor(edges_right.reshape((bins, 1, 1, 1, 1)))
if momentum >= 0:
self.acc_sum = Parameter(initializer(0, [bins], mstype.float32))
self.abs = ops.Abs()
self.log = ops.Log()
self.cast = ops.Cast()
self.select = ops.Select()
self.reshape = ops.Reshape()
self.reduce_sum = ops.ReduceSum()
self.max = ops.Maximum()
self.less = ops.Less()
self.equal = ops.Equal()
self.greater = ops.Greater()
self.logical_and = ops.LogicalAnd()
self.greater_equal = ops.GreaterEqual()
self.zeros_like = ops.ZerosLike()
self.expand_dims = ops.ExpandDims()
def construct(self, out, target):
"""GHM loss for classification"""
g = self.abs(out - target)
g = self.expand_dims(g, 0) # (1, b, c, h, w)
pos_inds = self.cast(self.equal(target, 1.0), mstype.float32)
tot = self.max(self.reduce_sum(pos_inds, ()), 1.0)
# (bin, b, c, h, w)
inds_mask = self.logical_and(self.greater_equal(g, self.edges_left), self.less(g, self.edges_right))
zero_matrix = self.cast(self.zeros_like(inds_mask), mstype.float32)
inds = self.cast(inds_mask, mstype.float32)
# (bins,)
num_in_bin = self.reduce_sum(inds, (1, 2, 3, 4))
valid_bins = self.greater(num_in_bin, 0)
num_valid_bin = self.reduce_sum(self.cast(valid_bins, mstype.float32), ())
if self.momentum > 0:
self.acc_sum = self.select(valid_bins,
self.momentum * self.acc_sum + (1 - self.momentum) * num_in_bin,
self.acc_sum)
acc_sum = self.acc_sum
acc_sum = self.reshape(acc_sum, (self.bins, 1, 1, 1, 1))
acc_sum = acc_sum + zero_matrix
weights = self.select(self.equal(inds, 1), tot / acc_sum, zero_matrix)
# (b, c, h, w)
weights = self.reduce_sum(weights, 0)
else:
num_in_bin = self.reshape(num_in_bin, (self.bins, 1, 1, 1, 1))
num_in_bin = num_in_bin + zero_matrix
weights = self.select(self.equal(inds, 1), tot / num_in_bin, zero_matrix)
# (b, c, h, w)
weights = self.reduce_sum(weights, 0)
weights = weights / num_valid_bin
ghmc_loss = (target - 1.0) * self.log(1.0 - out) - target * self.log(out)
ghmc_loss = self.reduce_sum(ghmc_loss * weights, ()) / tot
return ghmc_loss
class GHMRLoss(nn.Cell):
"""
Wrapper for gradient harmonizing loss for regression.
Args:
bins(int): Number of bins. Default: 10.
momentum(float): Momentum for moving gradient density. Default: 0.0.
mu(float): Hyperparameter for the smoothed L1 loss. Default: 0.02.
Returns:
Tensor, GHM loss for regression.
"""
def __init__(self, bins=10, momentum=0.0, mu=0.02):
super(GHMRLoss, self).__init__()
self.bins = bins
self.momentum = momentum
self.mu = mu
edges_left = np.array([float(x) / bins for x in range(bins)], dtype=np.float32)
self.edges_left = Tensor(edges_left.reshape((bins, 1, 1, 1, 1)))
edges_right = np.array([float(x) / bins for x in range(1, bins + 1)], dtype=np.float32)
edges_right[-1] += 1e-4
self.edges_right = Tensor(edges_right.reshape((bins, 1, 1, 1, 1)))
if momentum >= 0:
self.acc_sum = Parameter(initializer(0, [bins], mstype.float32))
self.abs = ops.Abs()
self.sqrt = ops.Sqrt()
self.cast = ops.Cast()
self.select = ops.Select()
self.reshape = ops.Reshape()
self.reduce_sum = ops.ReduceSum()
self.max = ops.Maximum()
self.less = ops.Less()
self.equal = ops.Equal()
self.greater = ops.Greater()
self.logical_and = ops.LogicalAnd()
self.greater_equal = ops.GreaterEqual()
self.zeros_like = ops.ZerosLike()
self.expand_dims = ops.ExpandDims()
def construct(self, out, target):
"""GHM loss for regression"""
# ASL1 loss
diff = out - target
# gradient length
g = self.abs(diff / self.sqrt(self.mu * self.mu + diff * diff))
g = self.expand_dims(g, 0) # (1, b, c, h, w)
pos_inds = self.cast(self.equal(target, 1.0), mstype.float32)
tot = self.max(self.reduce_sum(pos_inds, ()), 1.0)
# (bin, b, c, h, w)
inds_mask = self.logical_and(self.greater_equal(g, self.edges_left), self.less(g, self.edges_right))
zero_matrix = self.cast(self.zeros_like(inds_mask), mstype.float32)
inds = self.cast(inds_mask, mstype.float32)
# (bins,)
num_in_bin = self.reduce_sum(inds, (1, 2, 3, 4))
valid_bins = self.greater(num_in_bin, 0)
num_valid_bin = self.reduce_sum(self.cast(valid_bins, mstype.float32), ())
if self.momentum > 0:
self.acc_sum = self.select(valid_bins,
self.momentum * self.acc_sum + (1 - self.momentum) * num_in_bin,
self.acc_sum)
acc_sum = self.acc_sum
acc_sum = self.reshape(acc_sum, (self.bins, 1, 1, 1, 1))
acc_sum = acc_sum + zero_matrix
weights = self.select(self.equal(inds, 1), tot / acc_sum, zero_matrix)
# (b, c, h, w)
weights = self.reduce_sum(weights, 0)
else:
num_in_bin = self.reshape(num_in_bin, (self.bins, 1, 1, 1, 1))
num_in_bin = num_in_bin + zero_matrix
weights = self.select(self.equal(inds, 1), tot / num_in_bin, zero_matrix)
# (b, c, h, w)
weights = self.reduce_sum(weights, 0)
weights = weights / num_valid_bin
ghmr_loss = self.sqrt(diff * diff + self.mu * self.mu) - self.mu
ghmr_loss = self.reduce_sum(ghmr_loss * weights, ()) / tot
return ghmr_loss
class RegLoss(nn.Cell): #reg_l1_loss
"""
Wrapper for regression loss.
Args:
mode(str): L1 or Smoothed L1 loss. Default: "l1"
Returns:
Tensor, regression loss.
"""
def __init__(self, mode='l1'):
super(RegLoss, self).__init__()
self.reduce_sum = ops.ReduceSum()
self.cast = ops.Cast()
self.expand_dims = ops.ExpandDims()
self.reshape = ops.Reshape()
self.gather_feature = TransposeGatherFeature()
if mode == 'l1':
self.loss = nn.L1Loss(reduction='sum')
elif mode == 'sl1':
self.loss = nn.SmoothL1Loss()
else:
self.loss = None
def construct(self, output, mask, ind, target):
pred = self.gather_feature(output, ind)
mask = self.cast(mask, mstype.float32)
num = self.reduce_sum(mask, ())
mask = self.expand_dims(mask, 2)
target = target * mask
pred = pred * mask
regr_loss = self.loss(pred, target)
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
class RegWeightedL1Loss(nn.Cell):
"""
Wrapper for weighted regression loss.
Args: None
Returns:
Tensor, regression loss.
"""
def __init__(self):
super(RegWeightedL1Loss, self).__init__()
self.reduce_sum = ops.ReduceSum()
self.gather_feature = TransposeGatherFeature()
self.cast = ops.Cast()
self.l1_loss = nn.L1Loss(reduction='sum')
def construct(self, output, mask, ind, target):
pred = self.gather_feature(output, ind)
mask = self.cast(mask, mstype.float32)
num = self.reduce_sum(mask, ())
loss = self.l1_loss(pred * mask, target * mask)
loss = loss / (num + 1e-4)
return loss
class LossCallBack(Callback):
"""
Monitor the loss in training.
If the loss is NAN or INF, training is terminated.
Args:
dataset_size (int): Dataset size. Default: -1.
enable_static_time (bool): Enable static time cost, an adaptation for CPU. Default: False.
"""
def __init__(self, dataset_size=-1, enable_static_time=False):
super(LossCallBack, self).__init__()
self._dataset_size = dataset_size
self._enable_static_time = enable_static_time
def step_begin(self, run_context):
"""
Get beginning time of each step
"""
self._begin_time = time.time()
def step_end(self, run_context):
"""
Print loss after each step
"""
cb_params = run_context.original_args()
if self._dataset_size > 0:
percent, epoch_num = math.modf(cb_params.cur_step_num / self._dataset_size)
if percent == 0:
percent = 1
epoch_num -= 1
if self._enable_static_time:
cur_time = time.time()
time_per_step = cur_time - self._begin_time
print("epoch: {}, current epoch percent: {}, step: {}, time per step: {} s, outputs are {}"
.format(int(epoch_num), "%.3f" % percent, cb_params.cur_step_num, "%.3f" % time_per_step,
str(cb_params.net_outputs)), flush=True)
else:
print("epoch: {}, current epoch percent: {}, step: {}, outputs are {}"
.format(int(epoch_num), "%.3f" % percent, cb_params.cur_step_num,
str(cb_params.net_outputs)), flush=True)
else:
print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
str(cb_params.net_outputs)), flush=True)
class CenterNetPolynomialDecayLR(LearningRateSchedule):
"""
Warmup and polynomial decay learning rate for CenterNet network.
Args:
learning_rate(float): Initial learning rate.
end_learning_rate(float): Final learning rate after decay.
warmup_steps(int): Warmup steps.
decay_steps(int): Decay steps.
power(int): Learning rate decay factor.
Returns:
Tensor, learning rate in time.
"""
def __init__(self, learning_rate, end_learning_rate, warmup_steps, decay_steps, power):
super(CenterNetPolynomialDecayLR, self).__init__()
self.warmup_flag = False
if warmup_steps > 0:
self.warmup_flag = True
self.warmup_lr = WarmUpLR(learning_rate, warmup_steps)
self.decay_lr = PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power)
self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
self.greater = ops.Greater()
self.one = Tensor(np.array([1.0]).astype(np.float32))
self.cast = ops.Cast()
def construct(self, global_step):
decay_lr = self.decay_lr(global_step)
if self.warmup_flag:
is_warmup = self.cast(self.greater(self.warmup_steps, global_step), mstype.float32)
warmup_lr = self.warmup_lr(global_step)
lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr
else:
lr = decay_lr
return lr
class CenterNetMultiEpochsDecayLR(LearningRateSchedule):
"""
Warmup and multi-steps decay learning rate for CenterNet network.
Args:
learning_rate(float): Initial learning rate.
warmup_steps(int): Warmup steps.
multi_epochs(list int): The epochs at which the learning rate is decayed.
steps_per_epoch(int): How many steps for each epoch.
factor(int): Learning rate decay factor. Default: 10.
Returns:
Tensor, learning rate in time.
"""
def __init__(self, learning_rate, warmup_steps, multi_epochs, steps_per_epoch, factor=10):
super(CenterNetMultiEpochsDecayLR, self).__init__()
self.warmup_flag = False
if warmup_steps > 0:
self.warmup_flag = True
self.warmup_lr = WarmUpLR(learning_rate, warmup_steps)
self.decay_lr = MultiEpochsDecayLR(learning_rate, multi_epochs, steps_per_epoch, factor)
self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
self.greater = ops.Greater()
self.one = Tensor(np.array([1.0]).astype(np.float32))
self.cast = ops.Cast()
def construct(self, global_step):
decay_lr = self.decay_lr(global_step)
if self.warmup_flag:
is_warmup = self.cast(self.greater(self.warmup_steps, global_step), mstype.float32)
warmup_lr = self.warmup_lr(global_step)
lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr
else:
lr = decay_lr
# print('CenterNetMultiEpochsDecayLR:',lr.dtype)
return lr
class MultiEpochsDecayLR(LearningRateSchedule):
"""
Calculate learning rate based on a multi-epochs decay function.
Args:
learning_rate(float): Initial learning rate.
multi_epochs(list int): The epochs at which the learning rate is decayed.
steps_per_epoch(int): How many steps for each epoch.
factor(int): Learning rate decay factor. Default: 10.
Returns:
Tensor, learning rate.
"""
def __init__(self, learning_rate, multi_epochs, steps_per_epoch, factor=10):
super(MultiEpochsDecayLR, self).__init__()
if not isinstance(multi_epochs, (list, tuple)):
raise TypeError("multi_epochs must be list or tuple.")
self.multi_epochs = Tensor(np.array(multi_epochs, dtype=np.float32) * steps_per_epoch)
self.num = len(multi_epochs)
self.start_learning_rate = learning_rate
self.steps_per_epoch = steps_per_epoch
self.factor = factor
self.pow = ops.Pow()
self.cast = ops.Cast()
self.less_equal = ops.LessEqual()
self.reduce_sum = ops.ReduceSum()
def construct(self, global_step):
cur_step = self.cast(global_step, mstype.float32)
multi_epochs = self.cast(self.multi_epochs, mstype.float32)
epochs = self.cast(self.less_equal(multi_epochs, cur_step), mstype.float32)
lr = self.start_learning_rate / self.pow(self.factor, self.reduce_sum(epochs, ()))
return lr
| 35.309969 | 118 | 0.603776 |
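A plain-Python sketch of the arithmetic behind MultiEpochsDecayLR above; the milestone epochs, steps-per-epoch and starting rate are made-up values, and the real class operates on MindSpore tensors inside construct():

def multi_epochs_decay_lr(step, start_lr, multi_epochs, steps_per_epoch, factor=10):
    # lr = start_lr / factor ** (number of milestone epochs already passed)
    milestones = [epoch * steps_per_epoch for epoch in multi_epochs]
    passed = sum(1 for m in milestones if m <= step)
    return start_lr / (factor ** passed)

for step in (0, 8999, 9000, 12000):
    print(step, multi_epochs_decay_lr(step, 1.2e-4, [90, 120], 100))
# 0 and 8999 -> 1.2e-4, 9000 -> 1.2e-5, 12000 -> 1.2e-6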
11da27cd9925e5d76f06ff68d377bb095e8cd8b2 | 1,188 | py | Python | workflow_array_ephys/pipeline.py | dimitri-yatsenko/workflow-ephys | ea89aa92da527960a6993245e53881bffd40e123 | [
"MIT"
]
| 1 | 2021-07-28T07:54:40.000Z | 2021-07-28T07:54:40.000Z | workflow_array_ephys/pipeline.py | dimitri-yatsenko/workflow-ephys | ea89aa92da527960a6993245e53881bffd40e123 | [
"MIT"
]
| 28 | 2021-03-24T16:22:03.000Z | 2022-03-25T16:02:05.000Z | workflow_array_ephys/pipeline.py | dimitri-yatsenko/workflow-ephys | ea89aa92da527960a6993245e53881bffd40e123 | [
"MIT"
]
| 6 | 2021-04-27T23:15:36.000Z | 2022-01-24T21:29:43.000Z | import datajoint as dj
from element_animal import subject
from element_lab import lab
from element_session import session
from element_array_ephys import probe, ephys
from element_animal.subject import Subject
from element_lab.lab import Source, Lab, Protocol, User, Project
from element_session.session import Session
from .paths import get_ephys_root_data_dir, get_session_directory
if 'custom' not in dj.config:
dj.config['custom'] = {}
db_prefix = dj.config['custom'].get('database.prefix', '')
# Activate "lab", "subject", "session" schema ----------------------------------
lab.activate(db_prefix + 'lab')
subject.activate(db_prefix + 'subject', linking_module=__name__)
session.activate(db_prefix + 'session', linking_module=__name__)
# Declare table "SkullReference" for use in element-array-ephys ----------------
@lab.schema
class SkullReference(dj.Lookup):
definition = """
skull_reference : varchar(60)
"""
contents = zip(['Bregma', 'Lambda'])
# Activate "ephys" schema ------------------------------------------------------
ephys.activate(db_prefix + 'ephys',
db_prefix + 'probe',
linking_module=__name__)
| 27.627907 | 80 | 0.66835 |
29361a6123f58daf5a9f423360f8dfb90ffa384a | 497 | py | Python | nomi/migrations/0121_auto_20170805_1213.py | TheDjangoBoys/Gymkhana-Nominations | 6ce13fb3a21fe91630e0c8fdaf597e61c87f2d06 | [
"MIT"
]
| null | null | null | nomi/migrations/0121_auto_20170805_1213.py | TheDjangoBoys/Gymkhana-Nominations | 6ce13fb3a21fe91630e0c8fdaf597e61c87f2d06 | [
"MIT"
]
| null | null | null | nomi/migrations/0121_auto_20170805_1213.py | TheDjangoBoys/Gymkhana-Nominations | 6ce13fb3a21fe91630e0c8fdaf597e61c87f2d06 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 12:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nomi', '0120_auto_20170805_1210'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='end_date',
),
migrations.RemoveField(
model_name='post',
name='post_session',
),
]
| 20.708333 | 48 | 0.581489 |
5b2a64cca7083722a08fe3a607628e9bc3e5f5e5 | 1,002 | py | Python | aqt/jax_legacy/jax/wmt_mlperf/hparams_configs/experimental/small_model_4bit_weights_and_fixed_acts.py | ychzhang/aqt | 54427ea65120af980b8f2540e94ebe2db1dd3ccd | [
"Apache-2.0"
]
| 2 | 2022-01-13T06:34:00.000Z | 2022-03-30T17:08:55.000Z | aqt/jax_legacy/jax/wmt_mlperf/hparams_configs/experimental/small_model_4bit_weights_and_fixed_acts.py | ychzhang/aqt | 54427ea65120af980b8f2540e94ebe2db1dd3ccd | [
"Apache-2.0"
]
| 3 | 2022-03-30T19:48:22.000Z | 2022-03-31T20:47:30.000Z | aqt/jax_legacy/jax/wmt_mlperf/hparams_configs/experimental/small_model_4bit_weights_and_fixed_acts.py | ychzhang/aqt | 54427ea65120af980b8f2540e94ebe2db1dd3ccd | [
"Apache-2.0"
]
| 3 | 2022-01-13T00:10:17.000Z | 2022-03-29T17:31:16.000Z | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small version of full_model_4bit_weights_and_fixed_acts."""
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs import base_config
from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.experimental import small_model_bfloat16
def get_config():
config = small_model_bfloat16.get_config(
quant_target=base_config.QuantTarget.WEIGHTS_AND_FIXED_ACTS)
config.weight_prec = 4
config.quant_act.prec = 4
return config
| 37.111111 | 91 | 0.788423 |
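A hypothetical smoke test for the config module above; the import path mirrors the repository layout and assumes the aqt package is installed:

from aqt.jax_legacy.jax.wmt_mlperf.hparams_configs.experimental import (
    small_model_4bit_weights_and_fixed_acts,
)

cfg = small_model_4bit_weights_and_fixed_acts.get_config()
print(cfg.weight_prec, cfg.quant_act.prec)  # expected: 4 4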
367626f40298ba8214c2c80e13657abbcf7d77cf | 599 | py | Python | reachme/complaint/migrations/0009_alter_complaint_subcategory_servant_role.py | akshita-k06/Socialshout | ef45c7f4a2d73f613760f59d507554465ba19264 | [
"MIT"
]
| 1 | 2021-06-28T08:20:25.000Z | 2021-06-28T08:20:25.000Z | reachme/complaint/migrations/0009_alter_complaint_subcategory_servant_role.py | akshita-k06/SocialShout | ef45c7f4a2d73f613760f59d507554465ba19264 | [
"MIT"
]
| null | null | null | reachme/complaint/migrations/0009_alter_complaint_subcategory_servant_role.py | akshita-k06/SocialShout | ef45c7f4a2d73f613760f59d507554465ba19264 | [
"MIT"
]
| 1 | 2021-06-27T14:45:59.000Z | 2021-06-27T14:45:59.000Z | # Generated by Django 3.2 on 2021-06-14 05:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('complaint', '0008_alter_complaint_subcategory_servant_role'),
]
operations = [
migrations.AlterField(
model_name='complaint_subcategory',
name='servant_role',
field=models.SmallIntegerField(choices=[(1, 'MSEB Chief Engineer'), (2, 'Health Department'), (3, 'Water Conservation officer'), (4, 'Public grievance officer'), (5, 'Department Safety Officer')], default=0),
),
]
| 31.526316 | 220 | 0.656093 |
4fa93542a574917045fd4e0f8e634b7399f863b5 | 1,907 | py | Python | service-rpc/src/pypi/setup.py | Genius-pig/incubator-iotdb | 67c0b62dbd7d1423a3ee2bf13683815a5bdb3d4a | [
"Apache-2.0"
]
| 1 | 2019-12-23T14:27:56.000Z | 2019-12-23T14:27:56.000Z | service-rpc/src/pypi/setup.py | rongbo-j/incubator-iotdb | 96fd6d70e35f8bbbcea1a0fc2d48989dd61c5379 | [
"Apache-2.0"
]
| null | null | null | service-rpc/src/pypi/setup.py | rongbo-j/incubator-iotdb | 96fd6d70e35f8bbbcea1a0fc2d48989dd61c5379 | [
"Apache-2.0"
]
| null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import setuptools
import io
try:
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
except FileNotFoundError:
long_description = ''
print(long_description)
setuptools.setup(
name="apache-iotdb", # Replace with your own username
version="0.9.0",
author=" Apache Software Foundation",
author_email="[email protected]",
description="Apache IoTDB (incubating) client API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/apache/incubator-iotdb",
packages=setuptools.find_packages(),
install_requires=[
'thrift',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires='>=3.7',
license='Apache License, Version 2.0',
website='https://iotdb.apache.org',
)
| 34.053571 | 71 | 0.698479 |
9291a3c63408da6afb0946c54331f76850050bfa | 512 | py | Python | views/charts/ClosenessCentralityChart.py | alvarofpp/ufrn-imd1155-brazil-air-traffic-network-analysis | 41b9b24a238110c17c09e2a4e2df542c6bcbce1b | [
"MIT"
]
| null | null | null | views/charts/ClosenessCentralityChart.py | alvarofpp/ufrn-imd1155-brazil-air-traffic-network-analysis | 41b9b24a238110c17c09e2a4e2df542c6bcbce1b | [
"MIT"
]
| null | null | null | views/charts/ClosenessCentralityChart.py | alvarofpp/ufrn-imd1155-brazil-air-traffic-network-analysis | 41b9b24a238110c17c09e2a4e2df542c6bcbce1b | [
"MIT"
]
| null | null | null | import networkx as nx
from typing import Dict
from .BokehChart import BokehChart
class ClosenessCentralityChart(BokehChart):
_title = 'Closeness Centrality'
_attribute = 'closeness_centrality'
def __init__(self, **kwargs):
kwargs.update({
'tooltips': [
('closeness centrality', '@' + self._attribute)
],
})
super().__init__(**kwargs)
def manipulate_data(self, graph) -> Dict:
return dict(nx.closeness_centrality(graph))
| 25.6 | 63 | 0.630859 |
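For reference, the dictionary that manipulate_data returns can be reproduced with networkx alone on a toy graph; Bokeh is not needed for this check:

import networkx as nx

g = nx.path_graph(4)  # 0 - 1 - 2 - 3
print(dict(nx.closeness_centrality(g)))
# {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5}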
b2f448445656385de2eaf744dfdc6dff0eb9055f | 1,566 | py | Python | Assignment/Week6/LinkMySQL.py | wcsodw1/Wehelp-assignments | 1225e4d8614cb89b5ed4ea8ad58411a52cacc2f8 | [
"MIT"
]
| null | null | null | Assignment/Week6/LinkMySQL.py | wcsodw1/Wehelp-assignments | 1225e4d8614cb89b5ed4ea8ad58411a52cacc2f8 | [
"MIT"
]
| null | null | null | Assignment/Week6/LinkMySQL.py | wcsodw1/Wehelp-assignments | 1225e4d8614cb89b5ed4ea8ad58411a52cacc2f8 | [
"MIT"
]
| null | null | null |
# python LinkMySQL.py
import pymysql
# Open the database connection
db = pymysql.connect(host='localhost',
user='root',
password='root',
database='website')
# Use the cursor() method to create a cursor object
cursor = db.cursor()
# Use the execute() method to run SQL; drop the table if it already exists
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
# Create the table with a prepared statement
sql = """CREATE TABLE MEMEBER_DATA (
NAME CHAR(20) NOT NULL,
ACCOUNT CHAR(20),
PASSWORD CHAR(20)
)"""
cursor.execute(sql)
# Close the database connection
db.close()
# config = {
# 'host': '127.0.0.1',
# 'port': 3306,
# 'user': 'root',
# 'password': 'root',
# 'db': 'website',
# 'charset': 'utf8mb4',
# 'cursorclass': pymysql.cursors.DictCursor,
# }
# with connection.cursor() as cursor:
# # Execute the SQL statement to run the query
# sql = 'SHOW DATABASES'
# cursor.execute(sql)
# result = cursor.fetchall()
# print(result)
# db = pymysql.connect(host='127.0.0.1', port=3306, user='root',
# passwd='root', charset='utf8mb4')
# cursor = db.cursor()
# sql = 'SELECT VERSION()'
# cursor.execute(sql)
# data = cursor.fetchone()
# print("Database version : %s " % data)
# # sql2 = "SHOW DATABASES"
# sql2 = "SELECT * FROM member_"
# cursor.execute(sql2)
# restul = cursor.fetchall()
# print(restul)
# cursor.close()
# import mysql.connector
# from mysql.connector import Error
# maxdb = mysql.connector.connect(
# host="127.0.0.1",
# user="root",
# password="password",
# database="website",
# auth_plugin='root'
# )
# cursor = maxdb.cursor()
| 21.452055 | 64 | 0.5894 |
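A tidier sketch of the same pymysql flow, with explicit cleanup and a parameterized insert; the connection settings are the same placeholders used in the script above:

import pymysql

db = pymysql.connect(host='localhost', user='root', password='root',
                     database='website', charset='utf8mb4')
try:
    with db.cursor() as cursor:
        # Table name kept exactly as in the script above.
        cursor.execute("""CREATE TABLE IF NOT EXISTS MEMEBER_DATA (
                              NAME CHAR(20) NOT NULL,
                              ACCOUNT CHAR(20),
                              PASSWORD CHAR(20)
                          )""")
        cursor.execute("INSERT INTO MEMEBER_DATA (NAME, ACCOUNT, PASSWORD) VALUES (%s, %s, %s)",
                       ("test user", "test_account", "secret"))
    db.commit()
finally:
    db.close()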
e67436a1651546e9c261f400a2a86b3225fdcdad | 8,930 | py | Python | fies/fiesutils.py | nespinoza/ceres | e5426067bc5855b0f690e4a51b7d6fd2a48471c6 | [
"MIT"
]
| null | null | null | fies/fiesutils.py | nespinoza/ceres | e5426067bc5855b0f690e4a51b7d6fd2a48471c6 | [
"MIT"
]
| null | null | null | fies/fiesutils.py | nespinoza/ceres | e5426067bc5855b0f690e4a51b7d6fd2a48471c6 | [
"MIT"
]
| null | null | null | import pyfits
import numpy as np
import scipy
import copy
import glob
import os
import matplotlib.pyplot as plt
import sys
from pylab import *
base = "../"
sys.path.append(base+"utils/GLOBALutils")
import GLOBALutils
def get_thar_offsets(lines_thar, order_dir='wavcals/', pref='order_', suf='.iwdat', delt_or=10, del_width=200.,binning=1):
start_or = int(.5*delt_or)
xcs = []
for ii in range(delt_or,len(lines_thar)-delt_or):
thar_order = lines_thar[ii]
xct = []
for order in range(ii-start_or,ii+start_or):
order_s = str(order)
if (order < 10):
order_s = '0' + order_s
if os.access(order_dir+pref+order_s+suf,os.F_OK):
f = open(order_dir+pref+order_s+suf,'r')
llins = f.readlines()
if True:
pixel_centers_0 = []
for line in llins:
w = line.split()
nlines = int(w[0])
for j in range(nlines):
pixel_centers_0.append(float(w[2*j+1])*1./float(binning))
pixel_centers_0 = np.array(pixel_centers_0).astype('int')
#plot(thar_order)
#plot(pixel_centers_0,thar_order[pixel_centers_0],'ro')
#print order, order_s
#show()
ml = np.array(pixel_centers_0) - 2
mh = np.array(pixel_centers_0) + 2
if len(ml)>0:
xc,offs = GLOBALutils.XCorPix( thar_order, ml, mh, del_width=del_width)
else:
xc = np.zeros(len(offs))
if len(xct) == 0:
xct = xc.copy()
else:
xct = np.vstack((xct,xc))
if len(xcs) == 0:
xcs = xct.copy()
else:
xcs += xct
maxes, maxvels = [],[]
for i in range(xcs.shape[0]):
maxes.append(xcs[i].max())
maxvels.append(offs[np.argmax(xcs[i])])
#plot(offs,xcs[i])
#show()
maxes,maxvels = np.array(maxes),np.array(maxvels)
orders_offset = -start_or + np.argmax(maxes)
rough_shift = maxvels[np.argmax(maxes)]
return orders_offset, rough_shift
def ra_from_sec(ra,time=True):
ra = float(ra)
sign = ' '
if ra < 0:
sign = '-'
ra *= -1
hh = ra/3600.
mm = (hh - int(hh))*60.
ss = (mm - int(mm))*60.
shh = str(int(hh))
smm = str(int(mm))
sss = str(np.around(ss,2))
if hh<10:
shh = '0' + shh
if mm<10:
smm = '0' + smm
if ss<10:
sss = '0' + sss
return sign + shh + ':' + smm + ':' + sss
def FileClassify(diri, log,binning=1,mode='F1', dark_corr=False):
"""
Classifies all files in a directory and writes a night log of science images
"""
# define output lists
sim_sci = []
biases = []
flats = []
ThAr_ref = []
ThAr_ref_dates = []
ThAr_co = []
ThAr_co_dates = []
ThAr_sim = []
ThAr_sim_dates = []
flat_ref_dates = []
bias_ref_dates = []
obnames = []
exptimes = []
darks = []
flats_co = []
flats_co_dates = []
sdarks = []
if dark_corr and os.access(diri+'/darks.txt',os.F_OK):
fd = open(diri+'/darks.txt','r')
ds = fd.readlines()
for dk in ds:
sdarks.append(diri+dk[:-1])
sdarks = np.array(sdarks)
f = open(log,'w')
#Do not consider the images specified in dir+badfiles.txt
bad_files = []
if os.access(diri+'bad_files.txt',os.F_OK):
bf = open(diri+'bad_files.txt')
linesbf = bf.readlines()
for line in linesbf:
bad_files.append(diri+line[:-1])
bf.close()
all_files = glob.glob(diri+"/*fits")
for archivo in all_files:
#print archivo
dump = False
for bf in bad_files:
if archivo == bf:
dump = True
break
isdark=False
for df in sdarks:
if archivo == df:
darks.append(archivo)
isdark=True
if dump == False and isdark == False:
print archivo
h = pyfits.open(archivo)
hd = pyfits.getheader(archivo)
if int(h[0].header['DETXBIN']) == binning and int(h[0].header['DETYBIN']) == binning and (mode in h[0].header['FIFMSKNM']) and h[0].header['IMAGETYP'] != 'COUNTTEST':
print archivo, h[0].header['IMAGETYP'], h[0].header['SHSTAT'], h[0].header['EXPTIME'], h[0].header['OBJECT'], h[0].header['TCSTGT'], int(h[0].header['DETYBIN'])
if h[0].header['IMAGETYP'] == 'BIAS':
biases.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
bias_ref_dates.append( mjd )
elif h[0].header['IMAGETYP'] == 'FLAT':
flats.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
flat_ref_dates.append( mjd )
if h[0].header['FICARMID'] == 6 and h[0].header['FILMP1'] == 1 and h[0].header['FILMP6']==0:
flats_co.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
flats_co_dates.append( mjd )
else:
flats.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
flat_ref_dates.append( mjd )
sc = pyfits.getdata(archivo)
#plot(sc[1000])
elif h[0].header['IMAGETYP'] == 'WAVE':
ThAr_ref.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_ref_dates.append( mjd )
elif ((mode=='F3' or mode=='F4') and h[0].header['FICARMID'] == 6 and h[0].header['FILMP4'] == 0 and h[0].header['FILMP7']==1)\
or (mode=='F1' and h[0].header['FICARMID'] == 2 and h[0].header['FILMP4'] == 0 and h[0].header['FILMP7']==1):
ThAr_ref.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_ref_dates.append( mjd )
elif h[0].header['FICARMID'] == 6 and h[0].header['FILMP4'] == 1 and h[0].header['FILMP7']==0:
ThAr_co.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_co_dates.append( mjd )
elif h[0].header['FICARMID'] == 6 and h[0].header['FILMP4'] == 1 and h[0].header['FILMP7']==1:
ThAr_sim.append(archivo)
mjd, mjd0 = mjd_fromheader2(h)
ThAr_sim_dates.append( mjd )
elif (mode=='F3' and h[0].header['FICARMID'] == 2) or (mode == 'F1' and h[0].header['FICARMID'] == 5)\
or (mode=='F4' and (h[0].header['FICARMID'] == 5 or h[0].header['FICARMID'] == 4)):
sim_sci.append(archivo)
obname = h[0].header['OBJECT']
obnames.append( obname )
ra = ra_from_sec(h[0].header['RA']*3600.*24./360.)
delta = ra_from_sec(h[0].header['DEC']*3600.)
airmass= float(h[0].header['AIRMASS'])
texp = float(h[0].header['EXPTIME'])
date = h[0].header['DATE-OBS']
hour = date[11:]
date = date[:10]
exptimes.append( texp )
if h[0].header['FILMP4'] == 1:
simult = 'SIMULT'
else:
simult = 'NO_SIMULT'
line = "%-15s %10s %10s %8.2f %4.2f %8s %11s %s %s\n" % (obname, ra, delta, texp, airmass, date, hour, archivo, simult)
f.write(line)
#show()
flat_ref_dates = np.array(flat_ref_dates)
flats = np.array(flats)
IS = np.argsort(flat_ref_dates)
flat_ref_dates = flat_ref_dates[IS]
flats = flats[IS]
#for i in range(len(flats)):
# print 'flat',flats[i], flat_ref_dates[i]
bias_ref_dates = np.array(bias_ref_dates)
biases = np.array(biases)
IS = np.argsort(bias_ref_dates)
bias_ref_dates = bias_ref_dates[IS]
biases = biases[IS]
#for i in range(len(biases)):
# print 'bias',biases[i], bias_ref_dates[i]
f.close()
return biases, np.array(flats), np.array(ThAr_ref), sim_sci, np.array(ThAr_ref_dates), obnames, exptimes, np.array(darks), np.array(flats_co), np.array(flats_co_dates),np.array(ThAr_sim), np.array(ThAr_sim_dates),np.array(ThAr_co), np.array(ThAr_co_dates)
def get_darktimes(darks):
times = []
for dark in darks:
hd = pyfits.getheader(dark)
times.append(hd['EXPTIME'])
return np.unique(np.sort(np.array(times))), np.array(times)
def mjd_fromheader2(h):
"""
return modified Julian date from header
"""
datetu = h[0].header['DATE-OBS']
mjd0,mjd,i = GLOBALutils.iau_cal2jd(int(datetu[:4]),int(datetu[5:7]),int(datetu[8:10]))
ho = int(datetu[11:13])
mi = int(datetu[14:16])
se = float(datetu[17:])
ut = float(ho) + float(mi)/60.0 + float(se)/3600.0
mjd_start = mjd + ut/24.0
secinday = 24*3600.0
fraction = 0.5
texp = h[0].header['EXPTIME'] #sec
mjd = mjd_start + (fraction * texp) / secinday
return mjd, mjd0
def get_RONGAIN(hd):
return hd['RDNOISE'], hd['GAIN']
def MedianCombine(ImgList, zero='none', binning=1, oii=100, off=2148):
"""
Median combine a list of images
"""
n = len(ImgList)
if n==0:
raise ValueError("empty list provided!")
h = pyfits.open(ImgList[0])
d1 = h[1].data
h1 = h[1].header
d1 = OverscanTrim(d1, binning=binning,ii=oii, ff=off)
if zero != 'none':
z = pyfits.open(zero)[0]
d1 -= z.data
factor = 1.25
if (n < 3):
factor = 1
ron1,gain1 = get_RONGAIN(h[1].header)
ron1 = factor * ron1 / np.sqrt(n)
if n>1:
for i in range(n-1):
td = pyfits.open(ImgList[i+1])
if zero == 'none':
d1 = np.dstack((d1,OverscanTrim(td[1].data, binning=binning, ii=oii, ff=off)))
else:
d1 = np.dstack((d1,OverscanTrim(td[1].data, binning=binning, ii=oii, ff=off)-z.data))
d1 = np.median(d1,axis=2)
return d1, ron1, gain1
def OverscanTrim(dat,binning=1,ii=100,ff=2148):
"""
Overscan correct and Trim a refurbished FEROS image
"""
#ff = 2098
#ii = 50
ff = int(np.around(ff/binning))
ii = int(np.around(ii/binning))
os = dat[:,ff:]
s = np.median(os)
newdata = dat[:,ii:ff].copy() - s
return newdata | 28.259494 | 256 | 0.619709 |
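A worked, datetime-based illustration of the mid-exposure MJD arithmetic implemented in mjd_fromheader2 above; the header values are made up, and the real code goes through GLOBALutils.iau_cal2jd:

from datetime import datetime

date_obs = "2015-03-02T04:30:00.0"   # DATE-OBS, start of exposure (illustrative)
texp = 1200.0                        # EXPTIME in seconds (illustrative)

t = datetime.strptime(date_obs, "%Y-%m-%dT%H:%M:%S.%f")
ut = t.hour + t.minute / 60.0 + t.second / 3600.0
mjd0 = t.toordinal() + 1721424.5 - 2400000.5   # MJD at 0h UT of that date
mjd_start = mjd0 + ut / 24.0
mjd_mid = mjd_start + 0.5 * texp / (24 * 3600.0)
print(mjd_start, mjd_mid)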
9f0ddebbd166f9815a9445e766f834f3cc461f15 | 4,396 | py | Python | examples/getArch.py | kamnsv/impacket | 83a581e4ba0cb3b7ba5dfa3018b87f9bf1a2cb58 | [
"Apache-1.1"
]
| 6,612 | 2018-10-10T22:45:11.000Z | 2022-03-31T18:13:01.000Z | examples/getArch.py | anno5750/impacket | ed7082cd0bc0d951f6eefb0a98c4c1360fe1a8a2 | [
"Apache-1.1"
]
| 703 | 2018-10-11T11:38:30.000Z | 2022-03-31T14:59:22.000Z | examples/getArch.py | anno5750/impacket | ed7082cd0bc0d951f6eefb0a98c4c1360fe1a8a2 | [
"Apache-1.1"
]
| 2,172 | 2018-10-11T10:51:26.000Z | 2022-03-31T04:45:49.000Z | #!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2020 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# This script will connect against a target (or list of targets) machine/s and gather the OS architecture type
# installed.
# The trick has been discovered many years ago and is actually documented by Microsoft here:
# https://msdn.microsoft.com/en-us/library/cc243948.aspx#Appendix_A_53
# and doesn't require any authentication at all.
#
# Have in mind this trick will *not* work if the target system is running Samba. Don't know what happens with macOS.
#
# Author:
# beto (@agsolino)
#
# Reference for:
# RPCRT, NDR
#
from __future__ import division
from __future__ import print_function
import argparse
import logging
import sys
from impacket import version
from impacket.examples import logger
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.dcerpc.v5.transport import DCERPCTransportFactory
from impacket.dcerpc.v5.epm import MSRPC_UUID_PORTMAP
class TARGETARCH:
def __init__(self, options):
self.__machinesList = list()
self.__options = options
self.NDR64Syntax = ('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')
def run(self):
if self.__options.targets is not None:
for line in self.__options.targets.readlines():
self.__machinesList.append(line.strip(' \r\n'))
else:
self.__machinesList.append(self.__options.target)
logging.info('Gathering OS architecture for %d machines' % len(self.__machinesList))
logging.info('Socket connect timeout set to %s secs' % self.__options.timeout)
for machine in self.__machinesList:
try:
stringBinding = r'ncacn_ip_tcp:%s[135]' % machine
transport = DCERPCTransportFactory(stringBinding)
transport.set_connect_timeout(int(self.__options.timeout))
dce = transport.get_dce_rpc()
dce.connect()
try:
dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=self.NDR64Syntax)
except DCERPCException as e:
if str(e).find('syntaxes_not_supported') >= 0:
print('%s is 32-bit' % machine)
else:
logging.error(str(e))
pass
else:
print('%s is 64-bit' % machine)
dce.disconnect()
except Exception as e:
#import traceback
#traceback.print_exc()
logging.error('%s: %s' % (machine, str(e)))
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print(version.BANNER)
parser = argparse.ArgumentParser(add_help = True, description = "Gets the target system's OS architecture version")
parser.add_argument('-target', action='store', help='<targetName or address>')
parser.add_argument('-targets', type=argparse.FileType('r'), help='input file with targets system to query Arch '
'from (one per line). ')
parser.add_argument('-timeout', action='store', default='2', help='socket timeout out when connecting to the target (default 2 sec)')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.target is None and options.targets is None:
logging.error('You have to specify a target!')
sys.exit(1)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
try:
getArch = TARGETARCH(options)
getArch.run()
except (Exception, KeyboardInterrupt) as e:
if logging.getLogger().level == logging.DEBUG:
import traceback
traceback.print_exc()
logging.error(str(e))
sys.exit(0)
| 37.254237 | 137 | 0.648544 |
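The whole probe above boils down to a single NDR64 bind attempt against the endpoint mapper; the condensed sketch below repackages that check as a reusable function. The function name and sample host are illustrative; the impacket calls are the ones already used in the script.

from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.dcerpc.v5.transport import DCERPCTransportFactory
from impacket.dcerpc.v5.epm import MSRPC_UUID_PORTMAP

NDR64 = ('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0')

def is_64bit(host, timeout=2):
    """Return True if the endpoint mapper on `host` accepts an NDR64 bind."""
    transport = DCERPCTransportFactory(r'ncacn_ip_tcp:%s[135]' % host)
    transport.set_connect_timeout(timeout)
    dce = transport.get_dce_rpc()
    dce.connect()
    try:
        dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=NDR64)
        supported = True
    except DCERPCException as e:
        if str(e).find('syntaxes_not_supported') < 0:
            raise
        supported = False
    dce.disconnect()
    return supported

print(is_64bit('192.168.0.10'))  # placeholder address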
821a9f891dacf7a50c44e62891cfc79598d34d3b | 13,350 | py | Python | tchannel/tchannel.py | islavov/tchannel-python | ca3da9b4e0367d2e00078b158ab9e0bb4f328619 | [
"MIT"
]
| 98 | 2015-07-10T23:42:11.000Z | 2021-11-08T11:21:02.000Z | tchannel/tchannel.py | islavov/tchannel-python | ca3da9b4e0367d2e00078b158ab9e0bb4f328619 | [
"MIT"
]
| 445 | 2015-07-10T23:58:02.000Z | 2021-08-24T14:58:39.000Z | tchannel/tchannel.py | islavov/tchannel-python | ca3da9b4e0367d2e00078b158ab9e0bb4f328619 | [
"MIT"
]
| 43 | 2015-07-22T19:14:57.000Z | 2021-09-14T12:12:38.000Z | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import json
import logging
from threading import Lock
from tornado import gen
from . import schemes
from . import transport
from . import retry
from . import tracing
from .errors import AlreadyListeningError, ServiceNameIsRequiredError
from .glossary import DEFAULT_TIMEOUT
from .health import health
from .health import Meta
from .response import Response, TransportHeaders
from .tornado import TChannel as DeprecatedTChannel
from .tornado.dispatch import RequestDispatcher as DeprecatedDispatcher
from .tracing import TracingContextProvider
log = logging.getLogger('tchannel')
__all__ = ['TChannel']
class TChannel(object):
"""Manages connections and requests to other TChannel services.
Usage for a JSON client/server:
.. code:: python
tchannel = TChannel(name='foo')
@tchannel.json.register
def handler(request):
return {'foo': 'bar'}
response = yield tchannel.json(
service='some-service',
endpoint='endpoint',
headers={'req': 'headers'},
body={'req': 'body'},
)
:cvar thrift:
Make Thrift requests over TChannel and register Thrift handlers.
:vartype thrift: ThriftArgScheme
:cvar json:
Make JSON requests over TChannel and register JSON handlers.
:vartype json: JsonArgScheme
:cvar raw:
Make requests and register handles that pass raw bytes.
:vartype raw: RawArgScheme
"""
FALLBACK = DeprecatedTChannel.FALLBACK
def __init__(self, name, hostport=None, process_name=None,
known_peers=None, trace=True, reuse_port=False,
context_provider=None, tracer=None):
"""
**Note:** In general only one ``TChannel`` instance should be used at a
time. Multiple ``TChannel`` instances are not advisable and could
result in undefined behavior.
:param string name:
How this application identifies itself. This is the name callers
will use to make contact, it is also what your downstream services
will see in their metrics.
:param string hostport:
            An optional host/port to serve on, e.g., ``"127.0.0.1:5555"``. If
            not provided, an ephemeral port will be used. When advertising on
            Hyperbahn your callers do not need to know your port.
"""
if not name:
raise ServiceNameIsRequiredError
self.context_provider = context_provider or TracingContextProvider()
# until we move everything here,
# lets compose the old tchannel
self._dep_tchannel = DeprecatedTChannel(
name=name,
hostport=hostport,
process_name=process_name,
known_peers=known_peers,
trace=trace,
tracer=tracer,
dispatcher=DeprecatedDispatcher(_handler_returns_response=True),
reuse_port=reuse_port,
_from_new_api=True,
context_provider_fn=lambda: self.context_provider,
)
self.name = name
# set arg schemes
self.raw = schemes.RawArgScheme(self)
self.json = schemes.JsonArgScheme(self)
self.thrift = schemes.ThriftArgScheme(self)
self._listen_lock = Lock()
# register default health endpoint
self.thrift.register(Meta)(health)
# advertise_response is the Future containing the response of calling
# advertise().
self._advertise_response = None
self._advertise_lock = Lock()
tracing.api_check(tracer=tracer)
def is_listening(self):
return self._dep_tchannel.is_listening()
@property
def hooks(self):
return self._dep_tchannel.hooks
@property
def tracer(self):
return self._dep_tchannel.tracer
@gen.coroutine
def call(
self,
scheme,
service,
arg1,
arg2=None,
arg3=None,
timeout=None,
retry_on=None,
retry_limit=None,
routing_delegate=None,
hostport=None,
shard_key=None,
tracing_span=None,
trace=None, # to trace or not, defaults to self._dep_tchannel.trace
caller_name=None,
):
"""Make low-level requests to TChannel services.
**Note:** Usually you would interact with a higher-level arg scheme
like :py:class:`tchannel.schemes.JsonArgScheme` or
:py:class:`tchannel.schemes.ThriftArgScheme`.
"""
# TODO - don't use asserts for public API
        assert scheme, "scheme is required"
assert service, "service is required"
assert arg1, "arg1 is required"
# default args
if arg2 is None:
arg2 = ""
if arg3 is None:
arg3 = ""
if timeout is None:
timeout = DEFAULT_TIMEOUT
if retry_on is None:
retry_on = retry.DEFAULT
if retry_limit is None:
retry_limit = retry.DEFAULT_RETRY_LIMIT
# TODO - allow filters/steps for serialization, tracing, etc...
tracing.apply_trace_flag(tracing_span, trace, self._dep_tchannel.trace)
# calls tchannel.tornado.peer.PeerClientOperation.__init__
operation = self._dep_tchannel.request(
service=service,
hostport=hostport,
arg_scheme=scheme,
retry=retry_on,
tracing_span=tracing_span
)
# fire operation
transport_headers = {
transport.SCHEME: scheme,
transport.CALLER_NAME: caller_name or self.name,
}
if shard_key:
transport_headers[transport.SHARD_KEY] = shard_key
if routing_delegate:
transport_headers[transport.ROUTING_DELEGATE] = routing_delegate
response = yield operation.send(
arg1=arg1,
arg2=arg2,
arg3=arg3,
headers=transport_headers,
retry_limit=retry_limit,
ttl=timeout,
)
# unwrap response
body = yield response.get_body()
headers = yield response.get_header()
t = TransportHeaders.from_dict(response.headers)
result = Response(
body=body,
headers=headers,
transport=t,
status=response.code,
)
raise gen.Return(result)
def listen(self, port=None):
with self._listen_lock:
if self._dep_tchannel.is_listening():
listening_port = int(self.hostport.rsplit(":")[1])
if port and port != listening_port:
raise AlreadyListeningError(
"TChannel server is already listening on port: %d"
% listening_port
)
else:
return
return self._dep_tchannel.listen(port)
@property
def host(self):
return self._dep_tchannel.host
@property
def hostport(self):
return self._dep_tchannel.hostport
@property
def port(self):
return self._dep_tchannel.port
def is_closed(self):
return self._dep_tchannel.closed
def close(self):
return self._dep_tchannel.close()
def register(self, scheme, endpoint=None, handler=None, **kwargs):
if scheme is self.FALLBACK:
# scheme is not required for fallback endpoints
endpoint = scheme
scheme = None
def decorator(fn):
# assert handler is None, "can't handler when using as decorator"
if endpoint is None:
e = fn.__name__
else:
e = endpoint
return self._dep_tchannel.register(
endpoint=e,
scheme=scheme,
handler=fn,
**kwargs
)
if handler is None:
return decorator
else:
return decorator(handler)
def advertise(self, routers=None, name=None, timeout=None,
router_file=None, jitter=None):
"""Advertise with Hyperbahn.
After a successful advertisement, Hyperbahn will establish long-lived
connections with your application. These connections are used to load
balance inbound and outbound requests to other applications on the
Hyperbahn network.
Re-advertisement happens periodically after calling this method (every
minute). Hyperbahn will eject us from the network if it doesn't get a
re-advertise from us after 5 minutes.
This function may be called multiple times if it fails. If it
succeeds, all consecutive calls are ignored.
:param list routers:
A seed list of known Hyperbahn addresses to attempt contact with.
Entries should be of the form ``"host:port"``.
:param string name:
The name your application identifies itself as. This is usually
unneeded because in the common case it will match the ``name`` you
initialized the ``TChannel`` instance with. This is the identifier
other services will use to make contact with you.
:param timeout:
The timeout (in sec) for the initial advertise attempt.
Defaults to 30 seconds.
:param jitter:
Variance allowed in the interval per request. Defaults to 5
seconds. The jitter applies to the initial advertise request as
well.
:param router_file:
The host file that contains the routers information. The file
should contain a JSON stringified format of the routers parameter.
Either routers or router_file should be provided. If both provided,
a ValueError will be raised.
:returns:
A future that resolves to the remote server's response after the
first advertise finishes.
:raises TimeoutError:
When unable to make our first advertise request to Hyperbahn.
Subsequent requests may fail but will be ignored.
"""
if routers is not None and router_file is not None:
raise ValueError(
'Only one of routers and router_file can be provided.')
if routers is None and router_file is not None:
# should just let the exceptions fly
try:
with open(router_file, 'r') as json_data:
routers = json.load(json_data)
except (IOError, OSError, ValueError):
log.exception('Failed to read seed routers list.')
raise
@gen.coroutine
def _advertise():
result = yield self._dep_tchannel.advertise(
routers=routers,
name=name,
timeout=timeout,
)
body = yield result.get_body()
headers = yield result.get_header()
response = Response(json.loads(body.decode('utf8')), headers or {})
raise gen.Return(response)
def _on_advertise(future):
if not future.exception():
return
# If the request failed, clear the response so that we can try
# again.
with self._advertise_lock:
# `is` comparison to ensure we're not deleting another Future.
if self._advertise_response is future:
self._advertise_response = None
with self._advertise_lock:
if self._advertise_response is not None:
return self._advertise_response
future = self._advertise_response = _advertise()
# We call add_done_callback here rather than when we call _advertise()
# because if the future has already resolved by the time we call
# add_done_callback, the callback will immediately be executed. The
# callback will try to acquire the advertise_lock which we already
# hold and end up in a deadlock.
future.add_done_callback(_on_advertise)
return future
| 34.056122 | 79 | 0.625243 |
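A minimal server-side sketch of the API documented above, assuming the package-level TChannel export and a plain Tornado IOLoop; the service name and endpoint are illustrative.

from tornado import ioloop
from tchannel import TChannel

app = TChannel(name='example-service')

@app.json.register
def ping(request):
    # the endpoint name defaults to the function name, as in register() above
    return {'pong': True}

app.listen()
print('listening on %s' % app.hostport)
ioloop.IOLoop.current().start()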
1ec417d31df43492b706f9cf0a742f874e8822eb | 653 | bzl | Python | rules/jvm/private/label.bzl | tjarvstrand/rules_scala | ff423d8bdd0e5383f8f2c048ffd7704bb51a91bf | [
"Apache-2.0"
]
| 53 | 2019-01-07T23:15:32.000Z | 2021-09-24T00:27:40.000Z | rules/jvm/private/label.bzl | tjarvstrand/rules_scala | ff423d8bdd0e5383f8f2c048ffd7704bb51a91bf | [
"Apache-2.0"
]
| 101 | 2019-01-05T04:52:40.000Z | 2021-01-29T16:48:58.000Z | rules/jvm/private/label.bzl | tjarvstrand/rules_scala | ff423d8bdd0e5383f8f2c048ffd7704bb51a91bf | [
"Apache-2.0"
]
| 24 | 2019-01-23T07:54:28.000Z | 2022-02-10T19:42:07.000Z | load("@rules_scala_annex//rules:providers.bzl", "LabeledJars")
def labeled_jars_implementation(target, ctx):
if JavaInfo not in target:
return []
deps_labeled_jars = [dep[LabeledJars] for dep in getattr(ctx.rule.attr, "deps", []) if LabeledJars in dep]
java_info = target[JavaInfo]
return [
LabeledJars(
values = depset(
[struct(label = ctx.label, jars = depset(transitive = [java_info.compile_jars, java_info.full_compile_jars]))],
order = "preorder",
transitive = [labeled_jars.values for labeled_jars in deps_labeled_jars],
),
),
]
| 36.277778 | 127 | 0.623277 |
f955058a256de3e402aec4fa22a9f344a9ac4b04 | 11,961 | py | Python | tests/v1/test_actions.py | tfuji384/atodeyomu | 8a5e61a593d099726131e060a33b8801056bc286 | [
"MIT"
]
| null | null | null | tests/v1/test_actions.py | tfuji384/atodeyomu | 8a5e61a593d099726131e060a33b8801056bc286 | [
"MIT"
]
| null | null | null | tests/v1/test_actions.py | tfuji384/atodeyomu | 8a5e61a593d099726131e060a33b8801056bc286 | [
"MIT"
]
| null | null | null | import json
from http import HTTPStatus
from unittest import mock
from fastapi.testclient import TestClient
from moto import mock_dynamodb2
from app import app, models
from tests.factories import get_object
client = TestClient(app)
def test_redirect_to_view_submission():
payload = {
'type': 'view_submission',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/', data=data, allow_redirects=False)
assert res.status_code == HTTPStatus.TEMPORARY_REDIRECT
assert res.headers['location'] == './view_submission/'
def test_redirect_to_shortcuts():
payload = {
'type': 'shortcut',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/', data=data, allow_redirects=False)
assert res.status_code == HTTPStatus.TEMPORARY_REDIRECT
assert res.headers['location'] == './shortcuts/'
def test_redirect_to_message():
payload = {
'type': 'block_actions',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'container': {
'type': 'message'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/', data=data, allow_redirects=False)
assert res.status_code == HTTPStatus.TEMPORARY_REDIRECT
assert res.headers['location'] == './message/'
@mock.patch('app.models.TeamConf.get', mock.Mock(side_effect=models.TeamConf.DoesNotExist))
def test_shortcut_teamconf_doesnot_exists():
payload = {
'type': 'shortcut',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/shortcuts/', data=data)
assert res.status_code == HTTPStatus.BAD_REQUEST
@mock_dynamodb2
@mock.patch('slack_sdk.web.base_client.BaseClient.api_call', lambda x, *y, **z:...)
def test_shortcut_add_emoji_set():
models.TeamConf.create_table()
models.TeamConf('T0000000000', access_token='access_token').save()
payload = {
'type': 'shortcut',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'callback_id': 'edit_emoji_set',
'trigger_id': 'trigger_id',
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/shortcuts/', data=data)
assert res.status_code == HTTPStatus.OK
@mock.patch('app.models.TeamConf.get', get_object)
@mock.patch('slack_sdk.web.base_client.BaseClient.api_call', lambda x, *y, **z:...)
def test_shortcut_edit_emoji_set():
payload = {
'type': 'shortcut',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'callback_id': 'edit_emoji_set',
'trigger_id': 'trigger_id',
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/shortcuts/', data=data)
assert res.status_code == HTTPStatus.OK
@mock.patch('app.models.TeamConf.get', mock.Mock(side_effect=models.TeamConf.DoesNotExist))
def test_message_team_conf_does_not_exist():
payload = {
'type': 'block_actions',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'actions': [{
'action_id': 'mark_as_read',
'value': 'mark_as_read',
'type': 'button',
'action_ts': '1629922346.043279'
}],
'container': {
'type': 'message',
'message_ts': '1629922334.000700',
'channel_id': 'channel_id',
}
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/message/', data=data)
assert res.status_code == HTTPStatus.BAD_REQUEST
@mock.patch('app.models.TeamConf.get', get_object)
@mock.patch('slack_sdk.web.base_client.BaseClient.api_call', lambda x, *y, **z:...)
def test_message():
payload = {
'type': 'block_actions',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'actions': [{
'action_id': 'mark_as_read',
'value': 'mark_as_read',
'type': 'button',
'action_ts': '1629922346.043279'
}],
'container': {
'type': 'message',
'message_ts': '1629922334.000700',
'channel_id': 'channel_id',
}
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/message/', data=data)
assert res.status_code == HTTPStatus.OK
@mock.patch('app.models.TeamConf.get', mock.Mock(side_effect=models.TeamConf.DoesNotExist))
def test_view_submission_team_conf_does_not_exist():
payload = {
'type': 'view_submission',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'view': {
'callback_id': 'edit_emoji_set',
'state': {
'values': {
'emoji': {
'emoji': {
'type': 'plain_text_input',
'value': 'example'
}
}
}
},
}
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/view_submission/', data=data)
assert res.status_code == HTTPStatus.BAD_REQUEST
@mock_dynamodb2
@mock.patch('slack_sdk.WebClient.emoji_list', lambda x: {'emoji': {'example': 'https://emoji.com/example.png'}})
def test_view_submission_add_emoji():
models.TeamConf.create_table()
team_conf = models.TeamConf('T0000000000', access_token='access_token')
team_conf.save()
payload = {
'type': 'view_submission',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'view': {
'callback_id': 'edit_emoji_set',
'state': {
'values': {
'emoji_0': {
'emoji_0': {
'type': 'plain_text_input',
'value': ':example:'
}
},
'emoji_1': {
'emoji_1': {
'type': 'plain_text_input',
'value': None
}
},
'emoji_2': {
'emoji_2': {
'type': 'plain_text_input',
'value': None
}
}
}
},
}
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/view_submission/', data=data)
team_conf.refresh()
assert len(team_conf.emoji_set) == 1
assert 'example' in team_conf.emoji_set
assert res.status_code == HTTPStatus.OK
@mock_dynamodb2
@mock.patch('slack_sdk.WebClient.emoji_list', lambda x: {'emoji': {}})
def test_view_submission_add_unregistered_emoji():
"""存在しないemojiを入力したケース
"""
models.TeamConf.create_table()
team_conf = models.TeamConf('T0000000000', access_token='access_token')
team_conf.save()
payload = {
'type': 'view_submission',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'view': {
'callback_id': 'edit_emoji_set',
'state': {
'values': {
'emoji_0': {
'emoji_0': {
'type': 'plain_text_input',
'value': 'example'
}
},
'emoji_1': {
'emoji_1': {
'type': 'plain_text_input',
'value': None
}
},
'emoji_2': {
'emoji_2': {
'type': 'plain_text_input',
'value': None
}
}
}
},
}
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/view_submission/', data=data)
team_conf.refresh()
assert team_conf.emoji_set is None
assert res.status_code == HTTPStatus.OK
assert res.json()['errors']['emoji_0'] == '登録されていないemojiです'
@mock_dynamodb2
@mock.patch('slack_sdk.WebClient.emoji_list', lambda x: {'emoji': {}})
def test_view_submission_remove_emoji():
"""emojiを削除"""
models.TeamConf.create_table()
team_conf = models.TeamConf('T0000000000', access_token='access_token', emoji_set={'example'})
team_conf.save()
payload = {
'type': 'view_submission',
'team': {
'id': 'T0000000000',
'domain': 'team_domain'
},
'user': {
'id': 'U00XXXXXXX',
'team_id': 'T0000000000'
},
'view': {
'callback_id': 'edit_emoji_set',
'state': {
'values': {
'emoji_0': {
'emoji_0': {
'type': 'plain_text_input',
'value': None
}
},
'emoji_1': {
'emoji_1': {
'type': 'plain_text_input',
'value': None
}
},
'emoji_2': {
'emoji_2': {
'type': 'plain_text_input',
'value': None
}
}
}
}
}
}
data = {'payload': json.dumps(payload)}
res = client.post('/v1/actions/view_submission/', data=data)
team_conf.refresh()
assert res.status_code == HTTPStatus.OK
assert team_conf.emoji_set is None
| 30.357868 | 112 | 0.484491 |
c1508cc2a1134d35a365925048d460eef69170ca | 4,369 | py | Python | code/cmt/python/tests/glm_test.py | lucastheis/cmt | 39cf7a341279a3700ca30909a2b254ae86d0c273 | [
"MIT"
]
| 33 | 2015-06-15T16:52:23.000Z | 2019-11-26T19:49:36.000Z | code/cmt/python/tests/glm_test.py | lucastheis/cmt | 39cf7a341279a3700ca30909a2b254ae86d0c273 | [
"MIT"
]
| 35 | 2015-03-17T09:27:32.000Z | 2022-01-13T00:44:05.000Z | code/cmt/python/tests/glm_test.py | lucastheis/cmt | 39cf7a341279a3700ca30909a2b254ae86d0c273 | [
"MIT"
]
| 17 | 2015-01-20T10:39:59.000Z | 2020-12-16T10:24:13.000Z | import sys
import unittest
from pickle import dump, load
from tempfile import mkstemp
from numpy import *
from numpy import max
from numpy.linalg import inv
from numpy.random import randn, rand
from cmt.models import Bernoulli, GLM
from cmt.nonlinear import LogisticFunction, BlobNonlinearity
class Tests(unittest.TestCase):
def test_glm_basics(self):
glm = GLM(4, LogisticFunction, Bernoulli)
x = randn(1000)
f = glm.nonlinearity
y = f(x).ravel()
for i in range(x.size):
self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))
glm.nonlinearity = f
y = glm.nonlinearity(x).ravel()
for i in range(x.size):
self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))
b = Bernoulli()
glm = GLM(4, f, b)
glm.nonlinearity = f
y = glm.nonlinearity(x).ravel()
for i in range(x.size):
self.assertAlmostEqual(y[i], 1. / (1. + exp(-x[i])))
self.assertTrue(isinstance(glm.distribution, Bernoulli))
# test wrong order of arguments
self.assertRaises(TypeError, lambda: GLM(5, Bernoulli, LogisticFunction))
def test_glm_train(self):
w = asarray([[-1., 0., 1., 2.]]).T
b = 1.
x = randn(4, 100000)
p = 1. / (1. + exp(-dot(w.T, x) - b))
y = rand(*p.shape) < p
glm = GLM(4, LogisticFunction, Bernoulli)
# test gradient
err = glm._check_gradient(x, y, 1e-5, parameters={
'train_weights': False,
'train_bias': True})
self.assertLess(err, 1e-8)
err = glm._check_gradient(x, y, 1e-5, parameters={
'train_weights': True,
'train_bias': False})
self.assertLess(err, 1e-8)
err = glm._check_gradient(x, y, 1e-5)
self.assertLess(err, 1e-8)
err = glm._check_gradient(x, y, 1e-5, parameters={
'regularize_weights': 10.,
'regularize_bias': 10.})
self.assertLess(err, 1e-8)
# test training
glm.train(x, y, parameters={'verbosity': 0})
self.assertLess(max(abs(glm.weights - w)), 0.1)
self.assertLess(max(abs(glm.bias - b)), 0.1)
glm.weights = w
glm.bias = -1.
glm.train(x, y, parameters={'verbosity': 0, 'train_weights': False})
self.assertLess(max(abs(glm.weights - w)), 1e-12)
self.assertLess(max(abs(glm.bias - b)), 0.1)
glm.weights = randn(*glm.weights.shape)
glm.bias = b
glm.train(x, y, parameters={'verbosity': 0, 'train_bias': False})
self.assertLess(max(abs(glm.weights - w)), 0.1)
self.assertLess(max(abs(glm.bias - b)), 1e-12)
def test_glm_fisher_information(self):
N = 1000
T = 100
glm = GLM(3)
glm.weights = randn(glm.dim_in, 1)
glm.bias = -2.
inputs = randn(glm.dim_in, N)
outputs = glm.sample(inputs)
x = glm._parameters()
I = glm._fisher_information(inputs, outputs)
x_mle = []
# repeated maximum likelihood estimation
for t in range(T):
inputs = randn(glm.dim_in, N)
outputs = glm.sample(inputs)
# initialize at true parameters for fast convergence
glm_ = GLM(glm.dim_in)
glm_.weights = glm.weights
glm_.bias = glm.bias
glm_.train(inputs, outputs)
x_mle.append(glm_._parameters())
C = cov(hstack(x_mle), ddof=1)
# inv(I) should be sufficiently close to C
self.assertLess(max(abs(inv(I) - C) / (abs(C) + .1)), max(abs(C) / (abs(C) + .1)) / 2.)
def test_glm_data_gradient(self):
glm = GLM(7, LogisticFunction, Bernoulli)
x = randn(glm.dim_in, 100)
y = glm.sample(x)
dx, _, ll = glm._data_gradient(x, y)
h = 1e-7
# compute numerical gradient
dx_ = zeros_like(dx)
for i in range(glm.dim_in):
x_p = x.copy()
x_m = x.copy()
x_p[i] += h
x_m[i] -= h
dx_[i] = (
glm.loglikelihood(x_p, y) -
glm.loglikelihood(x_m, y)) / (2. * h)
self.assertLess(max(abs(ll - glm.loglikelihood(x, y))), 1e-8)
self.assertLess(max(abs(dx_ - dx)), 1e-7)
def test_glm_pickle(self):
tmp_file = mkstemp()[1]
model0 = GLM(5, BlobNonlinearity, Bernoulli)
model0.weights = randn(*model0.weights.shape)
model0.bias = randn()
# store model
with open(tmp_file, 'wb') as handle:
dump({'model': model0}, handle)
# load model
with open(tmp_file, 'rb') as handle:
model1 = load(handle)['model']
# make sure parameters haven't changed
self.assertLess(max(abs(model0.bias - model1.bias)), 1e-20)
self.assertLess(max(abs(model0.weights - model1.weights)), 1e-20)
x = randn(model0.dim_in, 100)
y = model0.sample(x)
self.assertEqual(
model0.evaluate(x, y),
model1.evaluate(x, y))
if __name__ == '__main__':
unittest.main()
| 22.755208 | 89 | 0.651637 |
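The training test above doubles as a recipe; here is a hedged sketch of the same fit outside the test harness (the true weights, bias and sample count are arbitrary choices, and the cmt package is assumed to be installed).

from numpy import asarray, dot, exp
from numpy.random import rand, randn
from cmt.models import Bernoulli, GLM
from cmt.nonlinear import LogisticFunction

w_true = asarray([[-1., 0., 1., 2.]]).T
x = randn(4, 50000)
p = 1. / (1. + exp(-dot(w_true.T, x) - 1.))
y = rand(*p.shape) < p

glm = GLM(4, LogisticFunction, Bernoulli)
glm.train(x, y, parameters={'verbosity': 0})
print(glm.weights.ravel(), glm.bias)  # should approach w_true and 1.
print(glm.evaluate(x, y))             # same evaluate() used in the pickle test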
b0a045f5932d32e2af57aa53fe2d0e0efd11f087 | 935 | py | Python | tests/tools/perf/kvtable/test_dict2kvtable.py | michalbiesek/rpma | 100bed7bebc57ba1164f9b786acd4ea9ba0b688e | [
"BSD-3-Clause"
]
| 83 | 2020-02-18T13:19:10.000Z | 2022-03-29T08:22:28.000Z | tests/tools/perf/kvtable/test_dict2kvtable.py | michalbiesek/rpma | 100bed7bebc57ba1164f9b786acd4ea9ba0b688e | [
"BSD-3-Clause"
]
| 603 | 2020-02-13T10:45:28.000Z | 2022-03-31T11:13:17.000Z | tests/tools/perf/kvtable/test_dict2kvtable.py | michalbiesek/rpma | 100bed7bebc57ba1164f9b786acd4ea9ba0b688e | [
"BSD-3-Clause"
]
| 51 | 2020-02-13T09:11:15.000Z | 2022-03-29T08:22:31.000Z | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2021, Intel Corporation
#
"""test_dict2kvtable.py -- lib.kvtable.dict2kvtable() tests"""
import lib.kvtable
HEADER = '<tr><th>Item</th><th>Description</th></tr></thead><tbody>'
START = '<table><thead>'
STOP = '</tbody></table>'
EMPTY = START + HEADER + STOP
SIMPLE_RANGE = 10
SIMPLE = START + HEADER \
+ "".join([
'<tr><td>{}</td><td><pre>{}</pre></td></tr>'.format(k, k + 1)
for k in range(SIMPLE_RANGE)]) \
+ STOP
EMPTY_KVTABLE = {"type": "kvtable"}
def test_empty_empty():
"""produce an empty table"""
output = lib.kvtable.dict2kvtable(EMPTY_KVTABLE, {})
assert(output == EMPTY)
def test_simple():
"""produce a simple table n -> (n + 1)"""
kvt = {str(k): str(k + 1) for k in range(SIMPLE_RANGE)}
kvt["type"] = "kvtable"
output = lib.kvtable.dict2kvtable(kvt, {})
assert(output == SIMPLE)
| 25.972222 | 69 | 0.612834 |
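For reference, the same call pattern outside the assertions; the keys are made up and lib.kvtable is assumed to be importable, as in the tests above.

import lib.kvtable

table = {'type': 'kvtable', 'alpha': 'first', 'beta': 'second'}
print(lib.kvtable.dict2kvtable(table, {}))  # emits the <table><thead>... markup checked above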
4593aa22a82f7cf0a2cdf1f486867cbe7d2f68a3 | 556 | py | Python | gryphon/data_service/pollers/orderbook/bitstamp_btc_eur_orderbook_poller.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
]
| 1,109 | 2019-06-20T19:23:27.000Z | 2022-03-20T14:03:43.000Z | gryphon/data_service/pollers/orderbook/bitstamp_btc_eur_orderbook_poller.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
]
| 63 | 2019-06-21T05:36:17.000Z | 2021-05-26T21:08:15.000Z | gryphon/data_service/pollers/orderbook/bitstamp_btc_eur_orderbook_poller.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
]
| 181 | 2019-06-20T19:42:05.000Z | 2022-03-21T13:05:13.000Z | from gryphon.data_service.pollers.orderbook.orderbook_poller import OrderbookPoller
class BitstampBTCEUROrderbook(OrderbookPoller):
def __init__(self):
self.exchange_name = u'BITSTAMP_BTC_EUR'
self.url = 'https://www.bitstamp.net/api/v2/order_book/btceur/'
self.poll_time = 1
# API response format:
# {
# "asks":[["0.09022887",1704.11453071]],
# "bids":[["0.09000011",3.74072284]],
# "isFrozen":"0","seq":477056211,
# }
def parse_order(self, order):
return [order[0], order[1], '']
| 29.263158 | 83 | 0.638489 |
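A small sketch of the [price, volume, order-id] convention that parse_order() hands back to the base poller; the sample order values are illustrative.

poller = BitstampBTCEUROrderbook()
print(poller.exchange_name, poller.url)
print(poller.parse_order(["0.09022887", 1704.11453071]))  # -> ['0.09022887', 1704.11453071, '']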
13a870a656ab218338034f8183320b874dfe75f3 | 7,358 | py | Python | nets/triplet_loss_all.py | avagait/gaitcopy | 2fee760156b289ef12f19fb366fb62cf535c305e | [
"MIT"
]
| 1 | 2022-01-25T17:32:47.000Z | 2022-01-25T17:32:47.000Z | nets/triplet_loss_all.py | avagait/gaitcopy | 2fee760156b289ef12f19fb366fb62cf535c305e | [
"MIT"
]
| null | null | null | nets/triplet_loss_all.py | avagait/gaitcopy | 2fee760156b289ef12f19fb366fb62cf535c305e | [
"MIT"
]
| null | null | null | import tensorflow as tf
from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
from typeguard import typechecked
from typing import Optional
def _pairwise_distances(embeddings, squared=False):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
# Get the dot product between all embeddings
# shape (batch_size, batch_size)
dot_product = tf.linalg.matmul(embeddings, tf.transpose(embeddings))
# Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
# This also provides more numerical stability (the diagonal of the result will be exactly 0).
# shape (batch_size,)
square_norm = tf.linalg.diag_part(dot_product)
# Compute the pairwise distance matrix as we have:
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = tf.expand_dims(square_norm, 1) - 2.0 * dot_product + tf.expand_dims(square_norm, 0)
# Because of computation errors, some distances might be negative so we put everything >= 0.0
distances = tf.math.maximum(distances, 0.0)
if not squared:
# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
# we need to add a small epsilon where distances == 0.0
mask = tf.cast(tf.equal(distances, 0.0), dtype=tf.dtypes.float32)
distances = distances + mask * 1e-16
distances = tf.sqrt(distances)
# Correct the epsilon added: set the distances on the mask to be exactly 0.0
distances = distances * (1.0 - mask)
return distances
def _get_triplet_mask(labels):
"""Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
"""
# Check that i, j and k are distinct
indices_equal = tf.cast(tf.eye(tf.shape(labels)[0]), tf.bool)
indices_not_equal = tf.logical_not(indices_equal)
i_not_equal_j = tf.expand_dims(indices_not_equal, 2)
i_not_equal_k = tf.expand_dims(indices_not_equal, 1)
j_not_equal_k = tf.expand_dims(indices_not_equal, 0)
distinct_indices = tf.logical_and(tf.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)
# Check if labels[i] == labels[j] and labels[i] != labels[k]
label_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
i_equal_j = tf.expand_dims(label_equal, 2)
i_equal_k = tf.expand_dims(label_equal, 1)
valid_labels = tf.logical_and(i_equal_j, tf.logical_not(i_equal_k))
# Combine the two masks
mask = tf.logical_and(distinct_indices, valid_labels)
return mask
@tf.function
def triplet_batch_all_loss(
y_true: TensorLike, y_pred: TensorLike, margin: FloatTensorLike = 1.0, squared: bool = False
) -> tf.Tensor:
"""Computes the triplet loss with semi-hard negative mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: float scalar with dtype of y_pred.
"""
labels, embeddings = y_true, y_pred
convert_to_float32 = (
embeddings.dtype == tf.dtypes.float16 or embeddings.dtype == tf.dtypes.bfloat16
)
precise_embeddings = (
tf.cast(embeddings, tf.dtypes.float32) if convert_to_float32 else embeddings
)
# Reshape label tensor to [batch_size, 1].
# lshape = tf.shape(labels)
# labels = tf.reshape(labels, [lshape[0], 1])
labels = tf.squeeze(labels, axis = -1)
print()
print('labels.shape: {}'.format(labels.shape))
print('embeddings.shape: {}'.format(precise_embeddings.shape))
pairwise_dist = _pairwise_distances(precise_embeddings, squared=squared)
# shape (batch_size, batch_size, 1)
anchor_positive_dist = tf.expand_dims(pairwise_dist, 2)
# shape (batch_size, 1, batch_size)
anchor_negative_dist = tf.expand_dims(pairwise_dist, 1)
# Compute a 3D tensor of size (batch_size, batch_size, batch_size)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
# Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
# and the 2nd (batch_size, 1, batch_size)
triplet_loss = anchor_positive_dist - anchor_negative_dist + margin
# Put to zero the invalid triplets
# (where label(a) != label(p) or label(n) == label(a) or a == p)
mask = _get_triplet_mask(labels)
mask = tf.cast(mask, dtype=tf.dtypes.float32)
triplet_loss = tf.math.multiply(mask, triplet_loss)
# Remove negative losses (i.e. the easy triplets)
triplet_loss = tf.math.maximum(triplet_loss, 0.0)
# Count number of positive triplets (where triplet_loss > 0)
valid_triplets = tf.cast(tf.greater(triplet_loss, 1e-16), dtype=tf.dtypes.float32)
num_positive_triplets = tf.math.reduce_sum(valid_triplets)
num_valid_triplets = tf.math.reduce_sum(mask)
fraction_positive_triplets = num_positive_triplets / (num_valid_triplets + 1e-16)
# Get final mean triplet loss over the valid triplets
triplet_loss = tf.math.truediv(
tf.math.reduce_sum(
triplet_loss
),
num_positive_triplets + 1e-16,
)
if convert_to_float32:
return tf.cast(triplet_loss, embeddings.dtype)
else:
return triplet_loss
class TripletBatchAllLoss(tf.keras.losses.Loss):
"""Computes the triplet loss with semi-hard negative mining.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than the minimum negative distance
among which are at least greater than the positive distance plus the
margin constant (called semi-hard negative) in the mini-batch.
If no such negative exists, uses the largest negative distance instead.
See: https://arxiv.org/abs/1503.03832.
We expect labels `y_true` to be provided as 1-D integer `Tensor` with shape
[batch_size] of multi-class integer labels. And embeddings `y_pred` must be
2-D float `Tensor` of l2 normalized embedding vectors.
Args:
margin: Float, margin term in the loss definition. Default value is 1.0.
name: Optional name for the op.
"""
@typechecked
def __init__(
self, margin: FloatTensorLike = 1.0, name: Optional[str] = None, **kwargs
):
super().__init__(name=name, reduction=tf.keras.losses.Reduction.NONE)
self.margin = margin
def call(self, y_true, y_pred):
return triplet_batch_all_loss(y_true, y_pred, self.margin)
def get_config(self):
config = {
"margin": self.margin,
}
base_config = super().get_config()
return {**base_config, **config}
if __name__ == '__main__':
import numpy as np
logits = tf.convert_to_tensor([[1.1, 1.2, 1.4], [1.09, 1.21,1.41], [0.25, 0.45, 0.75], [0.23, 0.43, 0.7], [1.5, 2.5, 3.5], [1.55, 2.75, 3.8]], dtype=tf.dtypes.float32)
labels = tf.convert_to_tensor(np.array([1, 1, 2, 2, 3, 3]), dtype=tf.dtypes.float32)
loss = triplet_batch_all_loss(labels, logits)
print(loss)
| 38.726316 | 168 | 0.718266 |
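A hedged sketch of plugging the loss class above into a small Keras embedder; the layer sizes, embedding dimension and the (batch, 1) label shape are illustrative assumptions, and the module above is assumed importable.

import tensorflow as tf

embedder = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(8),  # embedding output
    tf.keras.layers.Lambda(lambda t: tf.math.l2_normalize(t, axis=1)),
])

loss_fn = TripletBatchAllLoss(margin=0.5)
features = tf.random.normal((32, 10))
labels = tf.cast(tf.random.uniform((32, 1), maxval=4, dtype=tf.int32), tf.float32)
print(float(loss_fn(labels, embedder(features))))
# The same object can be passed to embedder.compile(loss=...), with integer
# class ids of shape (batch, 1) as the fit() targets.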
f1eaf6337ea04e32b587e8948581b4d279792607 | 471 | py | Python | reinforceflow/__init__.py | ktk1012/reinforceflow | 88d426a5ce0480a8a0bf8f28f573582e950ebe15 | [
"MIT"
]
| 12 | 2017-07-16T12:13:25.000Z | 2022-03-08T04:45:16.000Z | reinforceflow/__init__.py | ktk1012/reinforceflow | 88d426a5ce0480a8a0bf8f28f573582e950ebe15 | [
"MIT"
]
| null | null | null | reinforceflow/__init__.py | ktk1012/reinforceflow | 88d426a5ce0480a8a0bf8f28f573582e950ebe15 | [
"MIT"
]
| 4 | 2017-07-25T13:28:56.000Z | 2018-12-01T11:05:09.000Z | from __future__ import absolute_import
from reinforceflow.config import version as __version__
from reinforceflow.config import get_random_seed
from reinforceflow.config import set_random_seed
from reinforceflow.config import logger
from reinforceflow import config
from reinforceflow import agents
from reinforceflow import core
from reinforceflow import envs
from reinforceflow import utils
from reinforceflow import models
config.logger_setup()
del absolute_import
| 27.705882 | 55 | 0.874735 |
6f219aeb01ea519373b2d340f40c1eec75858a63 | 1,242 | py | Python | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/circuitplayground_temperature_neopixels.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
]
| 75 | 2017-08-17T18:00:28.000Z | 2022-03-30T00:37:20.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/circuitplayground_temperature_neopixels.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
]
| 65 | 2017-08-02T02:06:54.000Z | 2022-03-21T22:45:08.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/circuitplayground_temperature_neopixels.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
]
| 64 | 2017-09-15T21:05:50.000Z | 2022-02-10T06:20:45.000Z | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
This example use the temperature sensor on the Circuit Playground, located next to the picture of
the thermometer on the board. Try warming up the board to watch the number of NeoPixels lit up
increase, or cooling it down to see the number decrease. You can set the min and max temperatures
to make it more or less sensitive to temperature changes.
"""
import time
from adafruit_circuitplayground import cp
cp.pixels.auto_write = False
cp.pixels.brightness = 0.3
# Set these based on your ambient temperature in Celsius for best results!
minimum_temp = 24
maximum_temp = 30
def scale_range(value):
"""Scale a value from the range of minimum_temp to maximum_temp (temperature range) to 0-10
(the number of NeoPixels). Allows remapping temperature value to pixel position."""
return int((value - minimum_temp) / (maximum_temp - minimum_temp) * 10)
while True:
peak = scale_range(cp.temperature)
print(cp.temperature)
print(int(peak))
for i in range(10):
if i <= peak:
cp.pixels[i] = (0, 255, 255)
else:
cp.pixels[i] = (0, 0, 0)
cp.pixels.show()
time.sleep(0.05)
| 31.846154 | 97 | 0.716586 |
5bf21623b2fd650d7ac57816200a2d165266d337 | 422 | py | Python | teraserver/python/tests/opentera/db/models/test_TeraUserGroup.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
]
| 10 | 2020-03-16T14:46:06.000Z | 2022-02-11T16:07:38.000Z | teraserver/python/tests/opentera/db/models/test_TeraUserGroup.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
]
| 114 | 2019-09-16T13:02:50.000Z | 2022-03-22T19:17:36.000Z | teraserver/python/tests/opentera/db/models/test_TeraUserGroup.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
]
| null | null | null | import unittest
import os
from modules.DatabaseModule.DBManager import DBManager
from opentera.config.ConfigManager import ConfigManager
from tests.opentera.db.models.BaseModelsTest import BaseModelsTest
class TeraUserGroupTest(BaseModelsTest):
filename = os.path.join(os.path.dirname(__file__), 'TeraUserGroupTest.db')
SQLITE = {
'filename': filename
}
def test_defaults(self):
pass
| 22.210526 | 78 | 0.758294 |
febf7fbc7ecaef4776bcedcb2d3c0564a2c9b3d8 | 3,461 | py | Python | crseek/annotators.py | DamLabResources/crisprtree | 13d8870dc1d3bba6c58ef23772c1a2504e817198 | [
"MIT"
]
| 5 | 2018-11-11T08:57:42.000Z | 2021-07-27T21:31:00.000Z | crseek/annotators.py | DamLabResources/crisprtree | 13d8870dc1d3bba6c58ef23772c1a2504e817198 | [
"MIT"
]
| 1 | 2019-01-08T15:17:07.000Z | 2019-01-09T08:13:01.000Z | crseek/annotators.py | DamLabResources/crseek | 13d8870dc1d3bba6c58ef23772c1a2504e817198 | [
"MIT"
]
| null | null | null | import pandas as pd
from Bio import Alphabet
from Bio.Seq import Seq, reverse_complement
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.SeqRecord import SeqRecord
from crseek import exceptions
from crseek.estimators import SequenceBase
from crseek import utils
def annotate_grna_binding(spacer, seq_record, estimator, extra_qualifiers=None,
exhaustive=False, mismatch_tolerance=6, openci_devices=None):
""" In-place annotation of gRNA binding location.
Parameters
----------
spacer : Seq
spacer to search for.
seq_record : SeqRecord
The sequence to search within
estimator : SequenceBase or None
Estimator to use to evaluate spacer binding. If None, exact string matching is used.
extra_qualifiers : dict
Extra qualifiers to add to the SeqFeature
exhaustive : bool
If True then all positions within the seq_record are checked.
If False then a mismatch search is performed first.
mismatch_tolerance : int
If using a mismatch search, the tolerance.
openci_devices : str or None
Formatted string of device-IDs acceptable to cas-offinder. If None
the first choice is picked from the OpenCI device list.
Returns
-------
SeqRecord
"""
exceptions._check_seq_alphabet(spacer, base_alphabet=Alphabet.RNAAlphabet)
exceptions._check_seq_alphabet(seq_record.seq, base_alphabet=Alphabet.DNAAlphabet)
if estimator is None:
pos = seq_record.seq.find(spacer)
strand = 1
if pos == -1:
pos = seq_record.seq.find(reverse_complement(spacer))
strand = -1
if pos == -1:
raise ValueError('Could not find exact match on either strand')
seq_record.features.append(_build_target_feature(pos, strand, spacer, score=1,
extra_quals=extra_qualifiers))
return seq_record
if exhaustive:
tiles = utils.tile_seqrecord(spacer, seq_record)
else:
tiles = utils.cas_offinder([spacer], mismatch_tolerance, locus=[seq_record],
openci_devices=openci_devices)
pred = estimator.predict(tiles[['spacer', 'target']].values)
pred_ser = pd.Series(pred, index=tiles.index)
hits = pred_ser[pred_ser]
for _, strand, left in hits.index:
seq_record.features.append(_build_target_feature(left, strand, spacer, score=1,
extra_quals=extra_qualifiers))
return seq_record
def _build_target_feature(left, strand, spacer, score=1, extra_quals=None):
"""
Parameters
----------
left : int
Left most position of the binding site
strand : int
1 or -1 indicating the positive or negative strand
spacer : Seq
gRNA that's targetted to this location
score : float
Binding score of the gRNA to this location
extra_quals : dict
Extra qualifiers to add to the SeqFeature
Returns
-------
SeqFeature
"""
if strand not in {-1, 1}:
raise ValueError('Strand must be {1, -1}')
quals = {'spacer': str(spacer),
'On Target Score': score}
if extra_quals is not None:
quals.update(extra_quals)
return SeqFeature(FeatureLocation(start=left, end=left + 23, strand=strand),
qualifiers=quals)
| 32.345794 | 92 | 0.645767 |
571aa4a263581d9cdbb9d3d32ade91bccc9465eb | 173 | py | Python | iamheadless_publisher/apps.py | plain-ie/iamheadless_publisher | 8a7bcee202c3dae6909452d87936f4d9f2d2668e | [
"MIT"
]
| null | null | null | iamheadless_publisher/apps.py | plain-ie/iamheadless_publisher | 8a7bcee202c3dae6909452d87936f4d9f2d2668e | [
"MIT"
]
| null | null | null | iamheadless_publisher/apps.py | plain-ie/iamheadless_publisher | 8a7bcee202c3dae6909452d87936f4d9f2d2668e | [
"MIT"
]
| null | null | null | from django.apps import AppConfig
class IamheadlessPublisherConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'iamheadless_publisher'
| 24.714286 | 56 | 0.797688 |
b775e76527b76b4f14eabe487616fcd891e8a359 | 1,004 | py | Python | main.py | yadhu621/zerossl-app | a20a9820cc0afb0b57968bfa8c13033623710db7 | [
"MIT"
]
| null | null | null | main.py | yadhu621/zerossl-app | a20a9820cc0afb0b57968bfa8c13033623710db7 | [
"MIT"
]
| 1 | 2022-03-09T01:58:17.000Z | 2022-03-09T01:58:17.000Z | main.py | yadhu621/zerossl-app | a20a9820cc0afb0b57968bfa8c13033623710db7 | [
"MIT"
]
| null | null | null | import requests
API_KEY = "96903f1cf7f7d5e8493cf56d52be4c31"
BASE_URL = "https://api.zerossl.com"
# api.zerossl.com/example_endpoint?access_key=EXAMPLE_KEY
URL = BASE_URL + '/certificates?access_key=' + API_KEY
DOMAIN = "amyra.co.uk"
# List certificates
resp = requests.get(URL)
resp_dict = resp.json()
results = resp_dict["results"]
existing_subdomains = []
for result in results:
subdomain = result["common_name"]
if result["status"] == "issued" and DOMAIN in subdomain:
existing_subdomains.append(subdomain)
print(existing_subdomains)
# List certificates with expiry date
existing_subdomains_with_expiry = []
for result in results:
if result["status"] == "issued":
subdomain = result["common_name"]
expiry = result["expires"][0:10]
existing_subdomains_with_expiry.append(
{
"subdomain": subdomain,
"expiry": expiry
})
print(existing_subdomains_with_expiry)
if __name__ == "__main__":
pass
| 25.74359 | 60 | 0.688247 |
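A follow-on sketch reusing the results structure queried above to flag certificates close to expiry; it assumes the expires field starts with a YYYY-MM-DD date, as the [0:10] slicing above suggests.

from datetime import datetime, timedelta

def expiring_soon(results, days=30):
    cutoff = datetime.utcnow() + timedelta(days=days)
    soon = []
    for result in results:
        if result["status"] != "issued":
            continue
        expiry = datetime.strptime(result["expires"][0:10], "%Y-%m-%d")
        if expiry <= cutoff:
            soon.append({"subdomain": result["common_name"], "expiry": result["expires"][0:10]})
    return soon

print(expiring_soon(results))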