metadata | text
---|---
{
"source": "jpmeijers/picontroller",
"score": 2
} |
#### File: jpmeijers/picontroller/aprs_transmit.py
```python
import pprint
import subprocess
import time
import random
#own classes
import gpio_management
packet_counter = 1
def init():
global packet_counter
packet_counter = 1
def send_beacon(config):
#beacon -c ZS1JPM-6 -d "BEACON WIDE1-1" -s sm0 "=3357.92S/01850.19E#JP in technopark"
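    # Decimal degrees are converted below to APRS' ddmm.mm format, e.g. a
    # latitude of -33.9653 becomes 33 degrees and 0.9653 * 60 ~= 57.92 minutes
    # south, i.e. "3357.92S" (matching the example beacon string above).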
location = config.get("APRS encoding", "my_location")
location = location.split(",")
latitude = float(location[0])
longitude = float(location[1])
latDeg = int(latitude)
lonDeg = int(longitude)
latMin = abs(latitude-latDeg)*60
lonMin = abs(longitude-lonDeg)*60
latDir = "N"
if (latDeg<0): latDir = "S"
lonDir = "E"
if (lonDeg<0): lonDir = "W"
    # APRS positions are fixed width: ddmm.mmN/S and dddmm.mmE/W
    latFull = "%02d%05.2f%s" % (abs(latDeg), latMin, latDir)
    # print(latFull)
    lonFull = "%03d%05.2f%s" % (abs(lonDeg), lonMin, lonDir)
    # print(lonFull)
send_packet(config, "="+latFull+config.get("APRS encoding", "my_symbol_table")
+lonFull+config.get("APRS encoding", "my_symbol_object")
+config.get("APRS encoding", "my_name"))
def send_current_state(config,source_callsign, source_ssid):
input_states = gpio_management.get_input_states(config)
output_states = gpio_management.get_output_states_from_cache(config)
reply_message = "IN "
for i in input_states:
reply_message += i.upper()+","
reply_message = reply_message.rstrip(",")
reply_message += " OUT "
for o in output_states:
reply_message += o.upper()+","
reply_message = reply_message.rstrip(",")
#TODO: temperature sensor
send_message(config, ":"+(source_callsign+"-"+source_ssid).ljust(9)+":STATUS "+reply_message+" END")
def send_message(config, message_text):
global packet_counter
send_packet(config, message_text+"{%d" % packet_counter)
    if packet_counter < 99999:
        packet_counter += 1
    else:
        packet_counter = 1
def send_packet(config, message_text):
    # works, but give it a second of delay first,
    # plus a short random back-off to reduce packet collisions
    time.sleep(1)
    time.sleep(random.random() * 2)
subprocess.call(["beacon -c "+config.get("APRS encoding", "my_callsign")
+" -d "+config.get("APRS encoding", "beacon_path")
+" -s "+config.get("soundmodem", "sm_interface")
+" \""+message_text+"\""], shell=True)
print ("Packet sent: "+message_text)
``` |
{
"source": "jpmens/jopleet",
"score": 3
} |
#### File: jpmens/jopleet/jopleet.py
```python
import configparser
from string import Template
from bs4 import BeautifulSoup
import tweepy
import os
import sys
import json
import requests
import re
import getopt
jurl = None
token = None
parent_folder = None
# This is the Markdown template which will be written to each note.
t = Template("""$text
$images
* * *
$name (@$screen_name)
[$date]($url)
""")
def get_all_tags():
taglist = {}
joplin_url = "{0}/tags?token={1}".format(jurl, token)
r = requests.get(joplin_url)
if r.status_code == 200:
# print(json.dumps(r.json(), indent=4))
items = r.json()["items"]
for t in items:
taglist[t['title']] = t['id']
return taglist
def set_tag(tag_id, note_id):
""" post a note to the tag id; it suffices if note has "id" in it """
joplin_url = "{0}/tags/{1}/notes?token={2}".format(jurl, tag_id, token)
headers = {
"Content-type" : "application/json",
"Accept" : "text/plain"
}
data = {
"id" : note_id,
}
r = requests.post(joplin_url, data=json.dumps(data), headers=headers)
if r.status_code != 200:
print("Cannot POST to tag {0} for note {1}: code={2} {3}".format(tag_id, note_id, r.status_code, r.text), file=sys.stderr)
def upload_image(filename, url):
""" url points to image in tweet; download the bytes and upload
to Joplin; return a resource_id """
resource_id = None
joplin_url = "{0}/resources?token={1}".format(jurl, token)
payload = {
"title" : filename,
}
tw_r = requests.get(url)
if tw_r.status_code == 200:
# tw_r.decode_content = True
files = {
"data" : (filename, tw_r.content, "application/octet-stream"),
"props" : (None, json.dumps(payload), "application/json"),
}
r = requests.request("POST", verify=False, url=joplin_url, files=files)
if r.status_code == 200:
data = json.loads(r.text)
resource_id = data['id']
else:
print("UPLOAD of image failed", r.content)
print(r.status_code)
return resource_id
def trunc(s, length=50):
if len(s) <= length:
return s
else:
return " ".join(s[:length + 1].split(' ')[0:-1]) + "..."
def new_note(params):
# print(params)
headers = {
"Content-type" : "application/json",
"Accept" : "text/plain"
}
joplin_url = "{0}/notes?token={1}".format(jurl, token)
data = {
"parent_id" : parent_folder,
"is_todo" : 0,
"title" : trunc(params["text"]),
"body" : params["body"],
"author" : params["screen_name"],
"source_url" : params["url"],
}
if "lat" in params:
# warning: Joplin wants strings!
data["latitude"] = str(params["lat"])
data["longitude"] = str(params["lon"])
data["altitude"] = "0.0000"
## print(json.dumps(data, indent=4))
r = requests.post(joplin_url, data=json.dumps(data), headers=headers)
if r.status_code != 200:
print("status_code: {0} {1}".format(r.status_code, r.text))
else:
j = json.loads(r.text)
note_id = j.get("id")
note_title = j.get("title", "unknown")
print("ID: {0}, {1}".format(note_id, note_title))
if "tags" in params and params["tags"] is not None:
taglist = get_all_tags()
for tag in params["tags"].split(','):
if tag in taglist:
tag_id = taglist[tag]
set_tag(tag_id, note_id)
def store(api, url, status, tags=None):
status_id = status.id
# remove link to tweet in body
    s = re.sub(r'https://t\.co/[a-zA-Z0-9]+$', '', status.full_text)
soup = BeautifulSoup(s, features="html.parser")
images = ""
params = {
'url' : url,
'status_id' : status_id,
'date' : status.created_at,
'name' : status.user.name,
'screen_name' : status.user.screen_name,
'profile_img' : status.user.profile_image_url_https,
'text' : soup.get_text(),
'images' : images,
'tags' : tags,
}
# Coordinates now work; the bug I submitted in October 2020
# https://github.com/laurent22/joplin/issues/3884
# has been fixed.
if status.coordinates is not None:
if "coordinates" in status.coordinates:
params["lon"] = status.coordinates["coordinates"][0]
params["lat"] = status.coordinates["coordinates"][1]
# see if we have media and upload each image (warning: no check!)
# to Joplin; assemble resource_ids so we can create Markdown
# image links to add to the note's body
if 'extended_entities' in status._json:
if 'media' in status._json["extended_entities"]:
for image in status._json["extended_entities"]['media']:
u = image['media_url']
# get image, send to Joplin
basename = u.split('/')[-1]
filename = f'tweet_{status_id}-{basename}'
resource_id = upload_image(filename, u)
if resource_id is not None:
                        # Joplin references an uploaded resource as ![title](:/resource_id)
                        images = images + f'![{filename}](:/{resource_id})\n\n'
params['images'] = images
params["body"] = t.substitute(params)
new_note(params)
if __name__ == '__main__':
tags = None
try:
opts, args = getopt.getopt(sys.argv[1:], "t:")
except getopt.GetoptError:
print("Usage: {0} [-t tags] url [url ...]".format(sys.argv[0]))
sys.exit(2)
for opt, arg in opts:
if opt == "-t":
tags = arg
config = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"jopleet.config")
    cfg = configparser.RawConfigParser()
    cfg.read(config)
    jurl = cfg.get('authentication', 'joplin_url')
    token = cfg.get('authentication', 'token')
    parent_folder = cfg.get('authentication', 'parent_folder')
    ConsumerKey = cfg.get('authentication', 'ConsumerKey')
    ConsumerSecret = cfg.get('authentication', 'ConsumerSecret')
    AccessToken = cfg.get('authentication', 'AccessToken')
    AccessTokenSecret = cfg.get('authentication', 'AccessTokenSecret')
auth = tweepy.OAuthHandler(ConsumerKey, ConsumerSecret)
auth.set_access_token(AccessToken, AccessTokenSecret)
api = tweepy.API(auth)
for url in args:
status_id = url.split('/')[-1]
status = api.get_status(status_id, include_entities=True, tweet_mode='extended')
store(api, url, status, tags)
``` |
{
"source": "jpmens/revgeod",
"score": 3
} |
#### File: python/revgeod/geocoder.py
```python
import requests
import json
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright 2018--2019 <NAME>'
__license__ = """GNU General Public License"""
class RevgeodGeocode(object):
"""
Geocoder object. Initialize with host, port
>>> geocoder = RevgeodGeocode(host="127.0.0.1", port=8865)
Reverse geocode a latitude & longitude into an address dict:
>>> geocoder.reverse_geocode(51.5104, -0.1021)
"""
def __init__(self, host="127.0.0.1", port=8865, app="python"):
self.url = "http://{host}:{port}/rev".format(host=host, port=port)
self.appname = app
self.session = requests.Session()
self.session.headers.update({
"Content-type" : "application/json"
})
def reverse_geocode(self, lat, lon):
"""
Given a lat, lon, return the address for that point from revgeod
"""
params = {
u'lat' : float_it(lat),
u'lon' : float_it(lon),
u'app' : self.appname,
}
        # start with an empty dict so a failed lookup still returns the params
        data = {}
r = self.session.get(self.url, params=params)
try:
data = json.loads(r.content)
if 'address' in data:
data = data['address']
except Exception as e:
print(str(e))
data.update(**params)
return data
def float_it(float_string):
try:
return float(float_string)
except ValueError:
return float_string
``` |
{
"source": "jpmens/tripp",
"score": 2
} |
#### File: tripp/contrib/worker.py
```python
import beanstalkc # pip install beanstalkc
import json
import sys
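# Assumption: qtripp publishes JSON-encoded job bodies on the 'qtripp' tube;
# each reserved job is pretty-printed and then deleted once processed.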
def process_job(body):
data = json.loads(body)
    print(json.dumps(data, indent=4))
return True
if __name__ == '__main__':
beanstalk = beanstalkc.Connection(host='localhost', port=11300)
beanstalk.use('qtripp')
beanstalk.watch('qtripp')
beanstalk.ignore('default')
print "using:", beanstalk.using()
print "watching:", beanstalk.watching()
try:
while True:
job = beanstalk.reserve()
if process_job(job.body) == True:
job.delete()
except KeyboardInterrupt:
sys.exit(1)
except:
raise
``` |
{
"source": "jpmerlet/gp_spc",
"score": 2
} |
#### File: gp_spc/code_git/mp_gp_test.py
```python
import GPy
import numpy as np
import time
from os import getpid
import pandas as pd
import matplotlib.pyplot as plt
import scipy.spatial as spatial
from scipy import stats
from scipy.special import inv_boxcox
import multiprocessing
import math
# load the training data
train_data = pd.read_csv('../../GP_Data/cy17_spc_assays_rl6_entry.csv')
train_cols = ['midx', 'midy', 'midz', 'cut']
test_data = pd.read_csv('../../GP_Data/cy17_spc_assays_pvo_entry.csv')
test_cols = ['midx', 'midy', 'midz']
# define the plot styles
# jtplot.style(theme='onedork',figsize = (16.5,12))
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.name:
print('[%s]' % self.name, end=' ')
print('Elapsed: %s' % (time.time() - self.tstart))
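# Usage sketch for the Timer context manager above (illustrative only):
#   with Timer('gp fit'):
#       do_work()
# prints "[gp fit] Elapsed: <seconds>" when the block exits.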
def get_holeids():
df_holeid = test_data['dhid']
seen = set()
HOLEID = []
for item in df_holeid:
if item not in seen:
seen.add(item)
HOLEID.append(item)
return HOLEID
def get_test_points_holeid(idhole):
return test_data.loc[test_data['dhid'] == idhole][test_cols].as_matrix()
def get_y_holeid(idhole):
return test_data.loc[test_data['dhid'] == idhole][['cut']].as_matrix()
def get_cut_xyz_by_holeid(idhole):
xyz_cut = test_data.loc[test_data['dhid'] == idhole][['midx',
'midy',
'midz',
'cut']].as_matrix()
return xyz_cut
def get_pozo_holeid(idhole, cols_names=None):
if cols_names is None:
cols_names = ['midx', 'midy', 'midz', 'minty', 'cut', 'f1']
hole = test_data.loc[test_data['dhid'] == idhole][cols_names].as_matrix()
return hole
def get_trainingSet_by_point(test_point, distancia):
    # distancia could be computed per case,
    # depending on how many points are found
X_train_df = train_data[['dhid', 'midx', 'midy', 'midz']]
y_df = train_data[['dhid', 'cut']]
X_train = X_train_df[['midx', 'midy', 'midz']].as_matrix()
train_tree = spatial.cKDTree(X_train)
idx = train_tree.query_ball_point(list(test_point), distancia)
return X_train_df.iloc[idx, :], y_df.iloc[idx, :]
def get_traningSet(idhole, distancia):
    # returns an X DataFrame with the training
    # points for the whole drillhole dhid
X_train_df = train_data[['dhid', 'midx', 'midy', 'midz']]
y_df = train_data[['dhid', 'cut']]
X_train = X_train_df[['midx', 'midy', 'midz']].as_matrix()
test_points = get_test_points_holeid(idhole)
test_tree = spatial.cKDTree(test_points)
train_tree = spatial.cKDTree(X_train)
idx_rep = test_tree.query_ball_tree(train_tree, distancia)
idx_sin_rep = list(set([indice for lista in idx_rep for indice in lista]))
return X_train_df.iloc[idx_sin_rep, :], y_df.iloc[idx_sin_rep, :]
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
# Print New Line on Complete
if iteration == total:
print()
def estimacion_by_point(modelo, ker, transform=False, Plotear=False, std=1,
lik=GPy.likelihoods.Gaussian(),
inf=GPy.inference.latent_function_inference.ExactGaussianInference()):
    # dictionary that will hold the predictions per dhid
    global m, y_pred, lmb
    dicc_preds = dict()
    # search radius for samples (maybe better to use the minimum required?)
distancia = 33
IDHOLEs = get_holeids()
n = len(IDHOLEs)
# HOLEIDs.remove('F06-1580-016')
# HOLEIDs.remove('F06-1610-004')
# HOLEIDs.remove('F06-1625-021')
# HOLEIDs.remove('F13-1595-005')
# HOLEIDs.remove('F05-1565-009')
for idx, idhole in enumerate(IDHOLEs):
y_preds = list()
test_points = get_test_points_holeid(idhole)
for test_point in test_points:
X_df, y_df = get_trainingSet_by_point(test_point, distancia)
X = X_df[['midx', 'midy', 'midz']].as_matrix()
y = y_df[['cut']].as_matrix()
X_std = (X - X.mean()) / X.std()
y_std = (y - y.mean()) / y.std()
            if std == 1:
                test_point_std = (test_point - X.mean()) / X.std()
            elif std == 0:
                test_point_std = (test_point - test_points.mean()) / test_points.std()
            else:
                print('std must be 0 or 1.')
if X_df.shape[0] < 10:
y_preds.extend(list(np.array([-99])))
continue
if modelo == 'sgpr':
# m = GPy.models.SparseGPRegression(X,y,ker)
if transform:
y_cox, lmb = stats.boxcox(y)
m = GPy.core.GP(X, y_cox, kernel=ker, likelihood=lik, inference_method=inf)
else:
m = GPy.core.GP(X_std, y_std, kernel=ker,
likelihood=lik,
inference_method=inf)
else:
m = GPy.models.GPRegression(X, y, ker)
try:
m.optimize(messages=False, max_f_eval=1000)
y_pred, _ = m.predict(np.array([[test_point_std[0],
test_point_std[1],
test_point_std[2]]]))
            except GPy.util.linalg.linalg.LinAlgError as err:
                if 'not positive definite' in str(err):
                    print('not positive definite, even with jitter.')
            except np.linalg.LinAlgError:
                print('The matrix defined by the kernel is not positive definite')
y_preds.extend(list(y_pred * y.std() + y.mean()))
if transform:
y_preds = inv_boxcox(y_preds, lmb)
        # convert the predictions to an ndarray, just in case
y_preds_ndarray = np.array(y_preds.copy())
dicc_preds[idhole] = y_preds_ndarray
        # store the measured (real) gold values at the test points of this dhid
y_medido = get_y_holeid(idhole).reshape(y_preds_ndarray.shape)
if Plotear:
            # inspect the results per dhid
fig, ax = plt.subplots()
ax.scatter(y_medido, y_preds_ndarray, edgecolors=(0, 0, 0))
ax.plot([y_medido.min(), y_medido.max()], [y_medido.min(), y_medido.max()], 'k--', lw=2)
            ax.set_xlabel('Measured')
            ax.set_ylabel('Prediction')
            ax.set_title('Regression for drillhole %s' % idhole)
# ax.set_title('DHID:%s, Kernel: %s' % (dhid,gp.kernel_))
printProgressBar(idx + 1, n,
prefix='Current HOLEID: {}. Total Progress:'.format(IDHOLEs[(idx + 1) * (idx < n - 1)]),
suffix='Complete', length=50)
if Plotear:
plt.show()
return m, dicc_preds
def estimation_by_point_mp(IDHOLEs, out_q, modelo, ker, distancia, transform, std=1, lik=GPy.likelihoods.Gaussian(),
inf=GPy.inference.latent_function_inference.ExactGaussianInference()):
    # search radius for samples (maybe better to use the minimum required?)
global lmbda
n = len(IDHOLEs)
dicc_preds = {}
for idx, idhole in enumerate(IDHOLEs):
y_preds = list()
test_points = get_test_points_holeid(idhole)
for test_point in test_points:
X_df, y_df = get_trainingSet_by_point(test_point, distancia)
# while X_df.shape[0] < 20:
# distancia_aumentada = 5 + distancia
# X_df, y_df = get_trainingSet_by_point(test_point, distancia_aumentada)
if X_df.shape[0] < 10:
y_preds.extend(list(np.array([-99])))
continue
X = X_df[['midx', 'midy', 'midz']].as_matrix()
y = y_df[['cut']].as_matrix()
X_std = (X - X.mean()) / X.std()
y_std = (y - y.mean()) / y.std()
            if std == 1:
                test_point_std = (test_point - X.mean()) / X.std()
            elif std == 0:
                test_point_std = (test_point - test_points.mean()) / test_points.std()
            else:
                print('std must be 0 or 1.')
if modelo == 'sgpr':
# m = GPy.models.SparseGPRegression(X,y,ker)
if transform:
y_cox, lmbda = stats.boxcox(y)
modelo = GPy.core.GP(X, y_cox, kernel=ker, likelihood=lik, inference_method=inf)
else:
modelo = GPy.core.GP(X_std, y_std, kernel=ker,
likelihood=lik,
inference_method=inf)
else:
modelo = GPy.models.GPRegression(X, y, ker)
y_predicc = -99
try:
modelo.optimize(messages=False, max_f_eval=1000)
y_predicc, _ = modelo.predict(np.array([[test_point_std[0],
test_point_std[1],
test_point_std[2]]]))
            except GPy.util.linalg.linalg.LinAlgError as err:
                if 'not positive definite' in str(err):
                    print('not positive definite, even with jitter.')
            except np.linalg.LinAlgError:
                print('The matrix defined by the kernel is not positive definite')
y_preds.extend(list(y_predicc * y.std() + y.mean()))
if transform:
y_preds = inv_boxcox(y_preds, lmbda)
        # convert the predictions to an ndarray, just in case
y_preds_ndarray = np.array(y_preds.copy())
dicc_preds[idhole] = y_preds_ndarray
# printProgressBar(i + 1, n,
# prefix='Current HOLEID: {}. Total Progress:'.format(HOLEIDs[(i + 1) * (i < n - 1)]),
# suffix='Complete', length=50)
printProgressBar(idx + 1, n,
prefix='Current process: {}. Total Progress:'.format(getpid()),
suffix='Complete', length=50)
out_q.put(dicc_preds)
return
def mp_gaussian_process_by_test_point(IDHOLEs, nprocs, modelo, ker, distancia=33, transform=False):
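    # Fan-out/fan-in: IDHOLEs is split into nprocs chunks, one Process runs
    # estimation_by_point_mp per chunk, and each worker's {holeid: predictions}
    # dict is collected from out_q and merged into a single result dict.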
out_q = multiprocessing.Queue()
chuncksize = int(math.ceil(len(IDHOLEs) / float(nprocs)))
procs = []
for idx in range(nprocs):
p = multiprocessing.Process(target=estimation_by_point_mp,
args=(IDHOLEs[chuncksize * idx:chuncksize * (idx + 1)],
out_q, modelo, ker, distancia, transform))
procs.append(p)
p.start()
resultdict = {}
for idx in range(nprocs):
resultdict.update(out_q.get())
for p in procs:
p.join()
return resultdict
if __name__ == '__main__':
# modelo: sgpr
# transform: True
# lik: GPy.likelihoods.Gaussian()
# ker: GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Matern32(input_dim =1, active_dims = [2])
# inf: GPy.inference.latent_function_inference.ExactGaussianInference()
# HOLEIDs = get_holeids()
# kernel = GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Matern32(input_dim =1, active_dims = [2])
# t0 = time.time()
# diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
# print('Tiempo para gp en paralelo: {}'.format(time.time()-t0))
    # export the data
# outfile_name = 'mp_test_'+'all'+'.csv'
# outfile = open(outfile_name, 'w')
# outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
# for holeid in HOLEIDs:
# pozo = get_pozo_holeid(holeid)
# for i, fila in enumerate(pozo):
# line = fila[0], fila[1], fila[2], fila[3], fila[4],diccionario[holeid][i,], fila[5]
# outfile.write('%f,%f,%f,%s,%s,%s,%f,%f\n' % line)
# outfile.close()
    # Model over all drillholes
# modelo: sgpr (Sparse Gaussian process)
# transform: False
# lik: GPy.likelihoods.Gaussian()
# ker: GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Bias(3)
# inf: GPy.inference.latent_function_inference.ExactGaussianInference()
# HOLEIDs = get_holeids()
# kernel = GPy.kern.Matern32(input_dim=2, active_dims=[0, 1]) + GPy.kern.Bias(3)
# t0 = time.time()
# diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
# print('Tiempo para gp en paralelo: {}'.format(time.time()-t0))
    # export the data
# outfile_name = 'mp_test_'+'all_1'+'.csv'
# outfile = open(outfile_name, 'w')
# outfile.write('xcentre,ycentre,zcentre,minty,lito,alt,cut,cut_poz\n')
# for holeid in HOLEIDs:
# pozo = get_pozo_holeid(holeid)
# for i, fila in enumerate(pozo):
# line = fila[0], fila[1], fila[2], fila[3], fila[4], fila[5], diccionario[holeid][i, ],fila[6]
# outfile.write('%f,%f,%f,%s,%s,%s,%f,%f\n' % line)
# outfile.close()
    # Model over all drillholes
# modelo: sgpr (Sparse Gaussian process)
# transform: False
# lik: GPy.likelihoods.Gaussian()
# ker: GPy.kern.RBF(3,ARD = True)
# inf: GPy.inference.latent_function_inference.ExactGaussianInference()
# HOLEIDs = get_holeids()
# kernel = GPy.kern.RBF(3,ARD=True)
# t0 = time.time()
# diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
# print('Tiempo para gp en paralelo: {}'.format(time.time()-t0))
    # export the data
# outfile_name = 'mp_test_'+'all_2'+'.csv'
# outfile = open(outfile_name, 'w')
# outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
# for holeid in HOLEIDs:
# pozo = get_pozo_holeid(holeid)
# for i, fila in enumerate(pozo):
# line = fila[0], fila[1], fila[2], fila[3], fila[4], diccionario[holeid][i,], fila[5]
# outfile.write('%f,%f,%f,%f,%f,%f,%f\n' % line)
# outfile.close()
    # Model over all drillholes
# modelo: sgpr (Sparse Gaussian process)
# transform: False
# lik: GPy.likelihoods.Gaussian()
# ker: GPy.kern.RBF(3) + GPy.kern.Linear(3)
# inf: GPy.inference.latent_function_inference.ExactGaussianInference()
# HOLEIDs = get_holeids()
# kernel = GPy.kern.RBF(3) + GPy.kern.Linear(3)
# t0 = time.time()
    # diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)  # testing print inside multiprocessing
# print('Tiempo para gp en paralelo: {}'.format(time.time() - t0))
    # export the data
# outfile_name = 'mp_test_' + 'all_3' + '.csv'
# outfile = open(outfile_name, 'w')
# outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
# for holeid in HOLEIDs:
# pozo = get_pozo_holeid(holeid)
# for i, fila in enumerate(pozo):
# line = fila[0], fila[1], fila[2], fila[3], fila[4], diccionario[holeid][i,], fila[5]
# outfile.write('%f,%f,%f,%f,%f,%f,%f\n' % line)
# outfile.close()
    # Model over all drillholes
# modelo: sgpr (Sparse Gaussian process)
# transform: False
# lik: GPy.likelihoods.Gaussian()
# ker: GPy.kern.RBF(x)+ GPy.kern.RBF(y) + GPy.kern.RBF(z)
# inf: GPy.inference.latent_function_inference.ExactGaussianInference()
# HOLEIDs = get_holeids()
# kernel = GPy.kern.RBF(input_dim=1, active_dims=[1]) + \
# GPy.kern.RBF(input_dim=1, active_dims=[2]) + GPy.kern.RBF(input_dim=1, active_dims=[0])
# t0 = time.time()
# diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 5, 'sgpr', kernel)
# print('Tiempo para gp en paralelo: {}'.format(time.time() - t0))
    # export the data
# outfile_name = 'mp_test_' + 'all_4' + '.csv'
# outfile = open(outfile_name, 'w')
# outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
# for holeid in HOLEIDs:
# pozo = get_pozo_holeid(holeid)
# for i, fila in enumerate(pozo):
# line = fila[0], fila[1], fila[2], fila[3], fila[4], diccionario[holeid][i,], fila[5]
# outfile.write('%f,%f,%f,%f,%f,%f,%f\n' % line)
# outfile.close()
    # Model over all drillholes
# modelo: sgpr (Sparse Gaussian process)
# ker: Matern32(3, ARD=True)
# HOLEIDs = get_holeids()
# kernel = GPy.kern.Matern32(3, ARD=True)
# t0 = time.time()
# diccionario = mp_gaussian_process_by_test_point(HOLEIDs, 8, 'sgpr', kernel)
# print('Tiempo para gp con multiprocessing: {}'.format(time.time()-t0))
    # export the data
# outfile_name = 'mp_test_'+'all_5'+'.csv'
# outfile = open(outfile_name, 'w')
# outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
# for holeid in HOLEIDs:
# pozo = get_pozo_holeid(holeid)
# for i, fila in enumerate(pozo):
# line = fila[0], fila[1], fila[2], fila[3], fila[4], diccionario[holeid][i, ], fila[5]
# outfile.write('%f,%f,%f,%f,%f,%f,%f\n' % line)
# outfile.close()
    # Model over all drillholes
# modelo: sgpr (Sparse Gaussian process)
# ker: Matern52(3, ARD=True)
HOLEIDs = get_holeids()
kernel = GPy.kern.Matern52(3, ARD=True)
t0 = time.time()
diccionario = mp_gaussian_process_by_test_point(HOLEIDs[:8], 8, 'sgpr', kernel)
    print('Time for gp with multiprocessing: {}'.format(time.time()-t0))
    # export the data
outfile_name = 'mp_test_'+'all_6'+'.csv'
path_estimacion = 'estimaciones/'
outfile = open(path_estimacion + outfile_name, 'w')
outfile.write('xcentre,ycentre,zcentre,minty,cut_poz,cut,f1\n')
for holeid in HOLEIDs[:8]:
pozo = get_pozo_holeid(holeid)
for i, fila in enumerate(pozo):
line = fila[0], fila[1], fila[2], fila[3], fila[4], diccionario[holeid][i, ], fila[5]
outfile.write('%f,%f,%f,%f,%f,%f,%f\n' % line)
outfile.close()
```
#### File: gp_spc/kriging/estimar_outlier.py
```python
from block_model.controller.block_model import BlockModel
from drillhole.controller.composites import Composites
from geometry.controller.ellipsoid import Ellipsoid
from kriging.controller.search_ellipsoid import SearchEllipsoid
from kriging.controller.point_kriging import PointKriging
from variogram.controller.model import Model
from variogram.controller.structure import Structure
from common.discretize import *
from common.rotation import *
blockPath = '../../GP_Data/cy17_spc_assays_pvo_entry_ug.csv'
# blockPath = '../../GP_Data/test_kriging.csv'
ugVarBlock = 'ugcutPVO'
blockColumns = [(ugVarBlock, int)]
var = 'cut'
ugVarComp = 'ugcut'  # column containing the ug of the drillhole data
compColumns = [(var, float), (ugVarComp, float)]
compPath = '../../GP_Data/cy17_spc_assays_rl6_entry.csv'
# compPath = '../../GP_Data/cy17_spc_au_rl6_entry.csv'
def run():
blockModel, composites, ellipsoid = getObjects()
ugs = [10, 20, 30, 40, 50, 51, 60, 70, 71, 80]
for ug in ugs:
model = getModel(ug)
if model is not None:
blocks = blockModel.applyFilter('"%s" == %d' % (ugVarBlock, ug))
comps = composites.applyFilter('"%s" == %d' % (ugVarComp, ug))
estimate(blocks, comps, ellipsoid, model)
exportBlockModel(blockModel)
def getModel(ug):
    # variogram model per geological unit (ug)
if ug == 10:
nugget = 0.250
s1 = Structure(Structure.EXPONENTIAL, 0.480, Ellipsoid(19, 19, 19, 0, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.270, Ellipsoid(436, 436, 436, 0, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 20:
nugget = 0.250
s1 = Structure(Structure.EXPONENTIAL, 0.370, Ellipsoid(16, 22, 5, 20, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.380, Ellipsoid(177, 97, 27, 20, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 30:
nugget = 0.290
s1 = Structure(Structure.SPHERIC, 0.320, Ellipsoid(47, 103, 20, 30, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(601, 500, 32, 30, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 40:
nugget = 0.220
s1 = Structure(Structure.SPHERIC, 0.420, Ellipsoid(55, 20, 11, 40, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.360, Ellipsoid(447, 183, 26, 40, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 50:
nugget = 0.180
s1 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(16, 29, 11, 40, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.430, Ellipsoid(144, 93, 145, 40, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 51:
nugget = 0.140
s1 = Structure(Structure.SPHERIC, 0.390, Ellipsoid(14, 37, 28, 35, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.470, Ellipsoid(343, 183, 125, 35, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 60:
nugget = 0.150
s1 = Structure(Structure.SPHERIC, 0.550, Ellipsoid(14.8, 10.3, 11.9, 10, 0, 0))
s2 = Structure(Structure.SPHERIC, 0.300, Ellipsoid(954.5, 98.9, 16337.9, 10, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 70:
nugget = 0.150
s1 = Structure(Structure.EXPONENTIAL, 0.444, Ellipsoid(18.6, 15.1, 18.1, 10, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.406, Ellipsoid(18.8, 14.9, 208.9, 10, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
elif ug == 71:
nugget = 0.200
s1 = Structure(Structure.EXPONENTIAL, 0.441, Ellipsoid(11.1, 7.9, 9.8, 20, 0, 0))
s2 = Structure(Structure.EXPONENTIAL, 0.359, Ellipsoid(143.7, 161.0, 3777.8, 20, 0, 0))
structures = [s1, s2]
return Model(nugget, structures)
return None
def estimate(blocks, composites, ellipsoid, model):
    # rotate the composites
rotatedPoints = rotateComposites(composites, ellipsoid.rotationMatrix)
    # build a dictionary to look up each sample by its rotated coordinate
compositesByRotatedPoint = dict([(tuple(rotatedPoints[i]), composites[i])
for i in range(len(rotatedPoints))])
    # discretize the space
discretizedPoints = discretizePoints(rotatedPoints,
ellipsoid.major,
ellipsoid.medium,
ellipsoid.minor)
kriging = PointKriging(ellipsoid, model)
cap = 2
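    # Each block grade is estimated as the ordinary-kriging weighted sum of the
    # capped composite grades, value = sum_i(weights[i] * min(grade_i, cap)).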
    print('Estimating block model:')
for block in blocks:
        # rotate the point to be estimated
rx, ry, rz = rotateBlock(block, ellipsoid.rotationMatrix)
        # get the composites close to the block centre
points = ellipsoid.searchPointsInDiscretizedPoints((rx, ry, rz), discretizedPoints)
if len(points) > 0:
            # sort the points by distance to the block
points = sorted(points, key=lambda point: point[0])
inEllipsoid = []
for distance, rotatedPoint, movedPoint, octant in points:
composite = compositesByRotatedPoint[rotatedPoint]
inEllipsoid.append((distance, composite, octant))
            # select the samples that meet the requested criteria
selectedSamples = ellipsoid.selectSamples(inEllipsoid)
if len(selectedSamples) > 0:
                print('{} samples were used'.format(len(selectedSamples)))
blockpoint = (block.x, block.y, block.z)
                weights, variance = kriging.ordinary(selectedSamples, blockpoint)
                value = 0
                for i in range(len(selectedSamples)):
                    _, comp, _ = selectedSamples[i]
                    # capping: clip grades above cap before weighting
                    gradeComp = comp[var] if comp[var] <= cap else cap
                    # accumulate the kriging-weighted grade
                    value += gradeComp * weights[i]
                block.grade = value
def exportBlockModel(blockModel):
    # block model export
outfile = 'modelo_estimado_sondaje.csv'
outfile = open(outfile, 'w')
outfile.write('x,y,z,grade\n')
for block in blockModel:
if hasattr(block, 'grade'):
line = block.x, block.y, block.z, block.grade
else:
line = block.x, block.y, block.z, -99
outfile.write("%f,%f,%f,%f\n" % line)
outfile.close()
def getObjects():
    # load the block model, composites and category script
blockModel = BlockModel(path=blockPath, x='midx', y='midy', z='midz', readBlocks=True)
# composites = Composites(path=compPath, holeid='dhid', middlex='midx', middley='midy', middlez='midz',
# from_='from', to_='to', columns=compColumns, readComposites=True)
composites = Composites(path=compPath, holeid='dhid', middlex='midx', middley='midy', middlez='midz',
columns=compColumns, readComposites=True)
major, medium, minor = 100, 100, 100
bearing, plunge, dip = 0, 0, 0
minSamples, maxSamples = 10, 100
minSamplesByOctant, maxSamplesByOctant = 1, 100
minOctantWithSamples, maxSamplesByDrillhole = 1, 100
ellipsoid = SearchEllipsoid(major=major, medium=medium, minor=minor, bearing=bearing, plunge=plunge, dip=dip,
minSamples=minSamples, maxSamples=maxSamples,
minSamplesByOctant=minSamplesByOctant, maxSamplesByOctant=maxSamplesByOctant,
minOctantWithSamples=minOctantWithSamples, maxSamplesByDrillhole=maxSamplesByDrillhole)
return blockModel, composites, ellipsoid
if __name__ == '__main__':
run()
``` |
{
"source": "jpmfribeiro/PyCharts",
"score": 3
} |
#### File: pycharts/fields/color_axis_field.py
```python
class ColorAxisField(object):
def __init__(self, min=0):
self.min = min
def set_min(self, min):
self.min = min
def to_javascript(self):
jsc = "colorAxis: { min: " + str(self.min) + " }"
return jsc
```
#### File: fields/plot_options/data_labels.py
```python
class DataLabels(object):
def __init__(self, enabled=True):
self.enabled = enabled
def show_labels(self, enable):
        if not isinstance(enable, bool):
raise TypeError('enable should be a boolean (True or False).')
self.enabled = enable
def to_javascript(self):
jsc = "dataLabels: {"
jsc += "enabled: "
if self.enabled:
jsc += "true"
else:
jsc += "false"
jsc += "}"
return jsc
```
#### File: fields/series/series.py
```python
class Series(object):
def __init__(self, name, data):
self.name = name
self.data = data
def to_javascript(self):
jsc = "{"
jsc += "name: '" + self.name + "',"
jsc += "data: " + str(self.data) + "}"
return jsc
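# Example (illustrative): Series("Sales", [1, 2, 3]).to_javascript()
# returns "{name: 'Sales',data: [1, 2, 3]}"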
```
#### File: pycharts/fields/title_field.py
```python
class TitleField(object):
'''
The Python class representation of the title options, as specified in http://api.highcharts.com/highcharts#title
This is a reduced version with only the most important options.
'''
def __init__(self, text=""):
self.text = text
self.align = 'center'
def set_text(self, text):
self.text = str(text)
def set_align(self, align):
valid_aligns = ['center', 'left', 'right']
if align not in valid_aligns:
raise AttributeError('align ' + str(align) + ' is not supported, it should be one of: ' + str(valid_aligns))
else:
self.align = align
def to_javascript(self):
jsc = "title: {"
jsc += "text: '" + self.text + "'"
jsc += "}"
return jsc
``` |
{
"source": "jpmh1309/pyszn",
"score": 2
} |
#### File: pyszn/test/test_parser.py
```python
from collections import OrderedDict
from deepdiff import DeepDiff
from pyszn.parser import parse_txtmeta
def test_parse():
"""
Tests parsing of a complete SZN
"""
topology = """
# Environment
[virtual=none awesomeness=medium]
# Nodes
[shell=vtysh] sw1 sw2
[type=host] hs1
hs2
# Links
sw1:1 -- hs1:1
[attr1=2.1e2 attr2=-2.7e-1] sw1:a -- hs1:a
[attr1=1 attr2="lorem ipsum" attr3=(1, 3.0, "B")] sw1:4 -- hs2:a
"""
actual = parse_txtmeta(topology)
expected = {
'environment': OrderedDict(
[('virtual', 'none'), ('awesomeness', 'medium')]
),
'nodes': [
{
'attributes': OrderedDict([('shell', 'vtysh')]),
'nodes': ['sw1']
},
{
'attributes': OrderedDict([('type', 'host')]),
'nodes': ['hs1']
},
{
'attributes': OrderedDict(),
'nodes': ['hs2']
},
{
'attributes': OrderedDict([('shell', 'vtysh')]),
'nodes': ['sw2']
},
],
'ports': [
{
'ports': [('sw1', '1')],
'attributes': OrderedDict()
},
{
'ports': [('hs1', '1')],
'attributes': OrderedDict()
},
{
'ports': [('sw1', 'a')],
'attributes': OrderedDict()
},
{
'ports': [('hs1', 'a')],
'attributes': OrderedDict()
},
{
'ports': [('sw1', '4')],
'attributes': OrderedDict()
},
{
'ports': [('hs2', 'a')],
'attributes': OrderedDict()
},
],
'links': [
{
'attributes': OrderedDict(),
'endpoints': (('sw1', '1'), ('hs1', '1'))
},
{
'attributes': OrderedDict(
[
('attr1', 210.0),
('attr2', -0.27)
]
),
'endpoints': (('sw1', 'a'), ('hs1', 'a'))
},
{
'attributes': OrderedDict(
[
('attr1', 1), ('attr2', 'lorem ipsum'),
('attr3', [1, 3.0, 'B'])
]
),
'endpoints': (('sw1', '4'), ('hs2', 'a'))
}
]
}
assert not DeepDiff(actual, expected)
def test_autonode():
"""
Test the automatic creation of implicit nodes
"""
topology = """
sw1:port1
"""
actual = parse_txtmeta(topology)
expected = {
'environment': OrderedDict(),
'nodes': [{'attributes': OrderedDict(), 'nodes': ['sw1']}],
'ports': [{'ports': [('sw1', 'port1')], 'attributes': OrderedDict()}],
'links': []
}
assert not DeepDiff(actual, expected)
def test_multiline():
"""
Test the support for multiline attributes
"""
topology = """
# Environment
[
virtual=none
awesomeness=medium
float=1.0
list=(
1,
3.14,
True
)
]
"""
actual = parse_txtmeta(topology)
expected = {
'environment': OrderedDict(
[
('virtual', 'none'), ('awesomeness', 'medium'), ('float', 1.0),
('list', [1, 3.14, True])
]
),
'nodes': [],
'ports': [],
'links': []
}
assert not DeepDiff(actual, expected)
def test_attributes():
"""
Test that attributes should just be added to the nodes on the same line
"""
topology = """
[attr=value] hs1 hs3
hs2
"""
actual = parse_txtmeta(topology)
expected = {
'environment': OrderedDict(),
'nodes': [
{
'attributes': OrderedDict([('attr', 'value')]),
'nodes': ['hs1']
},
{
'attributes': OrderedDict([('attr', 'value')]),
'nodes': ['hs3']
},
{
'attributes': OrderedDict(),
'nodes': ['hs2']
},
],
'ports': [],
'links': []
}
assert not DeepDiff(actual, expected)
def test_single():
"""
Test that a single line string (no new lines '\\n') is parsed
"""
topology = """[attr=value] hs1"""
actual = parse_txtmeta(topology)
expected = {
'environment': OrderedDict(),
'nodes': [
{
'attributes': OrderedDict([('attr', 'value')]),
'nodes': ['hs1']
},
],
'ports': [],
'links': []
}
assert not DeepDiff(actual, expected)
``` |
{
"source": "jpmh1309/topology",
"score": 2
} |
#### File: topology/pytest/plugin.py
```python
from time import time
from logging import getLogger
from os import getcwd, makedirs
from traceback import format_exc
from collections import OrderedDict
from os.path import join, isabs, abspath, realpath, exists, isdir
from pytest import fixture, fail, hookimpl, skip
from topology.args import parse_options
from topology.logging import get_logger, StepLogger
log = getLogger(__name__)
class TopologyPlugin(object):
"""
pytest plugin for Topology.
:param str platform: Platform engine name to run the tests with.
:param str plot_dir: Directory to auto-plot topologies. ``None`` if
feature is disabled.
:param str plot_format: Format to plot the topologies.
:param str nml_dir: Directory to auto-export topologies. ``None`` if
feature is disabled.
:param dict injected_attr: A dictionary holding topology attributes to
inject.
:param str log_dir: Path where to store logs.
:param list szn_dir: List of paths to directories where ``*.szn`` files
are located.
:param dict platform_options: Dictionary holding parameters passed directly
to the topology platform object.
:param int build_retries: Amount of times to retry the build stage.
"""
def __init__(
self, platform, plot_dir, plot_format,
nml_dir, injected_attr, log_dir, szn_dir, platform_options,
build_retries
):
super(TopologyPlugin, self).__init__()
self.platform = platform
self.plot_dir = plot_dir
self.plot_format = plot_format
self.nml_dir = nml_dir
self.injected_attr = injected_attr
self.log_dir = log_dir
self.szn_dir = szn_dir
self.platform_options = platform_options
self.build_retries = build_retries
def pytest_report_header(self, config):
"""
pytest hook to print information of the report header.
"""
header = ["topology: platform='{}'".format(self.platform)]
if self.plot_dir:
header.append(" plot_dir='{}' ({})".format(
self.plot_dir, self.plot_format
))
if self.nml_dir:
header.append(" nml_dir='{}'".format(
self.nml_dir
))
if self.log_dir:
header.append(" log_dir='{}'".format(
self.log_dir
))
return '\n'.join(header)
@fixture(scope='module')
def topology(request):
"""
    Fixture that injects a TopologyManager as a test fixture.
See:
- https://pytest.org/latest/fixture.html
- https://pytest.org/latest/builtin.html#_pytest.python.FixtureRequest
"""
from ..manager import TopologyManager
from ..logging import manager as logmanager
plugin = request.config._topology_plugin
module = request.module
topomgr = TopologyManager(
engine=plugin.platform, options=plugin.platform_options
)
# Setup framework logging
logmanager.logging_context = module.__name__
if plugin.log_dir:
logmanager.logging_directory = plugin.log_dir
# Finalizer unbuild the topology and plot it
def finalizer():
        # Do nothing if the topology isn't built
if not topomgr.is_built():
return
# Plot topology
if plugin.plot_dir:
plot_file = join(
plugin.plot_dir,
'{}.{}'.format(module.__name__, plugin.plot_format)
)
topomgr.nml.save_graphviz(
plot_file, keep_gv=True
)
# Export topology as NML
if plugin.nml_dir:
nml_file = join(
plugin.nml_dir,
'{}.xml'.format(module.__name__)
)
topomgr.nml.save_nml(
nml_file, pretty=True
)
topomgr.unbuild()
# Autobuild topology if available.
if hasattr(module, 'TOPOLOGY'):
# Get topology description
topo = module.TOPOLOGY
# Get attributes to inject
suite_injected_attr = None
if plugin.injected_attr is not None:
suite_injected_attr = plugin.injected_attr.get(
abspath(module.__file__), None
)
try:
if isinstance(topo, dict):
topomgr.load(topo, inject=suite_injected_attr)
else:
topomgr.parse(topo, inject=suite_injected_attr)
except Exception:
fail(
'Error loading topology in module {}:\n{}'.format(
module.__name__,
format_exc()
),
pytrace=False
)
for iteration in range(plugin.build_retries + 1):
try:
topomgr.build()
log.info(
'Attempt {} on building topology was successful'.format(
iteration
)
)
break
except Exception:
msg = (
'{}\nAttempt {} to build topology failed.'
).format(format_exc(), iteration)
log.warning(msg)
else:
fail(
'Error building topology in module {}:\n{}'.format(
module.__name__,
format_exc()
), pytrace=False
)
request.addfinalizer(finalizer)
return topomgr
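# Typical usage sketch (names illustrative): a test module defines a module-level
# TOPOLOGY SZN string and requests the fixture, e.g.
#
#   TOPOLOGY = '[shell=vtysh] sw1'
#
#   def test_something(topology):
#       sw1 = topology.get('sw1')  # assumes TopologyManager exposes get()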
@fixture(scope='function')
def step(request):
"""
Fixture to log a step in a test.
"""
return get_logger(
OrderedDict([
('test_suite', request.module.__name__),
('test_case', request.function.__name__)
]),
category='step'
)
def pytest_addoption(parser):
"""
pytest hook to add CLI arguments.
"""
from ..platforms.manager import platforms, DEFAULT_PLATFORM
group = parser.getgroup('topology', 'Testing of network topologies')
group.addoption(
'--topology-platform',
default=DEFAULT_PLATFORM,
help='Select platform to run topology tests',
choices=platforms()
)
group.addoption(
'--topology-plot-dir',
default=None,
help='Directory to auto-plot topologies'
)
group.addoption(
'--topology-plot-format',
default='svg',
help='Format for plotting topologies'
)
group.addoption(
'--topology-nml-dir',
default=None,
help='Directory to export topologies as NML XML'
)
group.addoption(
'--topology-inject',
default=None,
help='Path to an attributes injection file'
)
group.addoption(
'--topology-log-dir',
default=None,
help='Path to a directory where logs are to be stored'
)
group.addoption(
'--topology-szn-dir',
default=None,
action='append',
help='Path to a directory where szn files are located. '
'Can be used multiple times'
)
group.addoption(
'--topology-platform-options',
nargs='+',
default=None,
help='An argument used by the topology platform '
'with the form <key>=<value>'
)
group.addoption(
'--topology-build-retries',
default=0,
        type=int,
help='Retry building a topology up to defined times'
)
def pytest_sessionstart(session):
"""
pytest hook to configure plugin.
"""
config = session.config
# Get registered options
platform = config.getoption('--topology-platform')
plot_format = config.getoption('--topology-plot-format')
plot_dir = config.getoption('--topology-plot-dir')
nml_dir = config.getoption('--topology-nml-dir')
injection_file = config.getoption('--topology-inject')
log_dir = config.getoption('--topology-log-dir')
szn_dir = config.getoption('--topology-szn-dir')
platform_options = config.getoption('--topology-platform-options')
build_retries = config.getoption('--topology-build-retries')
if build_retries < 0:
raise Exception('--topology-build-retries can\'t be less than 0')
def create_dir(path):
if path:
if not isabs(path):
path = join(abspath(getcwd()), path)
if not exists(path):
makedirs(path)
# Determine plot, NML and log directory paths and create them if required
create_dir(plot_dir)
create_dir(nml_dir)
create_dir(log_dir)
# Parse attributes injection file
from pyszn.injection import parse_attribute_injection
injected_attr = None
if injection_file is not None:
log.info('Processing attribute injection...')
start_time = time()
# Get a list of all testing directories
search_paths = [
realpath(arg) for arg in config.args if isdir(arg)
]
injected_attr = parse_attribute_injection(
injection_file,
search_paths=search_paths,
ignored_paths=config.getini('norecursedirs'),
szn_dir=szn_dir
)
log.info(
'Attribute injection completed after {}s'
.format(time() - start_time)
)
# Create and register plugin
config._topology_plugin = TopologyPlugin(
platform,
plot_dir,
plot_format.lstrip('.'),
nml_dir,
injected_attr,
log_dir,
szn_dir,
parse_options(platform_options),
build_retries
)
config.pluginmanager.register(config._topology_plugin)
# Add test_id marker
config.addinivalue_line(
'markers',
'test_id(id): assign a test identifier to the test'
)
# Add topology_compatible marker
config.addinivalue_line(
'markers',
'platform_incompatible(platforms, reason=None): '
'mark a test as incompatible with a list of platform engines. '
'Optionally specify a reason for better reporting'
)
def pytest_unconfigure(config):
"""
pytest hook to unconfigure plugin.
"""
plugin = getattr(config, '_topology_plugin', None)
if plugin:
del config._topology_plugin
config.pluginmanager.unregister(plugin)
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
"""
pytest hook to setup test before run.
"""
test_id_marker = item.get_closest_marker('test_id')
incompatible_marker = item.get_closest_marker('platform_incompatible')
# If marked and xml logging enabled
if test_id_marker is not None and hasattr(item.config, '_xml'):
test_id = test_id_marker.args[0]
item.config._xml.node_reporter(item.nodeid).add_property(
'test_id', test_id
)
if incompatible_marker:
platform = item.config._topology_plugin.platform
if platform in incompatible_marker.args[0]:
message = (
incompatible_marker.kwargs.get('reason') or (
'Test is incompatible with {} platform'.format(platform)
)
)
skip(message)
__all__ = [
'TopologyPlugin',
'topology',
'pytest_addoption',
'StepLogger'
]
``` |
{
"source": "jpmieville/sir",
"score": 3
} |
#### File: sir/book_examples/program_2_6.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
mu = 1 / (70 * 365.0)
beta = 520 / 365.0
sigma = 1 / 14.0
gamma = 1 / 7.0
ND = 60 * 365.0
TS = 1.0
S0 = 0.1
E0 = 1e-4
I0 = 1e-4
INPUT = (S0, E0, I0)
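# SEIR equations with births and deaths at per-capita rate mu (R is implicit,
# recovered fraction Rec = 1 - S - E - I below):
#   dS/dt = mu - beta*S*I - mu*S
#   dE/dt = beta*S*I - sigma*E - mu*E
#   dI/dt = sigma*E - gamma*I - mu*I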
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((3))
V = INP
Y[0] = mu - beta * V[0] * V[2] - mu * V[0]
Y[1] = beta * V[0] * V[2] - sigma * V[1] - mu * V[1]
Y[2] = sigma * V[1] - gamma * V[2] - mu * V[2]
return Y # For odeint
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
Rec = 1.0 - (RES[:, 0] + RES[:, 1] + RES[:, 2])
print(RES)
# Plotting
pl.subplot(311)
pl.plot(RES[:, 0], "-g", label="Susceptibles")
pl.title("Program_2_6.py")
pl.xlabel("Time")
pl.ylabel("Susceptibles")
pl.subplot(312)
pl.plot(RES[:, 1], "-m", label="Exposed")
pl.plot(RES[:, 2], "-r", label="Infectious")
pl.legend(loc=0)
pl.xlabel("Time")
pl.ylabel("Infected")
pl.subplot(313)
pl.plot(Rec, "-k", label="Recovereds")
pl.xlabel("Time")
pl.ylabel("Recovereds")
pl.show()
```
#### File: sir/book_examples/program_3_4.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
from matplotlib.font_manager import FontProperties
m = 4
mu = np.array([0.0, 0.0, 0.0, 1.0 / (55 * 365)])
nu = np.array([1.0 / (55 * 365), 0.0, 0.0, 0.0])
n = np.array([6.0, 4.0, 10.0, 55.0]) / 75.0
S0 = np.array([0.05, 0.01, 0.01, 0.008])
E0 = np.array([0.0001, 0.0001, 0.0001, 0.0001])
I0 = np.array([0.0001, 0.0001, 0.0001, 0.0001])
R0 = np.array([0.0298, 0.04313333, 0.12313333, 0.72513333])
ND = MaxTime = 365.0
beta = np.array(
(
[2.089, 2.089, 2.086, 2.037],
[2.089, 9.336, 2.086, 2.037],
[2.086, 2.086, 2.086, 2.037],
[2.037, 2.037, 2.037, 2.037],
)
)
gamma = 1 / 5.0
sigma = 1 / 8.0
TS = 1.0
INPUT = np.hstack((S0, E0, I0, R0))
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((16))
V = INP
for i in range(m):
Inf = np.dot(beta[i], V[list(np.array(range(m)) + 2 * m)]) * V[i]
Y[i] = nu[i] * n[3] - Inf - mu[i] * V[i]
Y[(m + i)] = Inf - mu[i] * V[(m + i)] - sigma * V[(m + i)]
Y[(2 * m + i)] = sigma * V[(m + i)] - gamma * V[(2 * m + i)] - mu[i] * V[(2 * m + i)]
Y[(3 * m + i)] = gamma * V[(2 * m + i)] - mu[i] * V[(3 * m + i)]
return Y # For odeint
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = np.zeros((16))
k = 1
while k <= 100:
RES = spi.odeint(diff_eqs, INPUT, t_range)
INPUT = RES[-1]
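    # Annual ageing step: after each year of integration, move the appropriate
    # fraction of every age class (0-6, 6-10, 10-20, 20+) into the next class,
    # done separately for the R (12-15), I (8-11), E (4-7) and S (0-3) blocks.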
INPUT[15] = INPUT[15] + INPUT[14] / 10
INPUT[14] = INPUT[14] + INPUT[13] / 4 - INPUT[14] / 10
INPUT[13] = INPUT[13] + INPUT[12] / 6 - INPUT[13] / 4
INPUT[12] = INPUT[12] - INPUT[12] / 6
INPUT[11] = INPUT[11] + INPUT[10] / 10
INPUT[10] = INPUT[10] + INPUT[9] / 4 - INPUT[10] / 10
INPUT[9] = INPUT[9] + INPUT[8] / 6 - INPUT[9] / 4
INPUT[8] = INPUT[8] - INPUT[8] / 6
INPUT[7] = INPUT[7] + INPUT[6] / 10
INPUT[6] = INPUT[6] + INPUT[5] / 4 - INPUT[6] / 10
INPUT[5] = INPUT[5] + INPUT[4] / 6 - INPUT[5] / 4
INPUT[4] = INPUT[4] - INPUT[4] / 6
INPUT[3] = INPUT[3] + INPUT[2] / 10
INPUT[2] = INPUT[2] + INPUT[1] / 4 - INPUT[2] / 10
INPUT[1] = INPUT[1] + INPUT[0] / 6 - INPUT[1] / 4
INPUT[0] = INPUT[0] - INPUT[0] / 6
RES2 = np.vstack((RES2, RES))
k = k + 1
RES = RES2[1:, :]
print(RES)
Time = np.arange(100 * (ND + 1)) / (ND + 1)
## Plotting
pl.subplot(311)
pl.plot(Time, RES[:, 0], "c", label="0-6")
pl.plot(Time, RES[:, 1], "b", label="6-10")
pl.plot(Time, RES[:, 2], "g", label="10-20")
pl.plot(Time, RES[:, 3], "r", label="20+")
pl.ylabel("Susceptibles")
pl.xlabel("Time (years)")
pl.legend(loc=1, prop=FontProperties(size="smaller"))
pl.subplot(312)
pl.semilogy(Time, RES[:, 0 + 2 * m], "c", label="0-6")
pl.semilogy(Time, RES[:, 1 + 2 * m], "b", label="6-10")
pl.semilogy(Time, RES[:, 2 + 2 * m], "g", label="10-20")
pl.semilogy(Time, RES[:, 3 + 2 * m], "r", label="20+")
pl.ylabel("Infectious")
pl.xlabel("Time (years)")
pl.legend(loc=1, prop=FontProperties(size="smaller"))
R = np.zeros(4)
pl.subplot(313)
mm = np.where(Time > (ND - 365.0))[0]  # indices of the last year (pl.find was removed from pylab)
for i in range(4):
R[i] = 1.0 - np.mean(RES[mm, i]) / n[i]
pl.fill(
np.array([0, 0, 6, 6, 6, 6, 10, 10, 10, 10, 20, 20, 20, 20, 75, 75]),
np.array([0, R[0], R[0], 0, 0, R[1], R[1], 0, 0, R[2], R[2], 0, 0, R[3], R[3], 0]),
"r",
)
pl.xlabel("Age-group")
pl.ylabel("Proportion Sero-positive")
pl.xlim((0, 25))
pl.ylim((0, 1))
pl.show()
```
#### File: sir/book_examples/program_3_5.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
n = 13
m = 8
gamma = 1 / 13.0
beta = 17 / 5.0
mu = 1.0 / (55 * 365)
S0 = 0.05
I0 = 0.00001
ND = MaxTime = 30 * 365.0
TS = 1.0
#####################################################################################
### To be compatible with other versions of programs the
### following options are available. To try some of them
### uncomment the code (remove '#'):
#####################################################################################
### As well as the default, you may want to compare the structured model:
# ( n, m, beta, gamma, mu, S0, I0, ND )=(10.0, 0.0, 1.0, 0.1, 0.0, 0.5, 1e-6, 60.);
### with the unstructured version
# ( n, m, beta, gamma, mu, S0, I0, ND )=(1.0, 0.0, 1.0, 0.1, 0.0, 0.5, 1e-6, 60.);
### Or compare the SEIR:
# ( n, m, beta, gamma, mu, S0, I0, ND )=(10.0, 5.0, 1.0, 0.1, 0.0, 0.5, 1e-4, 150.);
### with the unstructured version
# ( n, m, beta, gamma, mu, S0, I0, ND )=(2.0, 1.0, 1.0, 0.1, 0.0, 0.5, 1e-4, 150.);
#####################################################################################
I0 = I0 * np.ones((n)) / n
INPUT = np.hstack((S0, I0))
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((n + 1))
V = INP
Y[0] = mu - beta * sum(V[range(m + 1, n + 1)]) * V[0] - mu * V[0]
Y[1] = beta * sum(V[range(m + 1, n + 1)]) * V[0] - gamma * n * V[1] - mu * V[1]
for i in range(2, n + 1):
Y[i] = gamma * n * V[i - 1] - gamma * n * V[i] - mu * V[i]
return Y # For odeint
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
print(RES)
REST = np.zeros(len(RES), "float")
for i in range(1, n + 1):
REST += RES[:, i]
## Plotting
pl.subplot(311)
pl.plot(RES[:, 0], "g-", label="Susc")
pl.ylabel("Susceptibles")
pl.subplot(312)
if m > 0:
for i in range(1, (n + 1 - m)):
for j in range(1, m + 1):
pl.semilogy(RES[:, j], "c", label="Exposed")
pl.semilogy(RES[:, (i + m)], "r", label="Infectious")
else:
for i in range(1, n + 1):
pl.semilogy(RES[:, i], "r", label="Infectious")
pl.ylabel("Infection,I")
pl.subplot(313)
if n > 1:
pl.plot(REST, "r-", label="Infec")
else:
pl.plot(RES[:, 1], "r-", label="Infec")
pl.ylabel("Total Infection")
pl.xlabel("Time")
pl.show()
```
#### File: sir/book_examples/program_5_2.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
Beta0 = 17 / 13.0
Beta1 = [0.25]
gamma = 1 / 13.0
mu = 1 / (50 * 365.0)
S0 = 1 / 17.0
I0 = 1e-4
Years = 10
MaxTime = 365.0
TS = 1.0
def term(t):
t = np.mod(t, 365)
    if t < 6 or (100 < t < 115) or (200 < t < 251) or (300 < t < 307) or (356 < t <= 365):
Term = -1
else:
Term = 1
return Term
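# term() returns -1 during (UK-style) school holidays and +1 during term time;
# FORCED_ODE below switches between the low- and high-transmission ODE systems
# (diff_eqs / diff_eqs2) at exactly those date boundaries.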
### This code can also be used to generate bifurcation diagrams, by setting
### beta1 equal to a vector of seasonality rates. The bifurcation diagram is
### constructed using extrapolated initial conditions. Try:
# (Beta0,Beta1,gamma,mu,S0,I0,Years, MaxTime)=(17/13.,np.arange(0.00,0.301,0.001),1/13.,1./(50*365),1/17.,1e-4,20,365.)
ND = Years * MaxTime
INPUT = np.array((S0, I0, 1 - S0 - I0))
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((3))
V = INP
beta = beta0 - beta1
Y[0] = mu - beta * V[0] * V[1] - mu * V[0]
Y[1] = beta * V[0] * V[1] - mu * V[1] - gamma * V[1]
Y[2] = gamma * V[1] - mu * V[2]
return Y # For odeint
def diff_eqs2(INP, t):
"""The main set of equations"""
Y = np.zeros((3))
V = INP
beta = beta0 + beta1
Y[0] = mu - beta * V[0] * V[1] - mu * V[0]
Y[1] = beta * V[0] * V[1] - mu * V[1] - gamma * V[1]
Y[2] = gamma * V[1] - mu * V[2]
return Y # For odeint
def FORCED_ODE(diff_eqs, INPUT, beta0, beta1):
"""Calculates the differential rates used in the integration."""
RES = np.zeros((3))
for Year in range(Years):
t_start = Year * 365.0 + 1
t_end = Year * 365.0 + 6.0
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 7
t_end = Year * 365.0 + 100
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 101
t_end = Year * 365.0 + 115
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 116
t_end = Year * 365.0 + 200
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 201
t_end = Year * 365.0 + 251
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 252
t_end = Year * 365.0 + 300
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 301
t_end = Year * 365.0 + 307
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 308
t_end = Year * 365.0 + 356
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs2, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
t_start = Year * 365.0 + 357
t_end = Year * 365.0 + 365
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES2 = spi.odeint(diff_eqs, INPUT, t_range)
RES = np.vstack((RES, RES2))
INPUT = RES2[-1]
INPUT = RES[-1]
return RES
if len(Beta1) == 1:
beta1 = Beta1[0]
### Calculate Average Effect of Forcing and Correct for it.
Ave = 0
for t in np.arange(1, 366):
Ave += 1 + beta1 * term(t + 0.5)
beta0 = Beta0 / (Ave / 365)
print(beta0, beta1)
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = FORCED_ODE(diff_eqs, INPUT, beta0, beta1)
print(RES)
t = np.arange(ND) / 365.0
    # Plotting
pl.subplot(311)
pl.plot(t, RES[1:, 0], "g", label="S")
pl.xlabel("Time (years)")
pl.ylabel("Susceptibles")
pl.subplot(312)
pl.plot(t, RES[1:, 1], "r", label="I")
pl.xlabel("Time (years)")
pl.ylabel("Infectious")
pl.subplot(313)
pl.plot(t, 1 - (RES[1:, 0] + RES[1:, 1]), "k", label="R")
pl.xlabel("Time (years)")
pl.ylabel("Recovereds")
else:
if ND < 3650:
ND = 3650
Bifur_I = np.zeros((len(Beta1), 10))
for i in range(len(Beta1)):
beta1 = Beta1[i]
### Calculate Average Effect of Forcing and Correct for it.
Ave = 0
for t in np.arange(1, 366):
Ave += 1 + beta1 * term(t + 0.5)
beta0 = Beta0 / (Ave / 365)
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = FORCED_ODE(diff_eqs, INPUT, beta0, beta1)
INPUT = RES[-1]
for j in range(10):
            Bifur_I[i, j] = RES[int(ND - j * 365) - 1, 1]
pl.plot(Beta1, np.log10(Bifur_I), ".k")
### if TeX commands do not work comment the next line
pl.xlabel(r"Seasonality, $\beta_1$")
pl.ylabel(r"Level of Infection $(log_{10})$")
### if TeX commands do not work uncomment the next line
# pl.xlabel ('Seasonality, beta1')
# pl.ylabel ('Level of Infection (log_10)')
pl.show()
```
#### File: sir/book_examples/program_5_3.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
beta = 17 / 13.0
gamma = 1 / 13.0
alpha0 = 1 / (50 * 365.0)
alpha1 = [0.25]
S0 = 1 / 17.0
I0 = 1e-4
ND = MaxTime = 60 * 365
TS = 1.0
### This code can also be used to generate bifurcation diagrams, by setting
### alpha1 equal to a vector of seasonality rates. The bifurcation diagram is
### constructed using extrapolated initial conditions. Try:
# (beta,gamma,alpha0, alpha1,S0,I0,ND)=(17/13.,1/13., 1./(50*365), np.arange(0.00,1.0,0.01),1/17., 1e-4, 20*365)
INPUT = np.array((S0, I0, 1 - S0 - I0))
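# In diff_eqs below, seasonality enters through the birth rate
# alpha(t) = alpha0 * (1 + alpha1 * sin(2*pi*t/365)), while the per-capita death
# rate mu stays constant at alpha0, so the system integrated is:
#   dS/dt = alpha(t) - beta*S*I - mu*S
#   dI/dt = beta*S*I - (mu + gamma)*I
#   dR/dt = gamma*I - mu*R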
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((3))
V = INP
t = np.mod(t, 365.0)
alpha = alpha0 * (1 + alpha1 * np.sin(2 * np.pi * t / 365))
mu = alpha0
Y[0] = alpha - beta * V[0] * V[1] - mu * V[0]
Y[1] = beta * V[0] * V[1] - mu * V[1] - gamma * V[1]
Y[2] = gamma * V[1] - mu * V[2]
return Y # For odeint
if len(alpha1) == 1:
alpha1 = alpha1[0]
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
print(RES)
t = np.arange(ND) / 365.0
    # Plotting
pl.subplot(311)
pl.plot(t, RES[1:, 0], "g", label="S")
pl.xlabel("Time (years)")
pl.ylabel("Susceptibles")
pl.subplot(312)
pl.plot(t, RES[1:, 1], "r", label="I")
pl.xlabel("Time (years)")
pl.ylabel("Infectious")
pl.subplot(313)
pl.plot(t, 1 - (RES[1:, 0] + RES[1:, 1]), "k", label="R")
pl.xlabel("Time (years)")
pl.ylabel("Recovereds")
else:
if ND < 3650:
ND = 3650
alpha2 = alpha1
Bifur_I = np.zeros((len(alpha2), 10))
for i in range(len(alpha2)):
alpha1 = alpha2[i]
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
INPUT = RES[-1]
for j in range(10):
            Bifur_I[i, j] = RES[np.arange(ND)[int((ND - j * 365.0) - 1)], 1]
### Plotting
pl.plot(alpha2, np.log10(Bifur_I), ".k")
    ### if TeX commands do not work comment the next line
pl.xlabel(r"Seasonality, $\alpha_1$")
pl.ylabel(r"Level of Infection $(log_{10})$")
    ### if TeX commands do not work uncomment the next line
    # pl.xlabel ('Seasonality, alpha1')
# pl.ylabel ('Level of Infection (log_10)')
pl.show()
```
#### File: sir/book_examples/program_6_1.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
beta = 1.0
noise = 10
gamma = 1 / 10.0
mu = 1 / (50 * 365.0)
X0 = 1e5
Y0 = 500
N0 = 1e6
Step = 1.0
ND = MaxTime = 5 * 365.0
TS = 1.0
INPUT0 = np.hstack((X0, Y0))
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((2))
V = INP
    Y[0] = mu * N0 - beta * V[0] * V[1] / N0 - Noise - mu * V[0]
Y[1] = beta * V[0] * V[1] / N0 + Noise - mu * V[1] - gamma * V[1]
return Y # For odeint
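# Note that Noise is not an argument of diff_eqs: it is a module-level value
# that the while loop below redraws once per Step, so odeint treats the
# stochastic forcing as constant within each short integration window.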
T = np.zeros((int(np.ceil(ND / Step)), 1))
RES = np.zeros((int(np.ceil(ND / Step)), 2))
INPUT = INPUT0
t = 0
loop = 0
while t < ND and INPUT[0] > 0 and INPUT[1] > 0:
t_start = 0.0
t_end = t_start + Step
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
sqrtStep = np.sqrt(Step)
Noise = noise * np.random.normal(size=1) / sqrtStep
PRES = spi.odeint(diff_eqs, INPUT, t_range)
T[loop] = t = t + Step
INPUT = PRES[-1]
RES[loop] = PRES[-1]
loop += 1
print(RES)
### plotting
pl.subplot(211)
pl.plot(T / 365.0, RES[:, 0], ".-g")
pl.xlabel("Time (Years)")
pl.ylabel("Susceptibles")
pl.subplot(212)
pl.plot(T / 365.0, RES[:, 1], ".-r")
pl.ylabel("Infected")
pl.xlabel("Time (Years)")
pl.show()
```
#### File: sir/book_examples/program_7_2.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
from matplotlib.font_manager import FontProperties as fmp
n = 5
beta = 1.0 * np.ones(n)
gamma = 0.3 * np.ones(n)
N0 = np.zeros(n * n)
X0 = np.zeros(n * n)
for i in np.arange(0, n * n, n + 1):
N0[i] = 1000.0
X0[i] = 800.0
Y0 = np.zeros(n * n)
Y0[0] = 1.0
ND = MaxTime = 60.0
TS = 1.0
l = np.zeros((n, n))
r = np.zeros((n, n))
for i in range(n):
for j in range(n):
if abs(i - j) == 1:
l[i][j] = 0.1
r = 2 * np.ones((n, n))
r = r - np.diag(np.diag(r))
INPUT0 = np.hstack((X0, Y0, N0))
INPUT = np.zeros((3 * n * n))
for i in range(n * n):
INPUT[3 * i] = INPUT0[i]
INPUT[1 + 3 * i] = INPUT0[n * n + i]
INPUT[2 + 3 * i] = INPUT0[2 * n * n + i]
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((3 * n * n))
V = INP
sumY = np.zeros(n)
sumN = np.zeros(n)
## Calculate number currently in Subpopulation i
for i in range(n):
sumY[i] = 0.0
sumN[i] = 0.0
for j in range(n):
k = 3 * (j + i * n)
sumN[i] += V[2 + k]
sumY[i] += V[1 + k]
## Set all rates to zeros
for i in range(n):
for j in range(n):
k = 3 * (j + i * n)
Y[k] = 0
Y[1 + k] = 0
Y[2 + k] = 0
for i in range(n):
for j in range(n):
## Calculate the rates
k = 3 * (j + i * n)
K = 3 * (i + j * n)
h = 3 * (i + i * n)
H = 3 * (j + j * n)
Y[k] -= beta[i] * V[k] * (sumY[i] / sumN[i])
Y[k + 1] += beta[i] * V[k] * (sumY[i] / sumN[i])
Y[k + 1] -= gamma[i] * V[k + 1]
## Movement
Y[h] += r[j][i] * V[K]
Y[h] -= l[j][i] * V[h]
Y[h + 1] += r[j][i] * V[K + 1]
Y[h + 1] -= l[j][i] * V[h + 1]
Y[h + 2] += r[j][i] * V[K + 2]
Y[h + 2] -= l[j][i] * V[h + 2]
Y[k] += l[i][j] * V[H]
Y[k] -= r[i][j] * V[k]
Y[1 + k] += l[i][j] * V[1 + H]
Y[1 + k] -= r[i][j] * V[1 + k]
Y[2 + k] += l[i][j] * V[2 + H]
Y[2 + k] -= r[i][j] * V[2 + k]
return Y # For odeint
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
t_course = spi.odeint(diff_eqs, INPUT, t_range)
tc = t_course
### Plotting
totalS = np.zeros((len(tc), 5))
totalI = np.zeros((len(tc), 5))
for i in range(n):
for j in range(n):
k = 3 * (j + i * n)
totalS[:, i] += tc[:, k]
totalI[:, i] += tc[:, k + 1]
# print len(totalS)
pl.subplot(211)
for i in range(5):
pl.plot(t_range, totalS[:, i], label=("data %s" % (i + 1)), color=(0.3, i / 10.0 + 0.5, 0.1))
pl.xlabel("Time")
pl.ylabel("Susceptibles")
pl.legend(loc=1, prop=fmp(size="smaller"))
pl.subplot(212)
for i in range(5):
pl.plot(t_range, totalI[:, i], label=("data %s" % (i + 1)), color=(0.8, i / 10.0 + 0.0, 0.3))
pl.xlabel("Time")
pl.ylabel("Infectious")
pl.legend(loc=1, prop=fmp(size="smaller"))
pl.show()
```
#### File: sir/book_examples/program_7_8.py
```python
import scipy.integrate as spi
import numpy as np
import pylab as pl
n = 4
tau = 0.1
gamma = 0.05
Y0 = 1.0
N0 = 10000
ND = MaxTime = 100.0
TS = 1.0
X0 = N0 - Y0
XY0 = n * Y0 * X0 / N0
INPUT = np.hstack((X0, XY0))
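# The model below is a pairwise-style approximation on a network where every
# individual has n contacts: V[0] tracks the number of susceptibles [X] and
# V[1] the number of susceptible-infectious pairs [XY], hence the initial pair
# count XY0 = n*Y0*X0/N0; infecteds return directly to the susceptible class
# (the gamma * (N0 - V[0]) term in Y[0]).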
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((2))
V = INP
Y[0] = gamma * (N0 - V[0]) - tau * V[1]
Y[1] = (
tau * (n - 1) * (n * V[0] - V[1]) * V[1] / (n * V[0])
+ gamma * (n * N0 - n * V[0] - V[1])
- tau * V[1]
- tau * (n - 1) * V[1] * V[1] / (n * V[0])
- gamma * V[1]
)
return Y # For odeint
t_start = 0.0
t_end = ND
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
RES = spi.odeint(diff_eqs, INPUT, t_range)
X = RES[:, 0]
XY = RES[:, 1]
Y = N0 - X
# Plotting
pl.subplot(311)
pl.plot(Y, "-g")
pl.ylabel("Infectious")
pl.subplot(312)
pl.plot(XY, "-r")
pl.ylabel("[XY] pairs")
pl.subplot(313)
pl.plot(XY / ((n * X) * (Y / N0)), "-r")
pl.ylabel("relative XY correlation")
pl.xlabel("Time (Years)")
print(XY / (n * X * Y / N0))
pl.show()
``` |
{
"source": "jpmjim/CProfesionalPython",
"score": 4
} |
#### File: CProfesionalPython/cprofesional_python/4reto_closures.py
```python
from typing import Generic, TypeVar
T = TypeVar('T')
def make_divisor_by(n: int) -> Generic[T]:
def numerator(x: int) -> float:
        assert n != 0, 'You cannot divide by zero'
return x/n
return numerator
def main():
divided_by_2 = make_divisor_by(2)
print(divided_by_2(10))
divided_by_2 = make_divisor_by(3)
print(divided_by_2(18))
divided_by_2 = make_divisor_by(5)
print(divided_by_2(100))
divided_by_2 = make_divisor_by(18)
print(divided_by_2(54))
if __name__ == '__main__':
main()
```
#### File: CProfesionalPython/cprofesional_python/8reto_iterators.py
```python
from time import sleep
class FiboIter():
def __init__(self, max_number:int):
self.max_number = max_number
def __iter__(self):
self.n1 = 0
self.n2 = 1
self.counter = 0
return self
def __next__(self):
if self.counter == 0:
self.counter += 1
return self.n1
elif self.counter == 1:
self.counter += 1
return self.n2
else:
self.aux = self.n1 + self.n2
if self.aux >= self.max_number:
raise StopIteration
self.n1, self.n2 = self.n2, self.aux
self.counter += 1
return self.aux
if __name__ == "__main__":
for element in FiboIter(100):
print(element)
sleep(0.1)
``` |
{
"source": "jpmjpmjpm/pyhotsax",
"score": 2
} |
#### File: pyhotsax/tests/test_hotsax_brute.py
```python
from hotsax import HotSax, InputError
from pytest import raises, approx
import pytest
import numpy as np
DATALENGTH = 50
W_SIZE = 5
@pytest.fixture()
def setup_brute():
window_size = W_SIZE
data = [i for i in range(1, DATALENGTH + 1)]
hotsax = HotSax(window_size=window_size, mode='brute')
return hotsax, data
def test_data_load_shape(setup_brute):
hotsax, data = setup_brute
hotsax._load_data(data=np.array(data))
assert hotsax.data.shape == (1, DATALENGTH)
def test_data_load_as_numpy(setup_brute):
hotsax, data = setup_brute
with raises(InputError):
hotsax._load_data(data=data)
def test_data_normalization(setup_brute):
hotsax, data = setup_brute
hotsax._load_data(data=np.array(data))
hotsax._normalize_data()
m = np.mean(data)
sd = np.std(data)
data = np.array(data).reshape(1, -1)
norm_data = (data - m) / sd
assert hotsax._norm_data == approx(norm_data)
def test_segments(setup_brute):
hotsax, data = setup_brute
hotsax.fit(data=np.array(data))
assert len(hotsax._segments) == DATALENGTH - W_SIZE + 1
for s in hotsax._segments:
assert s.shape == (1, W_SIZE)
def test_euclidean_distance_inputs(setup_brute):
hotsax, _ = setup_brute
inputs = [('abc', 123), (123, 'abc'), (np.array([1, 2, 3]), np.array([1, 2, 3, 4]))]
for inpt in inputs:
with raises(InputError):
hotsax._euclidean_distance(inpt[0], inpt[1])
@pytest.mark.parametrize("test_input1, test_input2 ,expected",
[(np.array([0, 0, 0]), np.array([0, 0, 0]), 0),
(np.array([1, 1, 1, 1]), np.array([1, 1, 1, 1]), 0),
(np.array([1, 2, 3]), np.array([7, 6, 5]),
np.sqrt((W_SIZE / DATALENGTH) * np.sum((np.array([1, 2, 3]) - np.array([7, 6, 5])) ** 2)))])
def test_euclidean_distance(setup_brute, test_input1, test_input2, expected):
hotsax, data = setup_brute
hotsax.fit(data=np.array(data))
assert hotsax._euclidean_distance(test_input1, test_input2) == approx(expected)
def test_anomaly_detection_brute_force():
window_size = W_SIZE
data = np.array([1] * DATALENGTH)
data[37:45] = 4
hotsax = HotSax(window_size=window_size, mode="brute", multiple_discords=True, nb_discords=5)
hotsax.fit_transform(data=data)
results = list(hotsax.all_discords.keys())[:5]
assertion = [i in results for i in range(35, 38)]
assert any(assertion)
``` |
{
"source": "jpmjunior/apostila_python_caelum",
"score": 3
} |
#### File: apostila_python_caelum/capitulo_5/funcoes.py
```python
def velocidade_media(distancia, tempo):
return divisao(distancia,tempo)
def soma(num1, num2):
return num1+num2
def subtracao(num1, num2):
return num1-num2
def divisao(num1,num2):
return num1/num2
def calculadora(num1, num2):
return soma(num1,num2), subtracao(num1,num2), num1*num2, num1/num2
print(velocidade_media(100,20))
print(velocidade_media(-20,10))
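# note: the next call divides by zero and will raise ZeroDivisionError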
print(velocidade_media(150,0))
``` |
{
"source": "jpmlt/freecad-cadquery2-workbench",
"score": 2
} |
#### File: freecad/cadquery2workbench/cadquery_model.py
```python
import ast
import traceback
import time
from cadquery.cqgi import CQSCRIPT, CQModel, ScriptCallback, BuildResult
from cadquery.cqgi import EnvironmentBuilder, ShapeResult, InputParameter
from cadquery.cqgi import ConstantAssignmentFinder
from cadquery.cqgi import BooleanParameterType, StringParameterType
from cadquery.cqgi import NumberParameterType, NumberParameterType
class CQ_Model(CQModel):
'''extend Cadquery cqgi.CQModel class.'''
def __init__(self, script_source):
'''
Create an object by parsing the supplied python script.
:param script_source: a python script to parse
'''
CQModel.__init__(self, script_source)
def _find_vars(self):
'''
Parse the script, and populate variables that appear to be overridable.
Override original one to use updated ConstantAssignmentFinder class.
'''
# assumption here: we assume that variable declarations
# are only at the top level of the script. IE, we'll ignore any
# variable definitions at lower levels of the script
# we don't want to use the visit interface because here we explicitly
# want to walk only the top level of the tree.
assignment_finder = Constant_Assignment_Finder(self.metadata) # Updated line
for node in self.ast_tree.body:
if isinstance(node, ast.Assign):
assignment_finder.visit_Assign(node)
def build(self, build_parameters=None, build_options=None):
'''
Executes the script, using the optional parameters to override those in the model.
Override original one to use updated ScriptCallback class.
:param build_parameters: a dictionary of variables. The variables must be
assignable to the underlying variable type. These variables override default values in the script
:param build_options: build options for how to build the model. Build options include things like
timeouts, tessellation tolerances, etc
:raises: Nothing. If there is an exception, it will be on the exception property of the result.
This is the interface so that we can return other information on the result, such as the build time
:return: a BuildResult object, which includes the status of the result, and either
a resulting shape or an exception
'''
if not build_parameters:
build_parameters = {}
start = time.perf_counter()
result = BuildResult()
try:
self.set_param_values(build_parameters)
collector = Script_Callback() # Updated line
env = (
EnvironmentBuilder()
.with_real_builtins()
.with_cadquery_objects()
.add_entry("__name__", "__cqgi__")
.add_entry("show_object", collector.show_object)
.add_entry("debug", collector.debug)
.add_entry("describe_parameter", collector.describe_parameter)
.build()
)
c = compile(self.ast_tree, CQSCRIPT, "exec")
exec(c, env)
result.set_debug(collector.debugObjects)
result.set_success_result(collector.outputObjects)
result.env = env
except Exception as ex:
result.set_failure_result(ex)
end = time.perf_counter()
result.buildTime = end - start
return result
class Script_Callback(ScriptCallback):
'''
extend Cadquery cqgi.ScriptCallback class.
Allows a script to communicate with the container
the show_object() method is exposed to CQ scripts, to allow them
to return objects to the execution environment
'''
def __init__(self):
ScriptCallback.__init__(self)
def show_object(self, shape, options=None, **kwargs):
'''
Override original one. Return an object to the executing environment, with options.
:param shape: a cadquery object
:param options: a dictionary of options that will be made available to the executing environment
        :param **kwargs: allows passing options as key=value pairs
'''
if options == None:
options = {}
options.update(kwargs)
o = ShapeResult()
o.options = options
o.shape = shape
self.outputObjects.append(o)
def debug(self, shape, options=None, **kwargs):
'''
Override original one. Debug print/output an object, with optional arguments.
:param shape: a cadquery object
:param options: a dictionary of options that will be made available to the executing environment
        :param **kwargs: allows passing options as key=value pairs
'''
if options == None:
options = {}
options.update(kwargs)
s = ShapeResult()
s.options = options
s.shape = shape
self.debugObjects.append(s)
def describe_parameter(self, varname, desc):
'''
Override original one. Do Nothing-- we parsed the ast ahead of execution to get what we need.
update to 2 arguments instead of 1 in cqgi.py
'''
pass
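# Illustrative sketch (not from the original source) of a CQGI script body whose
# calls this callback collects; the option names match what the workbench's
# Script_Commands.execute() parses later (name, group, rgba/color/alpha):
#
#   result = cq.Workplane("XY").box(10, 10, 2)
#   show_object(result, name="plate", group="demo", rgba="#CCCCCC80")
#   debug(result, name="debug_plate")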
class Constant_Assignment_Finder(ast.NodeTransformer):
"""
override Cadquery cqgi.ConstantAssignmentFinder class.
Visits a parse tree, and adds script parameters to the cqModel
Update to parse the variables setting using cqvar(value, description) method
"""
def __init__(self, cq_model):
self.cqModel = cq_model
def handle_assignment(self, var_name, value_node, value_desc=None):
try:
if type(value_node) == ast.Num:
self.cqModel.add_script_parameter(
InputParameter.create(
value_node, var_name, NumberParameterType, value_node.n
)
)
elif type(value_node) == ast.Str:
self.cqModel.add_script_parameter(
InputParameter.create(
value_node, var_name, StringParameterType, value_node.s
)
)
elif type(value_node) == ast.Name:
if value_node.id == "True":
self.cqModel.add_script_parameter(
InputParameter.create(
value_node, var_name, BooleanParameterType, True
)
)
elif value_node.id == "False":
self.cqModel.add_script_parameter(
InputParameter.create(
value_node, var_name, BooleanParameterType, False
)
)
elif hasattr(ast, "NameConstant") and type(value_node) == ast.NameConstant:
if value_node.value == True:
self.cqModel.add_script_parameter(
InputParameter.create(
value_node, var_name, BooleanParameterType, True
)
)
else:
self.cqModel.add_script_parameter(
InputParameter.create(
value_node, var_name, BooleanParameterType, False
)
)
elif hasattr(ast, "Constant") and type(value_node) == ast.Constant:
type_dict = {
bool: BooleanParameterType,
str: StringParameterType,
float: NumberParameterType,
int: NumberParameterType,
}
self.cqModel.add_script_parameter(
InputParameter.create(
value_node,
var_name,
type_dict[type(value_node.value)],
value_node.value,
)
)
if value_desc:
self.cqModel.add_parameter_description(var_name, value_desc)
except:
print("Unable to handle assignment for variable '%s'" % var_name)
pass
def visit_Assign(self, node):
try:
left_side = node.targets[0]
# do not handle attribute assignments
if isinstance(left_side, ast.Attribute):
return
# Handle the NamedConstant type that is only present in Python 3
astTypes = [ast.Num, ast.Str, ast.Name]
if hasattr(ast, "NameConstant"):
astTypes.append(ast.NameConstant)
if hasattr(ast, "Constant"):
astTypes.append(ast.Constant)
if type(node.value) in astTypes:
self.handle_assignment(left_side.id, node.value)
elif type(node.value) == ast.Tuple:
# we have a multi-value assignment
for n, v in zip(left_side.elts, node.value.elts):
self.handle_assignment(n.id, v)
elif type(node.value) == ast.Call:
try:
if node.value.func.id == "cqvar":
self.handle_assignment(left_side.id, node.value.args[0],
node.value.args[1].s)
except:
pass
except:
traceback.print_exc()
print("Unable to handle assignment for node '%s'" % ast.dump(left_side))
return node
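# Illustrative sketch, assuming the executing script has a cqvar(value, description)
# helper in scope: an assignment such as
#
#   thickness = cqvar(3.0, "wall thickness")
#
# is picked up by visit_Assign above and registered as the overridable parameter
# "thickness" with default 3.0 and description "wall thickness".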
```
#### File: freecad/cadquery2workbench/script_commands.py
```python
import os
import pathlib
import math as m
from random import random
import FreeCAD as App
import FreeCADGui as Gui
import Part
from PySide2.QtCore import QFileInfo, QTimer, QObject
from PySide2.QtWidgets import QMessageBox, QFileDialog, QLineEdit
from freecad.cadquery2workbench import MODULENAME
from freecad.cadquery2workbench import shared
from freecad.cadquery2workbench import cadquery_model
import cadquery as cq
class Script_Commands(QObject):
def __init__(self, parent):
QObject.__init__(self, parent)
self.parent = parent
self.file_contents = None
self.previous_path = os.environ['HOME']
# QTimer to check if file was modified on the disk
self.fiName = None
self.timeStamp = None
self.activity_timer = QTimer()
self.activity_timer.timeout.connect(self.changed_on_disk)
# open a file
def open_file(self, filename=None):
# Before open, check if file exist
if os.path.isfile(filename):
# OK so we can open the File
with open(filename) as f: self.file_contents = f.read()
self.parent.editor.setPlainText(self.file_contents)
# Watch the file we've opened
fi = QFileInfo(filename)
self.fiName = fi.baseName()
self.timeStamp = fi.lastModified().toTime_t()
self.activity_timer.setSingleShot(True)
self.activity_timer.start(3000)
# setup parent
self.parent.view3DApp = None
self.parent.view3DMdi = None
self.parent.filename = filename
self.parent.editor.document().setModified(False)
self.parent.ismodifed()
self.parent.setWindowTitle(self.parent.objectName() + " - " + os.path.basename(filename))
else:
App.Console.PrintError("Cannot find file : {0}\r\n".format(filename))
# reload file when changes is made from external editor
def reload_file(self):
# re-open our File
App.Console.PrintWarning("File {0} has been changed on the disk - Reload it\r\n".format(self.parent.filename))
with open(self.parent.filename) as f: self.file_contents = f.read()
self.parent.editor.setPlainText(self.file_contents)
self.parent.editor.document().setModified(False)
self.parent.ismodifed()
self.activity_timer.setSingleShot(True)
self.activity_timer.start(3000)
# Connect to QTimer to catch when file change on the disk
def changed_on_disk(self):
fi = QFileInfo(self.parent.filename)
fiName = fi.baseName()
timeStamp = fi.lastModified().toTime_t()
if (timeStamp != self.timeStamp): # or fiName != self.fiName
# reset our Timer
self.activity_timer.stop()
self.timeStamp = timeStamp
self.fiName = fiName
            # if allowReload is set by the user, don't prompt; just reload it in any case
allowReload = App.ParamGet("User parameter:BaseApp/Preferences/Mod/" + MODULENAME).GetBool("allowReload")
if not allowReload:
ret = QMessageBox.question(self.parent, self.parent.mw.tr("Modified file"),
self.parent.mw.tr("{0}.\n\nThis file has been modified outside of the source editor. Do you want to reload it?".format(self.parent.filename)),
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if ret == QMessageBox.Yes:
self.reload_file() # (self.parent.filename)
# Execute the script if the user has asked for it
if App.ParamGet("User parameter:BaseApp/Preferences/Mod/" + MODULENAME) \
.GetBool("executeOnSave"):
self.execute(action='Rebuild')
return
else:
self.reload_file() # (self.parent.filename)
# Execute the script if the user has asked for it
if App.ParamGet("User parameter:BaseApp/Preferences/Mod/" + MODULENAME) \
.GetBool("executeOnSave"):
self.execute(action='Rebuild')
return
self.activity_timer.setSingleShot(True)
self.activity_timer.start(3000)
    # Ask the user whether to save the changes
def aboutSave(self, title):
ret = QMessageBox.warning(self.parent, title,
"{0}\r\n\n".format(self.parent.filename) +
"This document has been modified.\r\n" +
"Do you want to save your changes?",
QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel,
QMessageBox.Save)
if ret == QMessageBox.Cancel:
return False
if ret == QMessageBox.Save:
if not self.save():
return False
return True
# Save current file or given filename file
def save(self, filename=None):
# Saving current file ?
if not filename:
filename = self.parent.filename
# If the code pane doesn't have a filename, we need to present the save as dialog
if len(filename) == 0 \
or os.path.basename(filename) == 'script_template.py' \
or os.path.split(filename)[0].endswith('FreeCAD'):
App.Console.PrintWarning("You cannot save over a blank file, example file or template file.\r\n")
return self.saveAs()
self.activity_timer.stop()
with open(filename, "w") as code_file:
code_file.write(self.parent.editor.toPlainText())
# Reset the QTimer
fi = QFileInfo(filename)
self.fiName = fi.baseName()
self.timeStamp = fi.lastModified().toTime_t()
self.activity_timer.setSingleShot(True)
self.activity_timer.start(3000)
# Update the status
self.parent.editor.document().setModified(False)
self.parent.ismodifed()
return filename
# Save As method
def saveAs(self):
fileDlg = QFileDialog.getSaveFileName(self.parent.mw,
self.parent.mw.tr("Save CadQuery Script As"),
self.previous_path,
self.parent.mw.tr("CadQuery Files (*.py)"))
filename = fileDlg[0]
# Make sure the user didn't click cancel
if filename:
if filename[-3:] != '.py':
filename += '.py'
self.previous_path = os.path.dirname(filename)
savedname = self.save(filename)
if not savedname:
return False
else:
filename = savedname
# Watch the file we've saved as new path/name
fi = QFileInfo(filename)
self.fiName = fi.baseName()
self.timeStamp = fi.lastModified().toTime_t()
self.activity_timer.setSingleShot(True)
self.activity_timer.start(3000)
# setup parent
self.parent.view3DApp = None
self.parent.view3DMdi = None
self.parent.filename = filename
self.parent.editor.document().setModified(False)
self.parent.ismodifed()
self.parent.setWindowTitle(self.parent.objectName() + " - " + os.path.basename(filename))
return filename
return False
# command to validate or execute or rebuild a script file
def execute(self, action='Execute'):
scriptText = self.parent.editor.toPlainText().encode('utf-8')
if (b"show_object(" not in scriptText) and (b"debug(" not in scriptText):
App.Console.PrintError("Script did not call show_object or debug, no output available. Script must be CQGI compliant to get build output, variable editing and validation.\r\n")
return
        # A representation of the CQ script with all the metadata attached
cqModel = cadquery_model.CQ_Model(scriptText) # cqgi.parse(scriptText)
# Allows us to present parameters to users later that they can alter
parameters = cqModel.metadata.parameters
# If paramEditor not yet build or execute with rebuild argument
if (not self.parent.cqvarseditor.haveParameters) or action=='Rebuild':
self.parent.cqvarseditor.clearParameters()
self.parent.cqvarseditor.populateParameterEditor(parameters)
# Build cq object
build_parameters = {}
# Get the variables from the paramEditor
dockWidget = self.parent.cqvarseditor
valueControls = dockWidget.findChildren(QLineEdit)
for valueControl in valueControls:
objectName = valueControl.objectName()
# We only want text fields that will have parameter values in them
if objectName != None and objectName != '' and objectName.find('pcontrol_') >= 0:
                # when doing Execute or Validate, the cqvarseditor is not rebuilt
                # however the user may have removed a variable from the script
                # so only import variables that are still in the script, ie in parameters
if objectName.split('pcontrol_')[1] in parameters:
# Associate the value in the text field with the variable name in the script
                    # As the parameters pass through a QLineEdit as a String, we lose the type
                    # and the value would otherwise be converted to a float
                    # However sometimes a value must stay as an int for the script to work properly
# Let's try to force the right type
val = valueControl.text()
try:
valtype = int(val)
except:
try:
valtype = float(val)
except:
valtype = val
build_parameters[objectName.replace('pcontrol_', '')] = valtype
build_result = cqModel.build(build_parameters=build_parameters)
list_objects = []
# if Settings.report_execute_time:
# App.Console.PrintMessage("Script executed in " + str(build_result.buildTime) + " seconds\r\n")
# Make sure that the build was successful
if build_result.success:
# Clean the 3D view if exist (user may have closed the view3D)
try:
if self.parent.view3DApp != None:
shared.clearActive3DView(self.parent.view3DApp)
except:
pass
# Display all the results that the user requested
for result in build_result.results:
# Apply options to the show function if any were provided
name = "Shape_" + str(random())
group = None
rgba = (204, 204, 204, 0.0)
                if result.options:
# parse the options
# object name
name = result.options['name'] \
if 'name' in result.options \
else name
# object group
group = result.options['group'] \
if 'group' in result.options else group
# object color
                    # if rgba is defined it supersedes any color or alpha options
if 'rgba' in result.options:
rgba= result.options['rgba']
# rgba provided as a String '#RRGGBB'or '#RRGGBBAA'
if type(rgba) == str:
rgba = rgba[1:] # remove first char '#'
red = int(rgba[0]+rgba[1], 16)
green = int(rgba[2]+rgba[3], 16)
blue = int(rgba[4]+rgba[5], 16)
if len(rgba) > 6:
alpha = int(rgba[6]+rgba[7], 16) / 255.0
else:
alpha = 0.0
rgba = (red, green, blue, alpha)
# rgba defined as CadQuery Color class
elif type(rgba) == cq.Color:
r, g, b, a = rgba.toTuple()
r *= 255
g *= 255
b *= 255
a = 1 - a
rgba = (r, g, b, a)
else:
# rgba is supposed to be a list (red, green, blue, alpha)
pass
else:
# rgba is not defined check for color and alpha
color = result.options['color'] if 'color' in result.options else (204, 204, 204)
alpha = result.options['alpha'] if 'alpha' in result.options else 0.0
rgba = (color[0], color[1], color[2], alpha)
# append object to the list of objects
list_objects.append((result.shape, rgba, name, group))
# if user choose to show render objects
if self.parent.show_debug:
for debugObj in build_result.debugObjects:
# Apply options to the show function if any were provided
if debugObj.options:
# parse the options
# object name, Mark this as a debug object
name = "Debug_" + debugObj.options['name'] \
if 'name' in debugObj.options \
else "Debug_" + str(random())
# object group
group = debugObj.options['group'] \
if 'group' in debugObj.options else None
# force color for Debug object
rgba = (255, 0, 0, 0.60)
name = debugObj.options['name'] if 'name' in debugObj.options else '_'
# Mark this as a debug object
name = "Debug_" + name
else:
name = "Debug_" + str(random())
group = None
rgba = (255, 0, 0, 0.60)
# append object to the list of objects
list_objects.append((debugObj.shape, rgba, name, group))
# show list of objects
if len(list_objects) > 0 and action != 'Validate':
self.showInFreeCAD(list_objects)
else:
App.Console.PrintError("Error executing CQGI-compliant script. " + str(build_result.exception) + "\r\n")
def append_assembly_parts(self, list_objects):
assy_obj_toremove=[]
for obj in list_objects:
cqObject = obj[0]
rgba = obj[1]
name = obj[2]
group = obj[3]
if len(obj) > 4:
loc = obj[4]
else:
loc = None
            # CadQuery Assembly, add each of its children to be rendered to list_objects
if type(cqObject) == cq.assembly.Assembly:
assy_obj_toremove.append(obj)
                # add children to list_objects
loc_parent = None
for assy in list(cqObject.objects.values()):
# color
if assy.color:
r, g, b, a = assy.color.toTuple()
r *= 255
g *= 255
b *= 255
a = 1 - a
rgba = (r, g, b, a)
else:
rgba = (204, 204, 204, 0.0)
# location
loc_matrix = App.Base.Matrix()
if not loc_parent:
if assy.loc:
loc = assy.loc.toTuple()
loc_parent = loc
else:
loc = ((0, 0, 0), (0, 0, 0))
loc_parent = loc
trans = loc[0]
rot = loc[1]
ax, ay, az = rot
loc_matrix.rotateX(ax)
loc_matrix.rotateY(ay)
loc_matrix.rotateZ(az)
loc_matrix.move(trans)
else:
# location of child assy are relative to parent one
if assy.loc:
loc = assy.loc.toTuple()
trans = loc[0]
rot = loc[1]
ax, ay, az = rot
loc_matrix.rotateX(ax)
loc_matrix.rotateY(ay)
loc_matrix.rotateZ(az)
loc_matrix.move(trans)
loc = loc_parent
trans = loc[0]
rot = loc[1]
ax, ay, az = rot
loc_matrix.rotateX(ax)
loc_matrix.rotateY(ay)
loc_matrix.rotateZ(az)
loc_matrix.move(trans)
# append each part of the Assembly
list_objects.append((assy.obj, rgba, assy.name, group, loc_matrix))
# remove Assy from list_objects
for obj in assy_obj_toremove:
list_objects.pop(list_objects.index(obj))
return list_objects
def showInFreeCAD(self, list_objects):
# get FreeCAD 3D view
activeDoc = shared.getActive3DView(self.parent.view3DApp, self.parent, self.parent.filename)
# first loop to split Assembly parts
list_objects = self.append_assembly_parts(list_objects)
for obj in list_objects:
cqObject = obj[0]
rgba = obj[1]
name = obj[2]
group = obj[3]
if len(obj) > 4:
loc = obj[4]
else:
loc = None
# CadQuery Assembly
if type(cqObject) == cq.assembly.Assembly:
                App.Console.PrintError("Only one sub-level of assembly parts is currently supported")
return
# Make sure we replace any troublesome characters not supported in FreeCAD
# As we search for existing group below
if group:
for ch in ['&', '#', '.', '$', '%', ',', ' ']:
if ch in group:
group = group.replace(ch, "_")
#Convert our rgba values
r = rgba[0] / 255.0
g = rgba[1] / 255.0
b = rgba[2] / 255.0
a = int(rgba[3] * 100.0)
# case group was passed in the options
if group:
                # check if the group already exists in activeDoc
group_exist = False
for obj in activeDoc.Objects:
if type(obj) == App.DocumentObjectGroup and obj.Name == group:
group_exist = True
if not group_exist:
# create it
activeDoc.Tip = activeDoc.addObject('App::DocumentObjectGroup', group)
# add newFeature to group
newFeature = activeDoc.addObject("Part::Feature", name)
activeDoc.getObject(group).addObject(newFeature)
else:
# add newFeature to activeDoc
newFeature = activeDoc.addObject("Part::Feature", name)
#Change our shape's properties accordingly
newFeature.ViewObject.ShapeColor = (r, g, b)
newFeature.ViewObject.Transparency = a
self.tofreecad(cqObject, newFeature)
# Placement
if loc:
newFeature.Placement = App.Placement(loc).multiply(newFeature.Placement)
# newFeature.Placement = newFeature.Placement.multiply(App.Placement(m))
# All object are added to FreeCAD, recompute
activeDoc.recompute()
if self.parent.firstexecute:
# On the first Execution force the Camera and View settings
# Then next time keep user view
Gui.activeDocument().activeView().setCamera('OrthographicCamera{}')
Gui.activeDocument().activeView().viewIsometric()
Gui.SendMsgToActiveView("ViewFit")
self.parent.firstexecute = False
# Expand Tree View if there are groups
# activeDoc = App.ActiveDocument
for obj in activeDoc.Objects:
if type(obj) == App.DocumentObjectGroup:
# Gui.Selection.addSelection(activeDoc.Name,obj.Group[0].Name)
Gui.activeDocument(). \
scrollToTreeItem(Gui.activeDocument().getObject(obj.Group[0].Name))
# Gui.Selection.clearSelection()
def tofreecad(self, cqObject, feature):
# Use a temporary BREP file to get the cadquery shape
# Use FreeCAD Home directory
env = os.environ
temppath = env['FREECAD_USER_HOME'] if 'FREECAD_USER_HOME' in env else env['HOME']
temppath += '/.FreeCAD/tmp'
# if not exist create the tmp directory
pathlib.Path(temppath).mkdir(parents=True, exist_ok=True)
# convert to FreeCAD Shape using BRep export/import
filename = temppath + '/brep'
cqObject.val().exportBrep(filename)
tmp_shape = Part.Shape()
tmp_shape.importBrep(filename)
feature.Shape = tmp_shape
``` |
{
"source": "jpmmcneill/dbt",
"score": 2
} |
#### File: test/unit/test_macro_calls.py
```python
import os
import unittest
from unittest.mock import MagicMock, patch
from dataclasses import dataclass, field
from typing import Dict, Any
from dbt.clients.jinja import statically_extract_macro_calls
from dbt.context.base import generate_base_context
class MacroCalls(unittest.TestCase):
def setUp(self):
self.macro_strings = [
"{% macro parent_macro() %} {% do return(nested_macro()) %} {% endmacro %}",
"{% macro lr_macro() %} {{ return(load_result('relations').table) }} {% endmacro %}",
"{% macro get_snapshot_unique_id() -%} {{ return(adapter.dispatch('get_snapshot_unique_id')()) }} {%- endmacro %}",
"{% macro get_columns_in_query(select_sql) -%} {{ return(adapter.dispatch('get_columns_in_query')(select_sql)) }} {% endmacro %}",
"""{% macro test_mutually_exclusive_ranges(model) %}
with base as (
select {{ get_snapshot_unique_id() }} as dbt_unique_id,
*
from {{ model }} )
{% endmacro %}""",
"{% macro test_my_test(model) %} select {{ dbt_utils.current_timestamp() }} {% endmacro %}"
]
self.possible_macro_calls = [
['nested_macro'],
['load_result'],
['get_snapshot_unique_id'],
['get_columns_in_query'],
['get_snapshot_unique_id'],
['dbt_utils.current_timestamp'],
]
def test_macro_calls(self):
ctx = generate_base_context({})
index = 0
for macro_string in self.macro_strings:
possible_macro_calls = statically_extract_macro_calls(macro_string, ctx)
self.assertEqual(possible_macro_calls, self.possible_macro_calls[index])
index = index + 1
``` |
{
"source": "jpmn/sqlalchemy-imageattach",
"score": 3
} |
#### File: sqlalchemy-imageattach/sqlalchemy_imageattach/context.py
```python
import contextlib
import sys
if sys.version_info >= (3,):
try:
import _thread
except ImportError:
import _dummy_thread as _thread
else:
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
try:
import greenlet
except ImportError:
greenlet = None
try:
import stackless
except ImportError:
stackless = None
from .store import Store # noqa
__all__ = ('ContextError', 'LocalProxyStore', 'context_stacks',
'current_store', 'get_current_context_id', 'get_current_store',
'pop_store_context', 'push_store_context', 'store_context')
def get_current_context_id():
    """Identifies which context it is (greenlet, stackless, or thread).
:returns: the identifier of the current context.
"""
global get_current_context_id
if greenlet is not None:
if stackless is None:
get_current_context_id = greenlet.getcurrent
return greenlet.getcurrent()
return greenlet.getcurrent(), stackless.getcurrent()
elif stackless is not None:
get_current_context_id = stackless.getcurrent
return stackless.getcurrent()
get_current_context_id = _thread.get_ident
return _thread.get_ident()
#: (:class:`dict`) The dictionary of concurrent contexts to their stacks.
context_stacks = {}
def push_store_context(store):
"""Manually pushes a store to the current stack.
    Although :func:`store_context()` and the :keyword:`with` keyword are
    preferred over using it, it's useful when you have to push and pop
the current stack on different hook functions like setup/teardown.
:param store: the image store to set to the :data:`current_store`
:type store: :class:`~sqlalchemy_imageattach.store.Store`
"""
context_stacks.setdefault(get_current_context_id(), []).append(store)
def pop_store_context():
"""Manually pops the current store from the stack.
    Although :func:`store_context()` and the :keyword:`with` keyword are
    preferred over using it, it's useful when you have to push and pop
the current stack on different hook functions like setup/teardown.
:returns: the current image store
:rtype: :class:`~sqlalchemy_imageattach.store.Store`
"""
return context_stacks.setdefault(get_current_context_id(), []).pop()
@contextlib.contextmanager
def store_context(store):
"""Sets the new (nested) context of the current image storage::
with store_context(store):
print current_store
It could be set nestedly as well::
with store_context(store1):
print current_store # store1
with store_context(store2):
print current_store # store2
print current_store # store1 back
:param store: the image store to set to the :data:`current_store`
:type store: :class:`~sqlalchemy_imageattach.store.Store`
"""
if not isinstance(store, Store):
raise TypeError('store must be an instance of sqlalchemy_imageattach.'
'store.Store, not ' + repr(store))
push_store_context(store)
yield store
pop_store_context()
def get_current_store():
"""The lower-level function of :data:`current_store`. It returns
    the **actual** store instance while :data:`current_store` is just a
proxy of it.
:returns: the actual object of the currently set image store
:rtype: :class:`~sqlalchemy_imageattach.store.Store`
"""
try:
store = context_stacks.setdefault(get_current_context_id(), [])[-1]
except IndexError:
raise ContextError('not in store_context; use sqlalchemy_imageattach.'
'entity.store_context()')
return store
class LocalProxyStore(Store):
"""Proxy of another image storage.
:param get_current_object: a function that returns "current" store
:type get_current_object: :class:`typing.Callable`\ [[],
:class:`.store.Store`]
:param repr_string: an optional string for :func:`repr()`
:type repr_string: :class:`str`
"""
def __init__(self, get_current_object, repr_string=None):
if not callable(get_current_object):
raise TypeError('expected callable')
self.get_current_object = get_current_object
self.repr_string = repr_string
def put_file(self, file, object_type, object_id, width, height,
mimetype, reproducible):
self.get_current_object().put_file(
file, object_type, object_id, width, height,
mimetype, reproducible
)
def delete_file(self, object_type, object_id, width, height, mimetype):
self.get_current_object().delete_file(
object_type, object_id, width, height, mimetype
)
def get_file(self, object_type, object_id, width, height, mimetype):
return self.get_current_object().get_file(
object_type, object_id, width, height, mimetype
)
def get_url(self, object_type, object_id, width, height, mimetype):
return self.get_current_object().get_url(
object_type, object_id, width, height, mimetype
)
def __eq__(self, other):
return self.get_current_object() == other
def __ne__(self, other):
return self.get_current_object() != other
def __hash__(self):
return hash(self.get_current_object())
def __repr__(self):
if self.repr_string is None:
try:
current_store = self.get_current_object()
except ContextError:
                return '<Unbound {0}.{1}>'.format(self.__module__,
                                                  type(self).__name__)
return repr(current_store)
return self.repr_string
#: (:class:`LocalProxyStore`) The currently set context of the image store
#: backend. It can be set using :func:`store_context()`.
current_store = LocalProxyStore(get_current_store,
__name__ + '.current_store')
class ContextError(Exception):
"""The exception which rises when the :data:`current_store` is required
but there's no currently set store context.
"""
```
#### File: sqlalchemy-imageattach/tests/migration_test.py
```python
import io
import os.path
from pytest import fixture
from sqlalchemy_imageattach.context import store_context
from sqlalchemy_imageattach.migration import migrate, migrate_class
from sqlalchemy_imageattach.store import Store
from .conftest import Base, sample_images_dir
from .entity_test import Samething, Something, SomethingCover
class SourceStore(Store):
def __init__(self):
self.files = {}
def put_file(self, file, object_type, object_id, width, height, mimetype,
reproducible):
key = object_type, object_id, width, height, mimetype
self.files[key] = file.read(), reproducible
def get_file(self, object_type, object_id, width, height, mimetype):
key = object_type, object_id, width, height, mimetype
return io.BytesIO(self.files[key][0])
@fixture
def fx_source_store():
return SourceStore()
@fixture
def fx_migration(fx_session, fx_source_store):
with store_context(fx_source_store):
with fx_session.begin():
a1 = Something(name='a1')
fx_session.add(a1)
with open(os.path.join(sample_images_dir, 'iu.jpg'), 'rb') as f:
a1.cover.from_file(f)
a1.cover.generate_thumbnail(height=480)
a1.cover.generate_thumbnail(height=320)
a1.cover.generate_thumbnail(height=160)
a2 = Something(name='a2')
fx_session.add(a2)
with open(os.path.join(sample_images_dir, 'iu2.jpg'), 'rb') as f:
a2.cover.from_file(f)
b1 = Samething(name='b1')
fx_session.add(b1)
with open(os.path.join(sample_images_dir, 'asuka.jpg'), 'rb') as f:
b1.cover.from_file(f)
b1.cover.generate_thumbnail(height=375)
b1.cover.generate_thumbnail(height=250)
b1.cover.generate_thumbnail(height=125)
b2 = Samething(name='b2')
fx_session.add(b2)
with open(os.path.join(sample_images_dir, 'shinji.jpg'),
'rb') as f:
b2.cover.from_file(f)
def test_migrate_class_execute(fx_session, fx_source_store, fx_migration):
dst = SourceStore()
plan = migrate_class(fx_session, SomethingCover, fx_source_store, dst)
assert dst.files == {}
plan.execute()
assert dst.files == dict(
(k, v)
for k, v in fx_source_store.files.items()
if k[0] == 'something-cover'
)
def test_migrate_class_iter(fx_session, fx_source_store, fx_migration):
dst = SourceStore()
plan = migrate_class(fx_session, SomethingCover, fx_source_store, dst)
assert dst.files == {}
for _ in plan:
pass
assert dst.files == dict(
(k, v)
for k, v in fx_source_store.files.items()
if k[0] == 'something-cover'
)
def test_migrate_execute(fx_session, fx_source_store, fx_migration):
dst = SourceStore()
plan = migrate(fx_session, Base, fx_source_store, dst)
assert dst.files == {}
plan.execute()
assert fx_source_store.files == dst.files
def test_migrate_iter(fx_session, fx_source_store, fx_migration):
dst = SourceStore()
plan = migrate(fx_session, Base, fx_source_store, dst)
assert dst.files == {}
for _ in plan:
pass
assert fx_source_store.files == dst.files
``` |
{
"source": "jpmn/sqlalchemy-media",
"score": 2
} |
#### File: cherrypy-demo/cp_sam/models.py
```python
import json
from sqlalchemy import TypeDecorator, Unicode, Column, Integer
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_media import Image, ImageValidator, ImageProcessor, ImageAnalyzer
from sqlalchemy_media.constants import MB, KB
Base = declarative_base()
engine = create_engine('sqlite:///demo.db', echo=True)
DBSession = scoped_session(sessionmaker(bind=engine))
class Json(TypeDecorator):
impl = Unicode
def process_bind_param(self, value, engine):
return json.dumps(value)
def process_result_value(self, value, engine):
if value is None:
return None
return json.loads(value)
class Avatar(Image):
__auto_coercion__ = True
__pre_processors__ = [
ImageAnalyzer(),
ImageValidator(
minimum=(10, 10),
maximum=(3840, 3840),
content_types=('image/jpeg', 'image/png', 'image/gif'),
min_aspect_ratio=1,
max_aspect_ratio=1
),
ImageProcessor(fmt='jpeg', width=128)
]
__max_length__ = 6*MB
__min_length__ = 10*KB
class Person(Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(Unicode)
avatar = Column(Avatar.as_mutable(Json))
Base.metadata.create_all(engine, checkfirst=True)
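# Minimal usage sketch (not part of the original demo): the store registration,
# storage path, URL and image filename below are assumptions for illustration;
# the real CherryPy application wires up its own store.
if __name__ == '__main__':
    from functools import partial
    from sqlalchemy_media import StoreManager, FileSystemStore
    StoreManager.register(
        'fs',
        partial(FileSystemStore, '/tmp/cp_sam_storage', 'http://localhost:8080/media'),
        default=True
    )
    session = DBSession()
    with StoreManager(session):
        person = Person(name='demo')
        person.avatar = Avatar.create_from('avatar.jpg')  # hypothetical local file
        session.add(person)
        session.commit()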
```
#### File: sqlalchemy-media/sqlalchemy_media/context.py
```python
try:
import _thread
except ImportError: # pragma: no cover
import _dummy_thread as _thread
try:
# noinspection PyPackageRequirements
import greenlet
except ImportError:
greenlet = None
try:
import stackless
except ImportError:
stackless = None
if greenlet is not None: # pragma: no cover
if stackless is None:
get_id = greenlet.getcurrent
else:
def get_id():
return greenlet.getcurrent(), stackless.getcurrent()
elif stackless is not None: # pragma: no cover
get_id = stackless.getcurrent
else:
get_id = _thread.get_ident
```
#### File: sqlalchemy_media/stores/s3.py
```python
from io import BytesIO
# Importing optional stuff required by http based store
try:
# noinspection PyPackageRequirements
import requests
except ImportError: # pragma: no cover
requests = None
# Importing optional stuff required by S3 store
try:
# noinspection PyPackageRequirements
from requests_aws4auth import AWS4Auth
except ImportError: # pragma: no cover
AWS4Auth = None
from sqlalchemy_media.exceptions import S3Error
from sqlalchemy_media.optionals import ensure_aws4auth
from sqlalchemy_media.typing_ import FileLike
from .base import Store
DEFAULT_MAX_AGE = 60 * 60 * 24 * 365
class S3Store(Store):
"""
Store for dealing with s3 of aws
.. versionadded:: 0.9.0
.. versionadded:: 0.9.6
- ``prefix``
"""
base_url = 'https://{0}.s3.amazonaws.com'
def __init__(self, bucket: str, access_key: str, secret_key: str,
region: str, max_age: int = DEFAULT_MAX_AGE,
prefix: str = None, base_url: str = None,
cdn_url: str = None, cdn_prefix_ignore: bool = False, acl: str = 'private'):
self.bucket = bucket
self.access_key = access_key
self.secret_key = secret_key
self.region = region
self.max_age = max_age
self.prefix = prefix
self.acl = acl
if base_url:
self.base_url = base_url
else:
self.base_url = self.base_url.format(bucket)
if prefix:
self.base_url = '{0}/{1}'.format(self.base_url, prefix)
if cdn_url and not cdn_prefix_ignore:
cdn_url = '%s/%s' % (cdn_url, prefix)
if self.base_url.endswith('/'):
self.base_url = self.base_url.rstrip('/')
if cdn_url and cdn_url.endswith('/'):
cdn_url = cdn_url.rstrip('/')
self.cdn_url = cdn_url
def _get_s3_url(self, filename: str):
return '{0}/{1}'.format(self.base_url, filename)
def _upload_file(self, url: str, data: str, content_type: str,
rrs: bool = False):
ensure_aws4auth()
auth = AWS4Auth(self.access_key, self.secret_key, self.region, 's3')
if rrs:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = 'STANDARD'
headers = {
'Cache-Control': 'max-age=' + str(self.max_age),
'x-amz-acl': self.acl,
'x-amz-storage-class': storage_class
}
if content_type:
headers['Content-Type'] = content_type
res = requests.put(url, auth=auth, data=data, headers=headers)
if not 200 <= res.status_code < 300:
raise S3Error(res.text)
def put(self, filename: str, stream: FileLike):
url = self._get_s3_url(filename)
data = stream.read()
content_type = getattr(stream, 'content_type', None)
rrs = getattr(stream, 'reproducible', False)
self._upload_file(url, data, content_type, rrs=rrs)
return len(data)
def delete(self, filename: str):
ensure_aws4auth()
url = self._get_s3_url(filename)
auth = AWS4Auth(self.access_key, self.secret_key, self.region, 's3')
res = requests.delete(url, auth=auth)
if not 200 <= res.status_code < 300:
raise S3Error(res.text)
def open(self, filename: str, mode: str='rb') -> FileLike:
ensure_aws4auth()
url = self._get_s3_url(filename)
auth = AWS4Auth(self.access_key, self.secret_key, self.region, 's3')
res = requests.get(url, auth=auth)
if not 200 <= res.status_code < 300:
raise S3Error(res.text)
return BytesIO(res.content)
def locate(self, attachment) -> str:
if self.cdn_url:
base_url = self.cdn_url
else:
base_url = self.base_url
return '%s/%s' % (base_url, attachment.path)
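# Illustrative usage sketch (not part of the original module); the bucket name,
# credentials and region below are placeholders:
#
#   store = S3Store('my-bucket', 'ACCESS_KEY', 'SECRET_KEY', 'us-east-1',
#                   prefix='media', acl='public-read')
#   StoreManager.register('s3', lambda: store, default=True)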
```
#### File: sqlalchemy_media/tests/test_delete_orphan.py
```python
import unittest
from io import BytesIO
from os.path import join, exists
from sqlalchemy import Column, Integer
from sqlalchemy_media.attachments import File, FileList, FileDict, Image
from sqlalchemy_media.stores import StoreManager
from sqlalchemy_media.tests.helpers import Json, TempStoreTestCase
class DeleteOrphanTestCase(TempStoreTestCase):
def setUp(self):
super().setUp()
self.sample_text_file1 = join(self.stuff_path, 'sample_text_file1.txt')
def test_delete_orphan(self):
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
cv = Column(File.as_mutable(Json), nullable=True)
session = self.create_all_and_get_session()
person1 = Person()
self.assertIsNone(person1.cv)
with StoreManager(session, delete_orphan=True):
# First file before commit
person1.cv = File.create_from(BytesIO(b'Simple text.'), content_type='text/plain', extension='.txt')
self.assertIsInstance(person1.cv, File)
first_filename = join(self.temp_path, person1.cv.path)
self.assertTrue(exists(first_filename))
person1.cv = File.create_from(BytesIO(b'Second simple text.'))
second_filename = join(self.temp_path, person1.cv.path)
self.assertTrue(exists(first_filename))
self.assertTrue(exists(second_filename))
session.add(person1)
session.commit()
self.assertFalse(exists(first_filename))
self.assertTrue(exists(second_filename))
def test_without_delete_orphan(self):
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
cv = Column(File.as_mutable(Json), nullable=True)
session = self.create_all_and_get_session()
person1 = Person()
with StoreManager(session):
# First file before commit
person1.cv = File.create_from(BytesIO(b'Simple text.'), content_type='text/plain', extension='.txt')
first_filename = join(self.temp_path, person1.cv.path)
person1.cv = File.create_from(BytesIO(b'Second simple text.'))
second_filename = join(self.temp_path, person1.cv.path)
session.add(person1)
session.commit()
self.assertTrue(exists(first_filename))
self.assertTrue(exists(second_filename))
def test_delete_orphan_list(self):
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
files = Column(FileList.as_mutable(Json))
session = self.create_all_and_get_session()
with StoreManager(session, delete_orphan=True):
person1 = Person()
person1.files = FileList([
File.create_from(BytesIO(b'simple text %d' % i)) for i in range(2)
])
# Removing the first file
first_filename = join(self.temp_path, person1.files[0].path)
second_filename = join(self.temp_path, person1.files[1].path)
person1.files = FileList([
File.create_from(BytesIO(b'New test file: %d' % i)) for i in range(2)
])
session.add(person1)
session.commit()
self.assertFalse(exists(first_filename))
self.assertFalse(exists(second_filename))
first_filename = join(self.temp_path, person1.files[0].path)
second_filename = join(self.temp_path, person1.files[1].path)
self.assertTrue(exists(first_filename))
self.assertTrue(exists(second_filename))
def test_delete_orphan_list_item(self):
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
files = Column(FileList.as_mutable(Json))
session = self.create_all_and_get_session()
with StoreManager(session, delete_orphan=True):
person1 = Person()
person1.files = FileList()
person1.files.append(File.create_from(BytesIO(b'simple text 1')))
person1.files.append(File.create_from(BytesIO(b'simple text 2')))
person1.files.append(File.create_from(BytesIO(b'simple text 3')))
# Removing the first file
first_filename = join(self.temp_path, person1.files[0].path)
person1.files.remove(person1.files[0])
session.add(person1)
session.commit()
self.assertFalse(exists(first_filename))
# noinspection PyTypeChecker
self.assertEqual(len(person1.files), 2)
# Loading from db
person1 = session.query(Person).one()
# Preserving the first file's path
first_filename = join(self.temp_path, person1.files[0].path)
# remove from orphan list
f = person1.files[1]
person1.files.remove(f)
person1.files.insert(1, f)
self.assertEqual(len(person1.files), 2)
# Removing the first file
del person1.files[0]
session.commit()
self.assertFalse(exists(first_filename))
self.assertEqual(len(person1.files), 1)
old_attachment_filename = join(self.temp_path, person1.files[0].path)
attachment = person1.files[0].attach(BytesIO(b'Changed inside nested mutable!'))
attachment_filename = join(self.temp_path, attachment.path)
self.assertTrue(exists(old_attachment_filename))
self.assertTrue(exists(attachment_filename))
session.commit()
self.assertFalse(exists(old_attachment_filename))
self.assertTrue(exists(attachment_filename))
def test_delete_orphan_dict(self):
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
files = Column(FileDict.as_mutable(Json))
session = self.create_all_and_get_session()
with StoreManager(session, delete_orphan=True):
person1 = Person()
person1.files = FileDict({
str(i): File.create_from(BytesIO(b'simple text %d' % i)) for i in range(2)
})
# Removing the first file
first_filename = join(self.temp_path, person1.files['0'].path)
second_filename = join(self.temp_path, person1.files['1'].path)
person1.files = FileDict({
str(i): File.create_from(BytesIO(b'New Text File %d' % i)) for i in range(2)
})
session.add(person1)
session.commit()
self.assertFalse(exists(first_filename))
self.assertFalse(exists(second_filename))
first_filename = join(self.temp_path, person1.files['0'].path)
second_filename = join(self.temp_path, person1.files['1'].path)
self.assertTrue(exists(first_filename))
self.assertTrue(exists(second_filename))
def test_delete_orphan_dict_item(self):
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
files = Column(FileDict.as_mutable(Json))
session = self.create_all_and_get_session()
with StoreManager(session, delete_orphan=True):
person1 = Person()
person1.files = FileDict({
str(i): File.create_from(BytesIO(b'simple text %d' % i)) for i in range(2)
})
# Removing the first file
first_filename = join(self.temp_path, person1.files['0'].path)
del person1.files['0']
session.add(person1)
session.commit()
self.assertFalse(exists(first_filename))
# noinspection PyTypeChecker
self.assertEqual(len(person1.files), 1)
# Loading from db
person1 = session.query(Person).one()
# Preserving the first file's path
first_filename = join(self.temp_path, person1.files['1'].path)
# Clearing
person1.files.clear()
session.commit()
self.assertFalse(exists(first_filename))
self.assertEqual(len(person1.files), 0)
def test_delete_orphan_image(self):
"""
https://github.com/pylover/sqlalchemy-media/issues/81
"""
class Person(self.Base):
__tablename__ = 'person'
id = Column(Integer, primary_key=True)
pic = Column(Image.as_mutable(Json), nullable=True)
session = self.create_all_and_get_session()
with StoreManager(session, delete_orphan=True):
person1 = Person()
person1.pic = Image.create_from(self.cat_jpeg)
first_filename = join(self.temp_path, person1.pic.path)
session.commit()
self.assertTrue(exists(first_filename))
person1.pic = Image.create_from(self.dog_jpeg)
session.commit()
self.assertFalse(exists(first_filename))
if __name__ == '__main__': # pragma: no cover
unittest.main()
```
#### File: sqlalchemy_media/tests/test_descriptors.py
```python
import os
import unittest
import io
import cgi
from os.path import dirname, abspath, join, split
from sqlalchemy_media.helpers import copy_stream, md5sum
from sqlalchemy_media.tests.helpers import mockup_http_static_server, encode_multipart_data
from sqlalchemy_media.descriptors import AttachableDescriptor, LocalFileSystemDescriptor, CgiFieldStorageDescriptor, \
UrlDescriptor, StreamDescriptor
from sqlalchemy_media.exceptions import MaximumLengthIsReachedError, DescriptorOperationError
class AttachableDescriptorsTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.this_dir = abspath(dirname(__file__))
cls.stuff_path = join(cls.this_dir, 'stuff')
cls.cat_jpeg = join(cls.stuff_path, 'cat.jpg')
cls.dog_jpeg = join(cls.stuff_path, 'dog.jpg')
def test_stream(self):
# guess content types from extension
descriptor = AttachableDescriptor(io.BytesIO(b'Simple text'), extension='.txt')
self.assertIsInstance(descriptor, StreamDescriptor)
self.assertEqual(descriptor.content_type, 'text/plain')
descriptor.seek(2)
self.assertEqual(descriptor.tell(), 2)
descriptor.seek(0, os.SEEK_END)
self.assertEqual(descriptor.tell(), 11)
# guess extension from original filename
descriptor = AttachableDescriptor(io.BytesIO(b'Simple text'), original_filename='letter.pdf')
self.assertEqual(descriptor.extension, '.pdf')
# guess extension from content type
descriptor = AttachableDescriptor(io.BytesIO(b'Simple text'), content_type='application/json')
self.assertEqual(descriptor.extension, '.json')
self.assertRaises(DescriptorOperationError, lambda: descriptor.filename)
def test_non_seekable(self):
class NonSeekableStream(io.BytesIO):
def seekable(self, *args, **kwargs):
return False
inp = b'abcdefghijklmnopqrstuvwxyz'
descriptor = AttachableDescriptor(NonSeekableStream(inp), header_buffer_size=10)
        # Fetching the header forces it to cache header_buffer_size bytes from the header.
buffer = descriptor.get_header_buffer()
self.assertEqual(buffer, b'abcdefghij')
# fetching again to test the cache functionality
buffer = descriptor.get_header_buffer()
self.assertEqual(buffer, b'abcdefghij')
out = b''
out += descriptor.read(9)
self.assertEqual(descriptor.tell(), 9)
out += descriptor.read(11)
self.assertEqual(descriptor.tell(), 20)
out += descriptor.read(10)
self.assertEqual(out, inp)
# Max length error
descriptor = AttachableDescriptor(NonSeekableStream(inp), header_buffer_size=24, max_length=20)
buffer = descriptor.get_header_buffer()
self.assertEqual(buffer, b'abcdefghijklmnopqrstuvwx')
self.assertRaises(MaximumLengthIsReachedError, descriptor.read, 1)
# Test getting header buffer after read on non-seekable streams.
descriptor = AttachableDescriptor(NonSeekableStream(inp), header_buffer_size=10, max_length=20)
self.assertEqual(descriptor.read(10), b'abcdefghij')
self.assertRaises(DescriptorOperationError, descriptor.get_header_buffer)
def test_localfs(self):
descriptor = AttachableDescriptor(self.cat_jpeg, width=100, height=80)
self.assertIsInstance(descriptor, LocalFileSystemDescriptor)
self.assertEqual(descriptor.filename, self.cat_jpeg)
# Must be determined from the given file's extension: .jpg
self.assertEqual(descriptor.content_type, 'image/jpeg')
self.assertEqual(descriptor.original_filename, self.cat_jpeg)
# noinspection PyUnresolvedReferences
self.assertEqual(descriptor.width, 100)
# noinspection PyUnresolvedReferences
self.assertEqual(descriptor.height, 80)
self.assertEqual(len(descriptor.get_header_buffer()), 1024)
buffer = io.BytesIO()
copy_stream(descriptor, buffer)
buffer.seek(0)
self.assertEqual(md5sum(buffer), md5sum(self.cat_jpeg))
def test_url(self):
with mockup_http_static_server(self.cat_jpeg) as http_server:
url = 'http://%s:%s' % http_server.server_address
descriptor = AttachableDescriptor(url)
self.assertIsInstance(descriptor, UrlDescriptor)
self.assertEqual(descriptor.content_type, 'image/jpeg') # Must be determined from response headers
self.assertEqual(descriptor.content_length, 70279) # Must be determined from response headers
self.assertEqual(descriptor.original_filename, url)
def test_cgi_field_storage(self):
# encode a multipart form
content_type, body, content_length = encode_multipart_data(files=dict(cat=self.cat_jpeg))
environ = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': content_type,
'CONTENT_LENGTH': content_length
}
storage = cgi.FieldStorage(body, environ=environ)
descriptor = AttachableDescriptor(storage['cat'])
self.assertIsInstance(descriptor, CgiFieldStorageDescriptor)
self.assertEqual(descriptor.content_type, 'image/jpeg')
self.assertEqual(descriptor.original_filename, split(self.cat_jpeg)[1])
buffer = io.BytesIO()
copy_stream(descriptor, buffer)
buffer.seek(0)
self.assertEqual(md5sum(buffer), md5sum(self.cat_jpeg))
def test_force_seekable(self):
with mockup_http_static_server(self.cat_jpeg) as http_server:
url = 'http://%s:%s' % http_server.server_address
original_sum = md5sum(self.cat_jpeg)
with AttachableDescriptor(url) as descriptor:
descriptor.prepare_to_read(backend='file')
self.assertEqual(original_sum, md5sum(descriptor))
with AttachableDescriptor(url) as descriptor:
descriptor.prepare_to_read(backend='temp')
self.assertEqual(original_sum, md5sum(descriptor))
with AttachableDescriptor(url) as descriptor:
descriptor.prepare_to_read(backend='memory')
self.assertEqual(original_sum, md5sum(descriptor))
with AttachableDescriptor(url) as descriptor:
# Reading some bytes, before making the stream seekable
descriptor.get_header_buffer()
descriptor.prepare_to_read(backend='temp')
self.assertEqual(original_sum, md5sum(descriptor))
with AttachableDescriptor(url) as descriptor:
self.assertRaises(DescriptorOperationError, descriptor.prepare_to_read, backend='InvalidBackend')
with open(self.dog_jpeg, 'rb') as f, AttachableDescriptor(url) as descriptor:
descriptor.replace(f, position=1024)
with open(self.dog_jpeg, 'rb') as f, AttachableDescriptor(url) as descriptor:
descriptor.replace(f)
self.assertEqual(md5sum(descriptor), md5sum(self.dog_jpeg))
if __name__ == '__main__': # pragma: no cover
unittest.main()
``` |
{
"source": "jp-modernisation-gouv-fr/sydent",
"score": 2
} |
#### File: http/servlets/__init__.py
```python
import json
import copy
def get_args(request, required_args):
"""
Helper function to get arguments for an HTTP request
Currently takes args from the top level keys of a json object or
    www-form-urlencoded for backwards compatibility.
    Returns a tuple (error, args): if error is not None, the request
    is malformed. Otherwise, args contains the parsed parameters.
"""
args = None
if (
request.requestHeaders.hasHeader('Content-Type') and
request.requestHeaders.getRawHeaders('Content-Type')[0].startswith('application/json')
):
try:
args = json.load(request.content)
except ValueError:
request.setResponseCode(400)
return {'errcode': 'M_BAD_JSON', 'error': 'Malformed JSON'}, None
# If we didn't get anything from that, try the request args
# (riot-web's usage of the ed25519 sign servlet currently involves
# sending the params in the query string with a json body of 'null')
if args is None:
args = copy.copy(request.args)
# Twisted supplies everything as an array because it's valid to
# supply the same params multiple times with www-form-urlencoded
        # params. This makes it incompatible with the json object though,
# so we need to convert one of them. Since this is the
# backwards-compat option, we convert this one.
for k, v in args.items():
if isinstance(v, list) and len(v) == 1:
args[k] = v[0]
missing = []
for a in required_args:
if a not in args:
missing.append(a)
if len(missing) > 0:
request.setResponseCode(400)
msg = "Missing parameters: "+(",".join(missing))
return {'errcode': 'M_MISSING_PARAMS', 'error': msg}, None
return None, args
def jsonwrap(f):
def inner(*args, **kwargs):
return json.dumps(f(*args, **kwargs)).encode("UTF-8")
return inner
def send_cors(request):
request.setHeader(b"Content-Type", b"application/json")
request.setHeader("Access-Control-Allow-Origin", "*")
request.setHeader("Access-Control-Allow-Methods",
"GET, POST, PUT, DELETE, OPTIONS")
request.setHeader("Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept")
```
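Taken together, these helpers are meant to be composed inside a Twisted `Resource`: `send_cors` sets the response headers, `get_args` parses and validates the body, and `jsonwrap` serialises whatever the handler returns. The sketch below shows that composition; the `EchoServlet` class and `example_param` parameter are made up for illustration, but the structure mirrors the real servlets that follow.

```python
# Hypothetical servlet showing how get_args / jsonwrap / send_cors compose.
# 'EchoServlet' and 'example_param' are illustrative names only.
from twisted.web.resource import Resource

from sydent.http.servlets import get_args, jsonwrap, send_cors


class EchoServlet(Resource):
    isLeaf = True

    @jsonwrap
    def render_POST(self, request):
        send_cors(request)
        error, args = get_args(request, ('example_param',))
        if error:
            return error  # get_args has already set the 400 response code
        return {'echo': args['example_param']}

    @jsonwrap
    def render_OPTIONS(self, request):
        send_cors(request)
        request.setResponseCode(200)
        return {}
```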
#### File: http/servlets/msisdnservlet.py
```python
import logging
from twisted.web.resource import Resource
import phonenumbers
from sydent.validators import (
IncorrectClientSecretException, SessionExpiredException, DestinationRejectedException
)
from sydent.http.servlets import get_args, jsonwrap, send_cors
logger = logging.getLogger(__name__)
class MsisdnRequestCodeServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
@jsonwrap
def render_POST(self, request):
send_cors(request)
error, args = get_args(request, ('phone_number', 'country', 'client_secret', 'send_attempt'))
if error:
request.setResponseCode(400)
return error
raw_phone_number = args['phone_number']
country = args['country']
clientSecret = args['client_secret']
sendAttempt = args['send_attempt']
try:
phone_number_object = phonenumbers.parse(raw_phone_number, country)
except Exception as e:
logger.warn("Invalid phone number given: %r", e)
request.setResponseCode(400)
return {'errcode': 'M_INVALID_PHONE_NUMBER', 'error': "Invalid phone number" }
msisdn = phonenumbers.format_number(
phone_number_object, phonenumbers.PhoneNumberFormat.E164
)[1:]
# International formatted number. The same as an E164 but with spaces
# in appropriate places to make it nicer for the humans.
intl_fmt = phonenumbers.format_number(
phone_number_object, phonenumbers.PhoneNumberFormat.INTERNATIONAL
)
resp = None
try:
sid = self.sydent.validators.msisdn.requestToken(
phone_number_object, clientSecret, sendAttempt, None
)
except DestinationRejectedException:
logger.error("Destination rejected for number: %s", msisdn);
request.setResponseCode(400)
resp = {'errcode': 'M_DESTINATION_REJECTED', 'error': 'Phone numbers in this country are not currently supported'}
except Exception as e:
logger.error("Exception sending SMS: %r", e);
request.setResponseCode(500)
resp = {'errcode': 'M_UNKNOWN', 'error':'Internal Server Error'}
if not resp:
resp = {
'success': True, 'sid': str(sid),
'msisdn': msisdn, 'intl_fmt': intl_fmt,
}
return resp
@jsonwrap
def render_OPTIONS(self, request):
send_cors(request)
request.setResponseCode(200)
return {}
class MsisdnValidateCodeServlet(Resource):
isLeaf = True
def __init__(self, syd):
self.sydent = syd
def render_GET(self, request):
send_cors(request)
err, args = get_args(request, ('token', 'sid', 'client_secret'))
if err:
return err
resp = self.do_validate_request(args)
if 'success' in resp and resp['success']:
msg = "Verification successful! Please return to your Matrix client to continue."
if 'next_link' in args:
next_link = args['next_link']
request.setResponseCode(302)
request.setHeader("Location", next_link)
else:
msg = "Verification failed: you may need to request another verification text"
templateFile = self.sydent.cfg.get('http', 'verify_response_template')
request.setHeader("Content-Type", "text/html")
return open(templateFile).read() % {'message': msg}
@jsonwrap
def render_POST(self, request):
send_cors(request)
err, args = get_args(request, ('token', 'sid', 'client_secret'))
if err:
return err
return self.do_validate_request(args)
def do_validate_request(self, args):
sid = args['sid']
tokenString = args['token']
clientSecret = args['client_secret']
try:
resp = self.sydent.validators.msisdn.validateSessionWithToken(sid, clientSecret, tokenString)
except IncorrectClientSecretException:
return {'success': False, 'errcode': 'M_INCORRECT_CLIENT_SECRET',
'error': "Client secret does not match the one given when requesting the token"}
except SessionExpiredException:
return {'success': False, 'errcode': 'M_SESSION_EXPIRED',
'error': "This validation session has expired: call requestToken again"}
if not resp:
resp = {'success': False}
return resp
@jsonwrap
def render_OPTIONS(self, request):
send_cors(request)
request.setResponseCode(200)
return {}
```
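The E.164 / international formatting split used above is easy to see in isolation with the `phonenumbers` library itself. A small illustration follows; the number is from the UK range reserved for fiction, and the commented outputs are what I would expect rather than captured output.

```python
# Standalone illustration of the two phonenumbers formats used by the servlet.
import phonenumbers

num = phonenumbers.parse("020 7946 0018", "GB")
e164 = phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.E164)
intl = phonenumbers.format_number(num, phonenumbers.PhoneNumberFormat.INTERNATIONAL)

print(e164)   # expected: '+442079460018' -> the servlet strips the '+' with [1:]
print(intl)   # expected: '+44 20 7946 0018'
```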
#### File: http/servlets/threepidunbindservlet.py
```python
import json
import logging
from sydent.http.servlets import get_args, jsonwrap
from sydent.hs_federation.verifier import NoAuthenticationError
from signedjson.sign import SignatureVerifyException
from twisted.web.resource import Resource
from twisted.web import server
from twisted.internet import defer
logger = logging.getLogger(__name__)
class ThreePidUnbindServlet(Resource):
def __init__(self, sydent):
self.sydent = sydent
def render_POST(self, request):
self._async_render_POST(request)
return server.NOT_DONE_YET
@defer.inlineCallbacks
def _async_render_POST(self, request):
try:
try:
body = json.load(request.content)
except ValueError:
request.setResponseCode(400)
request.write(json.dumps({'errcode': 'M_BAD_JSON', 'error': 'Malformed JSON'}))
request.finish()
return
missing = [k for k in ("threepid", "mxid") if k not in body]
if len(missing) > 0:
request.setResponseCode(400)
msg = "Missing parameters: "+(",".join(missing))
request.write(json.dumps({'errcode': 'M_MISSING_PARAMS', 'error': msg}))
request.finish()
return
threepid = body['threepid']
mxid = body['mxid']
if 'medium' not in threepid or 'address' not in threepid:
request.setResponseCode(400)
request.write(json.dumps({'errcode': 'M_MISSING_PARAMS', 'error': 'Threepid lacks medium / address'}))
request.finish()
return
try:
origin_server_name = yield self.sydent.sig_verifier.authenticate_request(request, body)
except SignatureVerifyException as ex:
request.setResponseCode(401)
request.write(json.dumps({'errcode': 'M_FORBIDDEN', 'error': ex.message}))
request.finish()
return
except NoAuthenticationError as ex:
request.setResponseCode(401)
request.write(json.dumps({'errcode': 'M_FORBIDDEN', 'error': ex.message}))
request.finish()
return
except:
logger.exception("Exception whilst authenticating unbind request")
request.setResponseCode(500)
request.write(json.dumps({'errcode': 'M_UNKNOWN', 'error': 'Internal Server Error'}))
request.finish()
return
            if not mxid.endswith(':' + origin_server_name):
                request.setResponseCode(403)
                request.write(json.dumps({'errcode': 'M_FORBIDDEN', 'error': 'Origin server name does not match mxid'}))
                request.finish()
                return
            res = self.sydent.threepidBinder.removeBinding(threepid, mxid)
request.write(json.dumps({}))
request.finish()
except Exception as ex:
logger.exception("Exception whilst handling unbind")
request.setResponseCode(500)
request.write(json.dumps({'errcode': 'M_UNKNOWN', 'error': ex.message}))
request.finish()
``` |
{
"source": "jpmolinamatute/labs",
"score": 4
} |
#### File: server/Python/init.py
```python
class SimpleClass():
def __init__(self):
print("hello")
def yell(self):
print("Yelling!")
class ExtendedClass(SimpleClass):
def __init__(self):
super().__init__()
print("Exteded")
s = "World"
x = SimpleClass()
y = ExtendedClass()
``` |
{
"source": "jpmolinamatute/randomness",
"score": 3
} |
#### File: randomness/randomness/db_library.py
```python
from copy import deepcopy
from .db import DB
from .common import Track_List, Music_Table, Mark
def is_valid_mark_order(mark: Mark) -> bool:
return "order" in mark and isinstance(mark["order"], int) and mark["order"] >= 0
def is_valid_mark_min(mark: Mark) -> bool:
return "min_mark" in mark and isinstance(mark["min_mark"], int) and mark["min_mark"] >= 1
def is_valid_mark_weight(mark: Mark) -> bool:
return "weight" in mark and isinstance(mark["weight"], float) and mark["weight"] > 0.0
class Library(DB):
def __init__(self, filepath: str, mark_list: list[Mark]):
super().__init__("Library", "library", filepath)
self.add_marks(mark_list)
self.history_table = "history"
self.create_table()
def add_marks(self, mark_list: list[Mark]) -> None:
valid = False
weight_total = 0.0
for mark in mark_list:
if is_valid_mark_order(mark) and is_valid_mark_min(mark) and is_valid_mark_weight(mark):
valid = True
weight_total += mark["weight"]
if valid and weight_total == 1.0:
self.mark_list = mark_list
else:
self.logger.warning("WARNING: Generator list is empty or invalid")
self.mark_list = []
def reset_table(self) -> None:
self.logger.debug(f"Reseting table {self.table}")
self.execute(f"DROP TABLE IF EXISTS {self.table};")
self.create_table()
def create_table(self) -> None:
self.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.table}(
uri TEXT NOT NULL PRIMARY KEY,
name TEXT NOT NULL,
added_at TEXT NOT NULL,
duration_ms REAL NOT NULL,
album_uri TEXT NOT NULL,
album_name TEXT NOT NULL,
artists_uri TEXT NOT NULL,
artists_name TEXT NOT NULL
);
"""
)
self.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.history_table}(
uri TEXT NOT NULL PRIMARY KEY,
count INTEGER DEFAULT 1,
FOREIGN KEY(uri) REFERENCES {self.table}(uri)
);
"""
)
def sample(self, limit: int, mark: Mark, old_track_list: Track_List) -> Track_List:
func = lambda row: row[0]
min_point = mark["min_mark"]
max_point = mark["max_mark"] if "max_mark" in mark else None
sub_limit = int(mark["weight"] * limit)
not_id = ""
values = deepcopy(old_track_list)
for _ in values:
not_id += "?, "
not_id = not_id[:-2]
sql_str = f"""
SELECT uri
FROM {self.table}
WHERE uri NOT IN ({not_id})
AND artists_uri IN (
SELECT artists_uri
FROM {self.table}
GROUP BY artists_uri
"""
values.append(min_point)
if max_point:
sql_str += " HAVING (COUNT(artists_uri) >= ? AND COUNT(artists_uri) < ?)"
values.append(max_point)
else:
sql_str += " HAVING COUNT(artists_uri) >= ?"
values.append(sub_limit)
sql_str += """
)
ORDER BY random()
LIMIT ?;
"""
self.logger.info(f"Processing mark {mark['order']}:")
songs = self.execute(sql_str, tuple(values))
len_song = len(songs)
if sub_limit != len_song:
self.logger.warning(f"Mark {mark['order']} limit was {sub_limit} we got {len_song}")
return list(map(func, songs))
def write_history(self, old_track_list: Track_List) -> None:
sql_str = f"""
INSERT INTO {self.history_table}(uri) VALUES(?)
ON CONFLICT(uri) DO UPDATE SET count=count+1;
"""
sql_track_list: list[tuple] = [(l,) for l in old_track_list]
self.logger.info("Inserting previous tracks played")
self.executemany(sql_str, sql_track_list)
def clear_removed_tracks(self, track_list: Music_Table) -> None:
not_id = ""
sql_track_list: list[str] = []
for t in track_list:
not_id += "?, "
sql_track_list.append(t[0])
not_id = not_id[:-2]
sql_str = f"""
DELETE FROM {self.table}
WHERE uri NOT IN ({not_id});
"""
self.logger.info("Clearing removed tracks")
self.execute(sql_str, tuple(sql_track_list))
def write_table(self, track_list: Music_Table) -> None:
sql_str = f"""
INSERT OR IGNORE INTO {self.table}(uri, name, added_at, duration_ms, album_uri, album_name,
artists_uri, artists_name)
VALUES (?, ?, ?, ?, ?, ?, ?, ?);
"""
self.logger.info("Inserting new tracks")
self.executemany(sql_str, track_list)
def get_sample(self, limit: int, old_track_list: Track_List) -> Track_List:
result_all: Track_List = []
for mark in self.mark_list:
result = self.sample(limit, mark, old_track_list)
result_all.extend(result)
return result_all
``` |
{
"source": "jpmoncao/gestor-de-contas",
"score": 3
} |
#### File: jpmoncao/gestor-de-contas/main.py
```python
from tkinter import messagebox
from lib.arquivo import *
from tkinter import *
arquivo = '.\contas.txt'  # The txt file.
# Check whether the file already exists.
verificar_arquivo = verificarArquivo(arquivo)
if not verificar_arquivo:  # If it does not exist,
    criarArquivo(arquivo)  # create the txt file.
class Aplicativo:
def __init__(self, master=None, arquivo=arquivo):
        # Container 1 // Title
self.container1 = Frame(master)
self.container1['padx'] = 10
self.container1.pack()
        # Container 2 // Description
self.container2 = Frame(master)
self.container2['padx'] = 20
self.container2['pady'] = 3
self.container2.pack()
        # Container 3 // Value
self.container3 = Frame(master)
self.container3['padx'] = 20
self.container3['pady'] = 3
self.container3.pack()
        # Container 4 // Add account
self.container4 = Frame(master)
self.container4['padx'] = 20
self.container4['pady'] = 4
self.container4.pack()
        # Container 5 // Remove account
self.container5 = Frame(master)
self.container5['padx'] = 20
self.container5['pady'] = 2
self.container5.pack()
# Container 6 //Listbox
self.container6 = Frame(master)
self.container6['padx'] = 20
self.container6['pady'] = 30
self.container6.pack()
        # Title text: GESTOR DE CONTAS
self.titulo = Label(self.container1, text="GESTOR DE CONTAS")
self.titulo["font"] = ("Corbel", "14", "italic")
self.titulo.pack()
        # Text field: Description
self.input_descLabel = Label(self.container2, text='Descrição')
self.input_descLabel.pack(side=LEFT)
self.input_desc = Entry(self.container2)
self.input_desc['width'] = '30'
self.input_desc['font'] = 'Corbel', '10'
self.input_desc.pack()
        # Text field: Value
self.input_valLabel = Label(self.container3, text='Valor ')
self.input_valLabel.pack(side=LEFT)
self.input_val = Entry(self.container3)
self.input_val['width'] = '30'
self.input_val['font'] = 'Arial', '10'
self.input_val.pack()
        # Button: Add account
self.add_conta = Button(self.container4)
self.add_conta['text'] = 'Adicionar conta'
self.add_conta['font'] = 'Corbel', '10'
self.add_conta['width'] = '15'
self.add_conta['command'] = self.cadastrarConta
self.add_conta.pack()
        # Button: Remove account
self.del_conta = Button(self.container5)
self.del_conta['text'] = 'Remover conta'
self.del_conta['font'] = 'Corbel', '10'
self.del_conta['width'] = '15'
self.del_conta['command'] = self.removerConta
self.del_conta.pack()
# Listbox
self.lista = Listbox(self.container6, height=15, width=40)
self.lista.pack(side=LEFT)
self.scrollbar = Scrollbar(self.container6)
self.scrollbar.pack(side=RIGHT, fill=Y)
self.lista.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.lista.yview)
        # Refresh the list values from the txt file
self.lerConta(arquivo)
def cadastrarConta(self):
try:
# Declara "desc", como str inserido em 'descrição'.
desc = self.input_desc.get()
# Declara "valor", como float inserido em 'valor'.
valor = float(self.input_val.get())
# Insire valores formatados na lista.
self.lista.insert(END, f'{desc} - R${valor:.2f}')
except (ValueError, TypeError):
messagebox.showwarning(
title='Aviso!', message='É necessário preencher todos os campos para realizar essa ação.')
with open(".\contas.txt", 'a', encoding='utf-8') as a:
# Adiciona conta no arquivo .txt
a.write(f'{desc.capitalize()};R${valor:.2f}\n')
def lerConta(self, arquivo):
with open(arquivo, 'r', encoding='utf-8') as a:
            # For each line of the file,
            for linha in a:
                # split the values on ';',
                dado = linha.split(';')
                # replace '\n' with '',
                dado[1] = dado[1].replace('\n', '')
                # format the content and
                dados = f'{dado[0]} - {dado[1]}'
                # insert the values into the list.
                self.lista.insert(END, dados)
def removerConta(self):
try:
            # Index of the selected list item.
            index_lista = self.lista.curselection()[0]
        except:
            messagebox.showwarning(
                title='Aviso!', message='Selecione uma conta.')  # Error message if no account is selected.
        else:
            # Delete the selected value from the list.
            self.lista.delete(index_lista)
            with open(".\contas.txt", 'r+', encoding='utf-8') as a:
                # Read the entire txt file.
                listaArquivo = a.readlines()
                # Copy the file's values.
                listaManipulacao = listaArquivo[:]
                # For each index (c) and value (v) in "listaManipulacao",
                for c, v in enumerate(listaManipulacao):
                    # if the index equals the selected one,
                    if c == index_lista:
                        # delete that value from "listaManipulacao".
                        del listaManipulacao[c]
            with open(".\contas.txt", 'w', encoding='utf-8') as a:
                # For each line in "listaManipulacao",
                for linha in listaManipulacao:
                    # write (replace) that line in the file.
                    a.write(linha)
root = Tk()
root.title('Gestor de contas')
root.geometry('300x450')
Aplicativo(root)
root.mainloop()
``` |
{
"source": "jpmondet/Naasgul",
"score": 3
} |
#### File: Naasgul/backend/db_layer.py
```python
from os import getenv
from typing import List, Dict, Any, Optional, Tuple
from time import time
from re import compile as rcompile, IGNORECASE as rIGNORECASE
# from itertools import chain
from pymongo import MongoClient, UpdateMany # type: ignore
from pymongo.errors import DuplicateKeyError as MDDPK # type: ignore
DB_STRING: Optional[str] = getenv("DB_STRING")
if not DB_STRING:
# DB_STRING = "mongodb://mongodb:27017/"
DB_STRING = "mongodb://127.0.0.1:27017/"
DB_CLIENT: MongoClient = MongoClient(DB_STRING)
DB = DB_CLIENT.automapping
# Collections that avoid data duplication (target)
# All nodes infos of the graph
NODES_COLLECTION = DB.nodes
# All ifaces Stats by devices
STATS_COLLECTION = DB.stats
# All ifaces current highest utilization (to colorize links accordingly)
UTILIZATION_COLLECTION = DB.utilization
# All links infos of the graph (neighborships)
LINKS_COLLECTION = DB.links
def prep_db_if_not_exist() -> None:
"""If db is empty, we create proper indexes."""
if (
get_entire_collection(NODES_COLLECTION)
and get_entire_collection(LINKS_COLLECTION)
and get_entire_collection(STATS_COLLECTION)
and get_entire_collection(UTILIZATION_COLLECTION)
):
# Looks like everything is ready
return
print("Preping db since at least one collection is empty")
# We ensure that entries will be unique
# (this is a mongodb feature)
NODES_COLLECTION.create_index([("device_name", 1)], unique=True)
LINKS_COLLECTION.create_index(
[("device_name", 1), ("iface_name", 1), ("neighbor_name", 1), ("neighbor_iface", 1)],
unique=True,
)
STATS_COLLECTION.create_index(
[("device_name", 1), ("iface_name", 1), ("timestamp", 1)], unique=True
)
UTILIZATION_COLLECTION.create_index([("device_name", 1), ("iface_name", 1)], unique=True)
def get_entire_collection(mongodb_collection) -> List[Dict[str, Any]]: # type: ignore
"""Returns the entire collection passed in parameter as a list"""
return list(mongodb_collection.find({}, {"_id": False}))
def get_all_nodes() -> List[Dict[str, Any]]:
"""Returns all nodes as an iterator"""
return get_entire_collection(NODES_COLLECTION)
def get_nodes_by_patterns(patterns: List[str]) -> List[Dict[str, Any]]:
"""Returns all nodes matched as an iterator"""
return list(
NODES_COLLECTION.find(
{"$or": [{"device_name": rcompile(pattern, rIGNORECASE)} for pattern in patterns]},
{"_id": False},
)
)
def get_node(node_name: str) -> Dict[str, Any]:
"""Returns a single exact node from the db"""
return NODES_COLLECTION.find_one({"device_name": node_name}, {"_id": False}) # type: ignore
def get_all_links() -> List[Dict[str, Any]]:
"""Returns all links as an iterator"""
return get_entire_collection(LINKS_COLLECTION)
def get_link(
name_node1: str, iface_id_node1: str, name_node2: str, iface_id_node2: str
) -> Dict[str, Any]:
"""Returns a single exact link from the db"""
return LINKS_COLLECTION.find_one( # type: ignore
{
"device_name": name_node1,
"iface_name": iface_id_node1,
"neighbor_name": name_node2,
"neighbor_iface": iface_id_node2,
}
)
def get_links_by_patterns(patterns: List[str]) -> List[Dict[str, Any]]:
"""Returns all links matched as an iterator"""
return list(
LINKS_COLLECTION.find(
{
"$or": [
{"device_name": rcompile(pattern, rIGNORECASE)} for pattern in patterns
] # + [
# {"neighbor_name": rcompile(pattern, rIGNORECASE)} for pattern in patterns
# ]
}
)
)
def get_all_highest_utilizations() -> Dict[str, int]:
"""Calculates and returns all highest links utilizations
as a dict. Keys are constructed as 'device_name+iface_name'"""
utilizations: Dict[str, int] = {}
current_timestamp: int = int(time())
for utilization in get_entire_collection(UTILIZATION_COLLECTION):
id_utilz: str = utilization["device_name"] + utilization["iface_name"]
if utilizations.get(id_utilz):
continue
try:
if current_timestamp - utilization["timestamp"] > 1300:
# utilization is expired... We just return 0 as 'unknown'
# (remember, it's just to colorize links so there's no use to show
# a link red if its utilization possibly went down already)
highest_utilization: int = 0
else:
if not utilization["prev_timestamp"]:
highest_utilization = 0
elif not utilization["prev_utilization"]:
highest_utilization = 0
else:
interval: int = utilization["timestamp"] - utilization["prev_timestamp"]
interval = max(interval, 1)
highest_utilization = max(
utilization["last_utilization"] - utilization["prev_utilization"], 0
)
highest_utilization = int(highest_utilization / interval)
except KeyError:
highest_utilization = 0
utilizations[id_utilz] = highest_utilization
return utilizations
def get_all_speeds() -> Dict[str, int]:
"""Returns all links speeds as a dict.
Keys are constructed as 'device_name+iface_name'"""
speeds: Dict[str, int] = {}
for stat in get_entire_collection(STATS_COLLECTION):
id_speed = stat["device_name"] + stat["iface_name"]
if speeds.get(id_speed):
continue
speeds[id_speed] = stat["speed"]
return speeds
def get_links_device(device: str) -> Any:
"""Returns all links of one specific device (also looks
at links on which this device is appearing as a neighbor)"""
query: List[Dict[str, str]] = [{"device_name": device}, {"neighbor_name": device}]
return LINKS_COLLECTION.find({"$or": query}, {"_id": False})
def get_utilizations_device(device: str) -> List[Dict[str, Any]]:
"""Returns all links utilizations of one specific device"""
return list(UTILIZATION_COLLECTION.find({"device_name": device}, {"_id": False}))
def get_stats_devices(devices: List[str]) -> List[Dict[str, Any]]:
"""Returns all stats of all devices passed in parameter"""
query: List[Dict[str, str]] = [{"device_name": device} for device in devices]
return list(STATS_COLLECTION.find({"$or": query}, {"_id": False}))
def get_speed_iface(device_name: str, iface_name: str) -> int:
"""Returns speed (max bandwidth, not utilization) of a specific interface"""
speed: int = 1
try:
*_, laststat = STATS_COLLECTION.find({"device_name": device_name, "iface_name": iface_name})
speed = laststat["speed"]
    except (KeyError, IndexError, ValueError) as err:
print("oops? " + str(err))
speed = 10
return speed
def get_latest_utilization(device_name: str, iface_name: str) -> Tuple[int, int]:
"""Returns last link utilization) of a specific interface"""
utilization_line = UTILIZATION_COLLECTION.find_one(
{"device_name": device_name, "iface_name": iface_name}
)
try:
return utilization_line["last_utilization"], utilization_line["timestamp"]
except (KeyError, TypeError):
return 0, 0
def add_iface_stats(stats: List[Dict[str, Any]]) -> None:
"""Tries to insert all stats from parameter directly to db"""
STATS_COLLECTION.insert_many(stats)
def add_node( # pylint: disable=too-many-arguments
node_name: str,
groupx: Optional[int] = 11,
groupy: Optional[int] = 11,
image: Optional[str] = "router.png",
node_description: Optional[str] = "",
to_poll: Optional[bool] = True,
) -> None:
"""Inserts (or updates) a node into db"""
try:
NODES_COLLECTION.insert_one(
{
"device_name": node_name,
"device_descr": node_description,
"groupx": groupx,
"groupy": groupy,
"image": image,
"to_poll": to_poll,
}
)
except MDDPK:
NODES_COLLECTION.update_many(
{"device_name": node_name},
{
"$set": {
"device_name": node_name,
"device_descr": node_description,
"groupx": groupx,
"groupy": groupy,
"image": image,
"to_poll": to_poll,
}
},
)
def add_link( # pylint: disable=too-many-arguments
node_name: str,
neigh_name: str,
local_iface: str,
neigh_iface: str,
local_iface_descr: Optional[str] = "",
neigh_iface_descr: Optional[str] = "",
) -> None:
"""Tries to insert a link directly into db"""
try:
LINKS_COLLECTION.insert_one(
{
"device_name": node_name,
"iface_name": local_iface,
"iface_descr": local_iface_descr,
"neighbor_name": neigh_name,
"neighbor_iface": neigh_iface,
"neighbor_iface_descr": neigh_iface_descr,
}
)
except MDDPK:
LINKS_COLLECTION.update_many(
{
"device_name": node_name,
"neighbor_name": neigh_name,
"iface_name": local_iface,
"neighbor_iface": neigh_iface,
},
{
"$set": {
"device_name": node_name,
"iface_name": local_iface,
"iface_descr": local_iface_descr,
"neighbor_name": neigh_name,
"neighbor_iface": neigh_iface,
"neighbor_iface_descr": neigh_iface_descr,
}
},
)
def add_fake_iface_utilization( # pylint: disable=too-many-arguments
device_name: str,
iface_name: str,
prev_utilization: int = 0,
last_utilization: int = 0,
timestamp: float = time(),
prev_timestamp: float = 0,
) -> None:
"""Inserts (or updates) default (0) link utilization into db"""
UTILIZATION_COLLECTION.update_one(
{"device_name": f"{device_name}", "iface_name": f"{iface_name}"},
{
"$set": {
"device_name": f"{device_name}",
"iface_name": f"{iface_name}",
"prev_utilization": prev_utilization,
"last_utilization": last_utilization,
"prev_timestamp": prev_timestamp,
"timestamp": timestamp,
}
},
True,
)
def add_fake_iface_stats(
device_name: str,
iface_name: str,
timestamp: float = time(),
in_bytes: int = 0,
out_bytes: int = 0,
) -> None:
"""Inserts fake stats for a specific interface into db"""
STATS_COLLECTION.insert_one(
{
"device_name": f"{device_name}",
"iface_name": f"{iface_name}",
"timestamp": int(timestamp),
"mtu": 1500,
"mac": "",
"speed": 10,
"in_discards": 0,
"in_errors": 0,
"out_discards": 0,
"out_errors": 0,
"in_bytes": in_bytes,
"in_ucast_pkts": 0,
"in_mcast_pkts": 0,
"in_bcast_pkts": 0,
"out_bytes": out_bytes,
"out_ucast_pkts": 0,
"out_mcast_pkts": 0,
"out_bcast_pkts": 0,
}
)
def bulk_update_collection(mongodb_collection, list_tuple_key_query) -> None: # type: ignore
"""Update massively a collection. It uses the special 'UpdateMany'
pymongo object :
# (https://pymongo.readthedocs.io/en/stable/api/pymongo/collection.html\
# ?highlight=update#pymongo.collection.Collection.update_many)
"""
request: List[UpdateMany] = []
for query, data in list_tuple_key_query:
request.append(UpdateMany(query, {"$set": data}, True))
mongodb_collection.bulk_write(request)
def delete_node(node_name: str) -> None:
"""Deletes everything related to a specific node from db.
(everything means node, links, stats & utilizations entries)"""
NODES_COLLECTION.delete_one({"device_name": node_name})
LINKS_COLLECTION.delete_many({"device_name": node_name})
LINKS_COLLECTION.delete_many({"neighbor_name": node_name})
STATS_COLLECTION.delete_many({"device_name": node_name})
UTILIZATION_COLLECTION.delete_many({"neighbor_name": node_name})
def delete_link(
node_name: str,
neigh_name: str,
local_iface: str,
neigh_iface: str,
) -> None:
"""Deletes everything related to a specific node from db.
(everything means node, links, stats & utilizations entries)"""
LINKS_COLLECTION.delete_one(
{
"device_name": node_name,
"neighbor_name": neigh_name,
"iface_name": local_iface,
"neighbor_iface": neigh_iface,
}
)
STATS_COLLECTION.delete_many(
{
"device_name": node_name,
"iface_name": local_iface,
}
)
STATS_COLLECTION.delete_many(
{
"device_name": neigh_name,
"iface_name": neigh_iface,
}
)
UTILIZATION_COLLECTION.delete_many(
{
"device_name": node_name,
"iface_name": local_iface,
}
)
UTILIZATION_COLLECTION.delete_many(
{
"device_name": neigh_name,
"iface_name": neigh_iface,
}
)
def disable_node(node_name: str) -> None:
"""Disable polling on a node"""
NODES_COLLECTION.update_one(
{"device_name": node_name},
{
"$set": {
"to_poll": False,
}
},
)
```
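A minimal end-to-end sketch of these helpers is shown below: prepare the collections, register a node and a link, push one fake stats/utilization sample, then read back the computed per-interface utilization. The device and interface names are placeholders, a reachable MongoDB at `DB_STRING` is assumed, and the script is assumed to run next to `db_layer.py` so the module imports directly.

```python
# Usage sketch for the db_layer helpers above; 'leaf1', 'spine1' and 'eth1'
# are made-up names, and the expected output is an estimate, not captured.
from time import time

from db_layer import (
    prep_db_if_not_exist, add_node, add_link,
    add_fake_iface_stats, add_fake_iface_utilization,
    get_all_highest_utilizations,
)

prep_db_if_not_exist()                      # create the unique indexes on first run
add_node("leaf1", node_description="demo leaf")
add_link("leaf1", "spine1", "eth1", "eth1")

now = time()
add_fake_iface_stats("leaf1", "eth1", timestamp=now, in_bytes=1000, out_bytes=500)
# utilization is derived as (last - prev) / (timestamp - prev_timestamp)
add_fake_iface_utilization("leaf1", "eth1",
                           prev_utilization=1000, last_utilization=2000,
                           timestamp=now, prev_timestamp=now - 60)

print(get_all_highest_utilizations())       # expected roughly {'leaf1eth1': 16}
```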
#### File: Naasgul/backend/snmp_functions.py
```python
from typing import List, Dict, Any, Union, Optional, Iterator, Tuple
from pysnmp import hlapi # type: ignore
from pysnmp.entity.rfc3413.oneliner import cmdgen # type: ignore
# from pysnmp.error import PySnmpError # type: ignore
IFACES_TABLE_TO_COUNT: str = "1.3.6.1.2.1.2.1.0"
NEEDED_MIBS_FOR_STATS: Dict[str, str] = {
"iface_name": "1.3.6.1.2.1.2.2.1.2", # ifDescr
"iface_alias": "1.3.6.1.2.1.31.1.1.1.18", # ifAlias
"mtu": "1.3.6.1.2.1.2.2.1.4", # ifMtu
"speed": "1.3.6.1.2.1.31.1.1.1.15", # ifHighSpeed
"mac": "1.3.6.1.2.1.2.2.1.6", # ifPhysAddress
"in_disc": "1.3.6.1.2.1.2.2.1.13", # ifInDiscards
"in_err": "1.3.6.1.2.1.2.2.1.14", # ifInErrors
"out_disc": "1.3.6.1.2.1.2.2.1.19", # ifOutDiscards
"out_err": "1.3.6.1.2.1.2.2.1.20", # ifOutErrors
"in_octets": "1.3.6.1.2.1.31.1.1.1.6", # ifHCInOctets
"in_ucast_pkts": "1.3.6.1.2.1.31.1.1.1.7", # ifHCInUcastPkts
"in_mcast_pkts": "1.3.6.1.2.1.31.1.1.1.8", # ifHCInMulticastPkts
"in_bcast_pkts": "1.3.6.1.2.1.31.1.1.1.9", # ifHCInBroadcastPkts
"out_octets": "1.3.6.1.2.1.31.1.1.1.10", # ifHCOutOctets
"out_ucast_pkts": "1.3.6.1.2.1.31.1.1.1.11", # ifHCOutUcastPkts
"out_mcast_pkts": "1.3.6.1.2.1.31.1.1.1.12", # ifHCOutMulticastPkts
"out_bcast_pkts": "1.3.6.1.2.1.31.1.1.1.13", # ifHCOutBroadcastPkts
}
NEEDED_MIBS_FOR_LLDP: Dict[str, str] = {
"lldp_neigh_name": "1.0.8802.1.1.2.1.4.1.1.9", # lldpRemSysName
"lldp_neigh_iface": "1.0.8802.1.1.2.1.4.1.1.7", # lldpRemPortId
"lldp_neigh_ip": "1.0.8802.1.1.2.1.4.2.1", # lldpRemManAddrEntry
"lldp_local_iface": "1.0.8802.1.1.2.1.3.7.1.3", # lldpLocPortId
"lldp_neigh_sys_descr": "1.0.8802.1.1.2.1.4.1.1.10", # lldpRemSysDesc
"lldp_neigh_iface_descr": "1.0.8802.1.1.2.1.4.1.1.8", # lldpRemPortDesc
}
def get_snmp_creds(
snmp_user: Optional[str] = "public",
snmp_auth_pwd: Optional[str] = None,
snmp_priv_pwd: Optional[str] = None,
) -> Union[hlapi.CommunityData, hlapi.UsmUserData]:
"""Returns snmp v2 or v3 pysnmp credentials depending on parameters"""
snmpv2_user: str = ""
if snmp_user and snmp_auth_pwd and snmp_priv_pwd:
return get_snmp_v3_creds(snmp_user, snmp_auth_pwd, snmp_priv_pwd)
if snmp_user:
snmpv2_user = snmp_user
return get_snmp_v2_creds(snmpv2_user)
def get_snmp_v2_creds(snmp_community: str) -> hlapi.CommunityData:
"""Returns snmp v2 pysnmp credentials"""
return hlapi.CommunityData(snmp_community)
def get_snmp_v3_creds(snmp_user: str, snmp_auth_pwd: str, snmp_priv_pwd: str) -> hlapi.UsmUserData:
"""Returns snmp v3 pysnmp credentials"""
return hlapi.UsmUserData(
snmp_user,
snmp_auth_pwd,
snmp_priv_pwd,
authProtocol=cmdgen.usmHMACSHAAuthProtocol,
privProtocol=cmdgen.usmAesCfb128Protocol,
)
def construct_object_types(list_of_oids: List[str]) -> List[hlapi.ObjectType]:
"""Builds and returns a list of special 'ObjectType'
from pysnmp"""
object_types: List[hlapi.ObjectType] = []
for oid in list_of_oids:
object_types.append(hlapi.ObjectType(hlapi.ObjectIdentity(oid)))
return object_types
def cast(value: Any) -> Any:
"""Casts and return a value depending on its real type"""
try:
return int(value)
except (ValueError, TypeError):
try:
return float(value)
except (ValueError, TypeError):
try:
return str(value)
except (ValueError, TypeError):
pass
return value
def fetch(
handler: Iterator[Tuple[str, str, int, Tuple[str, Any]]], count: Optional[int] = 1000
) -> List[Dict[str, str]]:
"""Actually getting snmp values from a device and
returns a list of the results retrieved"""
result: List[Dict[str, str]] = []
if count:
for _ in range(count):
try:
error_indication, error_status, _, var_binds = next(handler)
if not error_indication and not error_status:
items: Dict[str, Any] = {}
for var_bind in var_binds:
items[str(var_bind[0])] = cast(var_bind[1])
result.append(items)
else:
raise RuntimeError(f"Got SNMP error: {error_indication}")
except StopIteration:
break
return result
def get_table(
target: str,
oids: List[str],
credentials: Union[hlapi.CommunityData, hlapi.UsmUserData],
port: int = 161,
engine: hlapi.SnmpEngine = hlapi.SnmpEngine(),
context: hlapi.ContextData = hlapi.ContextData(),
) -> List[Dict[str, str]]:
"""Prepares the handler to fetch snmp oids as a table
and returns the actual values return by fetch"""
handler: Iterator[Tuple[str, str, int, Tuple[str, Any]]] = hlapi.nextCmd(
engine,
credentials,
hlapi.UdpTransportTarget((target, port)),
context,
*construct_object_types(oids),
lexicographicMode=False,
)
return fetch(handler)
def get(
target: str,
oids: List[str],
credentials: Union[hlapi.CommunityData, hlapi.UsmUserData],
port: int = 161,
engine: hlapi.SnmpEngine = hlapi.SnmpEngine(),
context: hlapi.ContextData = hlapi.ContextData(),
) -> Dict[str, Any]:
"""Prepares the handler to fetch snmp oids and
returns the actual values return by fetch"""
handler: Iterator[Tuple[str, str, int, Tuple[str, Any]]] = hlapi.getCmd(
engine,
credentials,
hlapi.UdpTransportTarget((target, port)),
context,
*construct_object_types(oids),
)
return fetch(handler, 1)[0]
def get_bulk(
target: str,
oids: List[str],
credentials: Union[hlapi.CommunityData, hlapi.UsmUserData],
count: int,
start_from: int = 0,
port: int = 161,
engine: hlapi.SnmpEngine = hlapi.SnmpEngine(),
context: hlapi.ContextData = hlapi.ContextData(),
) -> List[Dict[str, str]]:
"""Prepares the handler to fetch snmp oids with a bulk
cmd and returns the actual values return by fetch"""
handler: Iterator[Tuple[str, str, int, Tuple[str, Any]]] = hlapi.bulkCmd(
engine,
credentials,
hlapi.UdpTransportTarget((target, port)),
context,
start_from,
count,
*construct_object_types(oids),
)
return fetch(handler, count)
def get_bulk_auto(
target: str,
oids: List[str],
credentials: Union[hlapi.CommunityData, hlapi.UsmUserData],
count_oid: str,
start_from: int = 0,
port: int = 161,
engine: hlapi.SnmpEngine = hlapi.SnmpEngine(),
context: hlapi.ContextData = hlapi.ContextData(),
) -> List[Dict[str, str]]:
"""Basically tries to automate get_bulk by discovering how much
we have to count with a 'count_oid'"""
count: int = get(target, [count_oid], credentials, port, engine, context)[count_oid]
return get_bulk(target, oids, credentials, count, start_from, port, engine, context)
# Not an actual snmp func but it's used in the context of snmp scraping
def split_list(list_to_split: List[Any], size: int) -> Iterator[List[Any]]:
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(list_to_split), size):
yield list_to_split[i : i + size]
```
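As a quick illustration, the helpers above can be combined to walk the interface counter table of a single device. The target address and community string below are placeholders; only functions and OID constants defined in this module (assumed importable as `snmp_functions`) are used.

```python
# Sketch of pulling the interface counters from one device with the helpers
# above; '192.0.2.10' and the 'public' community are placeholders.
from snmp_functions import (
    get_snmp_creds, get_bulk_auto, NEEDED_MIBS_FOR_STATS, IFACES_TABLE_TO_COUNT,
)

creds = get_snmp_creds("public")            # SNMPv2c; pass auth/priv passwords for v3
rows = get_bulk_auto(
    "192.0.2.10",
    list(NEEDED_MIBS_FOR_STATS.values()),
    creds,
    count_oid=IFACES_TABLE_TO_COUNT,        # ifNumber tells us how many rows to walk
)
for row in rows:
    # each row maps a fully qualified OID (oid.ifIndex) to its value
    print(row)
```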
#### File: Naasgul/tests/test_dblayer.py
```python
import sys
import os
from typing import Dict, List, Any, Tuple
from time import time
import pytest
from pymongo.errors import DuplicateKeyError as MDDPK # type: ignore
from add_fake_data_to_db import delete_all_collections_datas, add_fake_datas
sys.path.append(os.path.realpath(os.path.dirname(__file__) + "/../backend/"))
# pylint:disable=import-error, wrong-import-position
from db_layer import (
prep_db_if_not_exist,
get_all_nodes,
get_all_links,
get_latest_utilization,
get_all_highest_utilizations,
add_iface_stats,
add_fake_iface_utilization,
bulk_update_collection,
get_stats_devices,
get_speed_iface,
UTILIZATION_COLLECTION,
add_node,
add_link,
NODES_COLLECTION,
)
def test_db_prep() -> None:
"""Testing if uniqueness constraints are correctly applied on db preparation"""
delete_all_collections_datas()
prep_db_if_not_exist()
node_name: str = "test_duplicate"
add_node(node_name)
with pytest.raises(MDDPK):
NODES_COLLECTION.insert_one({"device_name": node_name})
def test_get_all_nodes() -> None:
"""Test get_all_nodes func by adding
node and retrieve it"""
delete_all_collections_datas()
prep_db_if_not_exist()
node_name: str = "test_node"
add_node(node_name)
nodes: List[Dict[str, Any]] = list(get_all_nodes())
assert len(nodes) == 1
assert nodes[0]["device_name"] == node_name
def test_get_all_links() -> None:
"""Test get_all_links func by adding
a link and retrieve it"""
delete_all_collections_datas()
prep_db_if_not_exist()
node_name: str = "test_node"
neigh_name: str = "test_neigh"
iface_name: str = "e1/1"
neigh_iface_name: str = "e2/1"
add_link(node_name, neigh_name, iface_name, neigh_iface_name)
links: List[Dict[str, Any]] = list(get_all_links())
assert len(links) == 1
assert links[0] == {
"device_name": node_name,
"neighbor_name": neigh_name,
"iface_name": iface_name,
"neighbor_iface": neigh_iface_name,
"iface_descr": "",
"neighbor_iface_descr": "",
}
def test_get_latest_utilization() -> None:
"""Test get_latest_utilization func by adding
fake datas and retrieving latest utilization with
db func"""
delete_all_collections_datas()
prep_db_if_not_exist()
add_fake_datas(12, 5, False, False)
latest: int = -1
timestamp: int = -1
latest, timestamp = get_latest_utilization("fake_device_stage1_1", "1/1")
assert isinstance(latest, int)
assert isinstance(timestamp, int)
assert latest >= 0
assert latest < 10000000
def test_get_latest_utilization_not_existing() -> None:
"""Ensure that get_latest_utilization func
returns 0,0 when the couple device+iface is
not known"""
latest: int = -1
timestamp: int = -1
latest, timestamp = get_latest_utilization("Device_that_not_exist", "1/1")
assert latest == 0
assert timestamp == 0
def test_get_all_highest_utilizations() -> None:
"""Tests getting highest utilization when
timestamp is near 'now time'."""
delete_all_collections_datas()
prep_db_if_not_exist()
add_fake_datas(12, 5, False, False)
device_name: str = "fake_device"
iface_name: str = "1/1"
prev_utilization: int = 1000
timestamp: float = time()
prev_timestamp: float = timestamp - 100
last_utilization: int = 2000
add_fake_iface_utilization(
device_name, iface_name, prev_utilization, last_utilization, timestamp, prev_timestamp
)
assert get_all_highest_utilizations()[device_name + iface_name] == int(1000 / 100)
def test_add_iface_stats() -> None:
"""Test add_iface_stats func by
checking the db after using it"""
delete_all_collections_datas()
prep_db_if_not_exist()
device: str = "fake_device_stage1_1"
iface: str = "1/1"
timestamp: str = "now"
speed: int = 1337
stats_list: List[Dict[str, Any]] = [
{
"device_name": device,
"iface_name": iface,
"timestamp": timestamp,
"speed": speed,
}
]
add_iface_stats(stats_list)
for device_stats in get_stats_devices([device]):
if (
device_stats["iface_name"] == iface
and device_stats["timestamp"] == timestamp
and device_stats["speed"] == speed
):
return
raise ValueError
def test_get_speed_iface() -> None:
"""Test add_iface_stats func by
checking the db after using it"""
delete_all_collections_datas()
prep_db_if_not_exist()
device: str = "fake_device_stage1_1"
iface: str = "1/1"
timestamp: str = "now"
speed: int = 1337
stats_list: List[Dict[str, Any]] = [
{
"device_name": device,
"iface_name": iface,
"timestamp": timestamp,
"speed": speed,
}
]
add_iface_stats(stats_list)
assert get_speed_iface(device, iface) == speed
def test_bulk_update_collection() -> None:
"""Test bulk_update_collection func by
checking the db after using it"""
delete_all_collections_datas()
prep_db_if_not_exist()
device_name: str = "fake_device_stage1_1"
iface_name: str = "6/6"
prev_utilization: int = 1337
prev_timestamp: int = 1337
last_utilization: int = 1337
timestamp: int = 1338
query = {"device_name": device_name, "iface_name": iface_name}
utilization: Dict[str, Any] = {
"device_name": device_name,
"iface_name": iface_name,
"prev_utilization": prev_utilization,
"prev_timestamp": prev_timestamp,
"last_utilization": last_utilization,
"timestamp": timestamp,
}
utilization_list: List[Tuple[Dict[str, str], Dict[str, Any]]] = [(query, utilization)]
bulk_update_collection(UTILIZATION_COLLECTION, utilization_list)
last_db_utilization: int = -1
last_db_timestamp: int = -1
last_db_utilization, last_db_timestamp = get_latest_utilization(device_name, iface_name)
assert last_utilization == last_db_utilization
assert timestamp == last_db_timestamp
``` |
{
"source": "jpmondet/norniring-bgpfabric",
"score": 2
} |
#### File: jpmondet/norniring-bgpfabric/deploy_network.py
```python
from __future__ import (
absolute_import,
division,
generators,
generator_stop,
unicode_literals,
print_function,
nested_scopes,
with_statement,
)
# annotations,
from argparse import ArgumentParser
from nornir import InitNornir
from nornir_ansible.plugins.inventory.ansible import AnsibleInventory
from fabric import Fabric
def main():
""" Main func to initialize Nornir and get started """
parser = ArgumentParser(
description="Script to deploy a working RFC5549 BGP Fabric (and to undeploy it as well)"
)
    parser.add_argument(
        "-u",
        "--undeploy",
        action="store_true",
        help="Unconfigure the deployed Fabric instead of deploying it (default: deploy)",
    )
#TODO: Add an option to do some connectivity tests
args = parser.parse_args()
nr = InitNornir(
runner={
#"plugin": "serial",
"plugin": "threaded",
"options": {
"num_workers": 10,
},
},
inventory={
"plugin": "AnsibleInventory",
"options": {"hostsfile": "hosts"},
},
)
for key, host in nr.inventory.hosts.items():
if host.hostname is None:
host.hostname = key
print("~~~~~~ Deploying with this inventory ~~~~~~")
for name, host in nr.inventory.hosts.items():
vals = host.items()
print(name, vals)
print("########## GROUPS ##############")
for name in nr.inventory.groups:
print(name, nr.inventory.children_of_group(name))
fabric = Fabric(nr)
if args.undeploy:
fabric.undeploy()
else:
fabric.deploy()
if __name__ == "__main__":
main()
```
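The `Fabric` class in the next file scopes every task with Nornir `F` filters on the `role` inventory variable. A tiny standalone sketch of that filtering, assuming the same `hosts` Ansible inventory used above with a `role` attribute on each host:

```python
# Small illustration of the Nornir F-filters that fabric.py uses to scope
# tasks; assumes the same AnsibleInventory 'hosts' file with a 'role' var.
from nornir import InitNornir
from nornir.core.filter import F

nr = InitNornir(
    inventory={"plugin": "AnsibleInventory", "options": {"hostsfile": "hosts"}},
)

fabric_nodes = nr.filter(F(role="spine") | F(role="leaf"))   # union of two roles
servers = nr.filter(F(role="servers"))
not_local = nr.filter(~F(role="local"))                      # negation

print(sorted(fabric_nodes.inventory.hosts))
```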
#### File: jpmondet/norniring-bgpfabric/fabric.py
```python
from __future__ import (
absolute_import,
division,
generators,
generator_stop,
unicode_literals,
print_function,
nested_scopes,
with_statement,
) # , annotations
#from nornir.plugins.tasks import commands, apis, text, files
from nornir_netmiko import netmiko_send_command as commands #commands plugin still doesn't exist on 3.0. We use netmiko instead.
from nornir_netmiko import netmiko_file_transfer
from nornir_utils.plugins.functions import print_result
from nornir_utils.plugins.tasks.files import write_file
from nornir_jinja2.plugins.tasks import template_file, template_string
from nornir.core.filter import F
class Fabric:
def __init__(self, nornir):
self._nornir = nornir
def linux_local_cmd(self, cmd):
local = self._nornir.filter(F(role="local"))
cmd_res = local.run(task=commands, command_string=cmd)
print_result(cmd_res)
@staticmethod
def run_remote_cmd(task, cmd):
res = task.run(commands, command_string=cmd)
print_result(res)
def to_local_file(self, filename, content, path="./resources/"):
hosts = self._nornir.filter(F(platform="linux"))
hosts.run(write_file, filename=path + filename, content=content)
# Api (http_method) is still not implemented on nornir3
#def calling_api(self, url, method):
# local = self._nornir.filter(F(platform="linux"))
# api_res = local.run(task=apis.http_method, method=method, url=url)
# print_result(api_res)
def render_template(self, tplate, path="./templates"):
# hosts = self._nornir.filter(~F(platform="linux"))
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
rendered_cfg = hosts.run(
template_file, template=tplate, path=path
)
# print_result(rendered_cfg)
rendered_cfg_dict = dict()
for name, res in rendered_cfg.items():
rendered_cfg_dict[name] = res.result
return rendered_cfg_dict
def to_remote_file(
self, filename, content, name=None, path="./resources/"
):
# hosts = self._nornir.filter(~F(platform="linux"))
if not name:
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
else:
hosts = self._nornir.filter(F(hostname=name))
command = f'sudo su ; echo "{content}" > {path}{filename}'
hosts.run(commands, command_string=command)
# print_result(res)
@staticmethod
def copy_files(task, src_file, dst_file, named=True):
if named:
task.run(
netmiko_file_transfer,
source_file=f"{src_file}-{task.host.name}",
dest_file=dst_file,
file_system="/",
)
else:
task.run(netmiko_file_transfer, source_file=f"{src_file}", dest_file=dst_file, file_system="/")
def send_j2_command(self, filtered_nr, command_j2):
commands_rendered = filtered_nr.run(
template_string, template=command_j2
)
for name, cmds in commands_rendered.items():
unique_srv = self._nornir.filter(F(hostname=name))
unique_srv.run(self.run_remote_cmd, cmd=cmds.result)
def configuring_interfaces(self):
rendered = self.render_template("interfaces.j2")
for name, config in rendered.items():
self.to_local_file(f"interfaces-{name}", config)
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
res = hosts.run(
task=self.copy_files,
src_file="./resources/interfaces",
dst_file="/tmp/interfaces",
)
print_result(res)
hosts.run(
task=self.run_remote_cmd,
cmd="sudo cp /tmp/interfaces /etc/network/interfaces",
)
def flushing_interfaces(self):
# hosts = self._nornir.filter(~F(platform="linux"))
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
command_j2 = "{% for intf in host.interfaces -%} sudo ip addr flush dev {{ intf.name }} && sudo ifup {{ intf.name }} --force ; {% endfor -%}"
self.send_j2_command(hosts, command_j2)
def net_restart(self):
# hosts = self._nornir.filter(~F(platform="linux"))
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
#command = "sudo systemctl restart networking" # Disrupt admin connection..
#hosts.run(self.run_remote_cmd, cmd=command)
command_j2 = "{% for intf in host.interfaces -%} sudo ifdown {{ intf.name }} && sudo ifup {{ intf.name }} ; {% endfor -%} sudo ifup lo ; "
self.send_j2_command(hosts, command_j2)
def _install_frr_cumulus(self, task):
install_cmds = "sudo apt install -y frr"
res = task.run(task=self.run_remote_cmd, cmd=install_cmds)
print_result(res)
def _install_frr_debian(self, task):
# Trick to retrieve the frr version from the set of servers
# first_srv = next(iter(srvs.inventory.hosts.keys()))
# frr_ver = self._nornir.inventory.hosts[first_srv]["frr_version"]
install_cmds = [
"curl -s https://deb.frrouting.org/frr/keys.asc | sudo apt-key add",
"echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) frr-stable | sudo tee /etc/apt/sources.list.d/frr.list",
"sudo apt-get update -y",
"sudo apt-get install -y frr frr-pythontools",
]
# install_cmds = f"curl -s https://deb.frrouting.org/frr/keys.asc | sudo apt-key add -i ; echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) frr-stable | sudo tee /etc/apt/sources.list.d/frr.list ; sudo apt install -y --allow-unauthenticated frr frr-pythontools"
# install_cmds = "curl -sLO https://github.com/FRRouting/frr/releases/download/frr-6.0.2/frr_6.0.2-0.ubuntu16.04.1_amd64.deb ; sudo apt-get install -y --allow-unauthenticated ./frr_6.0.2-0.ubuntu16.04.1_amd64.deb"
# install_cmds = "sudo apt install -y frr"
for cmd in install_cmds:
res = task.run(task=self.run_remote_cmd, cmd=cmd)
print_result(res)
def install_frr(self):
hosts = self._nornir.filter(F(role="spine") | F(role="leaf"))
res = hosts.run(task=self._install_frr_cumulus)
print_result(res)
hosts = self._nornir.filter(F(role="servers"))
res = hosts.run(task=self._install_frr_debian)
print_result(res)
def configuring_frr(self):
rendered = self.render_template("bgp.j2")
for name, config in rendered.items():
self.to_local_file(f"frrconf-{name}", config)
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
res = hosts.run(
task=self.copy_files,
src_file="./resources/frrconf",
dst_file="/tmp/frr.conf",
)
print_result(res)
res = hosts.run(
task=self.copy_files,
src_file="./templates/daemons",
dst_file="/tmp/daemons",
named=False,
)
print_result(res)
hosts.run(
task=self.run_remote_cmd,
cmd="sudo cp /tmp/frr.conf /etc/frr/frr.conf",
)
hosts.run(
task=self.run_remote_cmd,
cmd="sudo cp /tmp/daemons /etc/frr/daemons",
)
def restart_frr(self):
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
command = "sudo systemctl restart frr"
hosts.run(self.run_remote_cmd, cmd=command)
@staticmethod
def delimiter(action):
print("#" * 50)
print(action)
print("#" * 50)
def deploy(self):
""" Workflow to deploy a fully bgp fabric on CITC """
# self.linux_local_cmd('ls -alh')
# self.calling_api("https://api.chucknorris.io/jokes/random", 'get')
# Installing FRR
self.delimiter("Installing FRR")
self.install_frr()
# Handling interfaces
self.delimiter("Prep ifaces config")
self.configuring_interfaces()
self.delimiter("Flushing Ifaces just in case")
self.flushing_interfaces()
self.delimiter("Restarting the network")
self.net_restart()
# Configuring BGP and restarting FRR on all nodes
self.delimiter("Prep bgp config")
self.configuring_frr()
self.delimiter("Restart frr")
self.restart_frr()
def uninstall_frr(self):
# srvs = self._nornir.filter(F(role="servers"))
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
uninstall_cmds = "sudo apt remove -y frr ; sudo rm -rf /etc/frr/ /tmp/frr.conf /tmp/interfaces /tmp/daemons"
res = hosts.run(task=self.run_remote_cmd, cmd=uninstall_cmds)
print_result(res)
def unconfigure_ifaces(self):
hosts = self._nornir.filter(
F(role="servers") | F(role="spine") | F(role="leaf")
)
res = hosts.run(
task=self.run_remote_cmd,
cmd='echo -e "auto lo\niface lo inet loopback\nauto eth0\niface eth0 inet dhcp" | sudo tee /etc/network/interfaces',
)
def undeploy(self):
""" Unconfigure all the fabric """
self.delimiter("Uninstalling FRR and removing its files")
self.uninstall_frr()
# Handling interfaces
self.delimiter("Unconfigure interfaces")
self.unconfigure_ifaces()
self.delimiter("Flushing Ifaces")
self.flushing_interfaces()
self.delimiter("Restarting the network")
self.net_restart()
``` |
{
"source": "jpmondet/transintentlation",
"score": 2
} |
#### File: transintentlation/tests/cli_test.py
```python
import sys
import os
from click.testing import CliRunner
sys.path.append(os.path.abspath("."))
sys.path.insert(0, os.path.abspath(".."))
from transintentlation.cli import cli
def test_cli_default_success():
runner = CliRunner()
result = runner.invoke(cli, ["./tests/configs/intent.cfg", "./tests/configs/n9k.cfg"])
assert result.exit_code == 0
``` |
{
"source": "jpmorenorj/valkyrie_scripts",
"score": 2
} |
#### File: jpmorenorj/valkyrie_scripts/dEzreal.py
```python
from math import dist
import vk_orbwalker
from valkyrie import *
from helpers.flags import EvadeFlags
from dMoveModule import CurrentTarget, MovePrediction
from helpers.targeting import *
from helpers.damages import calculate_raw_spell_dmg
from helpers.spells import Slot
from boku_no_orbwalker import target_selector, Orbwalker
from time import time
import math
target_selector = None
minion_selector = None
E_Gap_Close, E_Anti_Melee, E_Force_Evade, Q_Harass, Q_Kill_Minion, R_Enabled, E_Enabled, W_Enabled, Q_Enabled, E_Safe_KS = True, True, True, True, True, True, True, True, True, True
Q_Range = 1100 # 1200
W_Range = 1100 # 1200
E_Range = 475
R_Min = 1000
R_Max = 3000
#R_Can_Hit = 3
last_positions = []
last_pos_id = []
def is_immobile(ctx, target):
for buff in target.buffs:
if 'snare' in buff.name.lower():
return True
elif 'stun' in buff.name.lower():
return True
elif 'suppress' in buff.name.lower():
return True
elif 'root' in buff.name.lower():
return True
elif 'taunt' in buff.name.lower():
return True
elif 'sleep' in buff.name.lower():
return True
elif 'knockup' in buff.name.lower():
return True
elif 'binding' in buff.name.lower():
return True
elif 'morganaq' in buff.name.lower():
return True
elif 'jhinw' in buff.name.lower():
return True
return False
def calc_q_damage(ctx, target):
q_dmg = calculate_raw_spell_dmg(ctx.player, ctx.player.spells[Slot.Q])
return q_dmg.calc_against(ctx, ctx.player, target)
def calc_w_damage(ctx, target):
w_dmg = calculate_raw_spell_dmg(ctx.player, ctx.player.spells[Slot.W])
return w_dmg.calc_against(ctx, ctx.player, target)
def calc_e_damage(ctx, target):
e_dmg = calculate_raw_spell_dmg(ctx.player, ctx.player.spells[Slot.E])
return e_dmg.calc_against(ctx, ctx.player, target)
def calc_r_damage(ctx, target):
r_dmg = calculate_raw_spell_dmg(ctx.player, ctx.player.spells[Slot.R])
return r_dmg.calc_against(ctx, ctx.player, target)
def get_enemy_targets(ctx: Context, range):
return ctx.champs.enemy_to(ctx.player).targetable().near(ctx.player, range).get()
def get_minion_targets(ctx: Context, range):
return ctx.minions.enemy_to(ctx.player).targetable().near(ctx.player, range).get()
def get_jg_targets(ctx: Context, range):
return ctx.jungle.enemy_to(ctx.player).targetable().near(ctx.player, range).get()
def lasthit_q(ctx, Q):
if not ctx.player.can_cast_spell(Q):
return
if Q_Kill_Minion:
targetted_minion = minion_selector.get_target(ctx, get_minion_targets(ctx, Q_Range))
untargetted_minions = get_minion_targets(ctx, Q_Range)
ctx.pill("QFarm", Col.Black, Col.Cyan)
if targetted_minion is None:
return
for minion in untargetted_minions:
if minion == targetted_minion:
continue
if minion.dead:
continue
if not minion.health < calc_q_damage(ctx, minion):
continue
predicted_pos = ctx.predict_cast_point(ctx.player, minion, Q)
if predicted_pos is not None:
ctx.cast_spell(Q, predicted_pos)
def harass_q(ctx, Q):
if not ctx.player.can_cast_spell(Q):
return
if Q_Harass:
target = target_selector.get_target(ctx, get_enemy_targets(ctx, Q_Range))
ctx.pill("Harass", Col.Black, Col.Cyan)
if target is None:
return
distance = ctx.player.pos.distance(target.pos)
if distance < ctx.player.atk_range:
return
predicted_pos = ctx.predict_cast_point(ctx.player, target, Q)
if predicted_pos is not None:
ctx.cast_spell(Q, predicted_pos)
def combo_q(ctx, Q):
if not ctx.player.can_cast_spell(Q):
return
if ctx.player.curr_casting is not None:
return
if Q_Enabled:
Current_Target = target_selector.get_target(ctx, get_enemy_targets(ctx, Q_Range))
if Current_Target is None:
return
predicted_pos = MovePrediction.predict_collision(ctx, Q, Current_Target, 2, False)
if predicted_pos is not None and ctx.predict_cast_point(ctx.player, Current_Target, Q) is not None:
ctx.cast_spell(Q, predicted_pos)
def combo_w(ctx, W, Q):
if not ctx.player.can_cast_spell(W):
return
if not ctx.player.can_cast_spell(Q):
return
if ctx.player.curr_casting is not None:
return
if W_Enabled:
Current_Target = target_selector.get_target(ctx, get_enemy_targets(ctx, W_Range))
if Current_Target is None:
return
predicted_Qpos = MovePrediction.predict_collision(ctx, Q, Current_Target, 3, False)
predicted_pos = MovePrediction.predict_collision(ctx, W, Current_Target, 3, False)
if predicted_pos is not None and predicted_Qpos is not None:
ctx.cast_spell(W, predicted_pos)
def point_under_turret(ctx, pos: Vec3):
turrets = ctx.turrets.enemy_to(ctx.player).alive().near(ctx.player.pos, 1250).get()
for turret in turrets:
if pos.distance(turret.pos) <= 915:
return True
return False
def point_has_minion(ctx, pos: Vec3):
minions = ctx.minions.enemy_to(ctx.player).targetable().near(pos, 1250).get()
for minion in minions:
try:
if pos.distance(minion.pos) < 250:
return True
except:
pass
return False
def point_has_enemy_champ(ctx, pos: Vec3):
champs = ctx.champs.enemy_to(ctx.player).targetable().near(pos, 1250).get()
for champ in champs:
try:
if pos.distance(champ.pos) < E_Range+100:
return True
except:
pass
return False
def combo_e(ctx, E, Q, W):
if not ctx.player.can_cast_spell(E):
return
if ctx.player.curr_casting is not None:
return
if E_Enabled:
q2Targets = get_enemy_targets(ctx, Q_Range*2)
bestPoint = None
lowestDistance = 10000
if len(q2Targets) == 0:
return
if len(q2Targets) > 2:
return
for Current_Target in q2Targets:
if Current_Target is None:
return
if ctx.player.pos.distance(Current_Target.pos) <= Q_Range:
return
Target_Killable = False
totalDamage = 0
for buff in Current_Target.buffs:
if 'ezrealwattach' in buff.name:
totalDamage = calc_q_damage(ctx, Current_Target) + calc_w_damage(ctx, Current_Target)
if ctx.player.can_cast_spell(W):
totalDamage = calc_q_damage(ctx, Current_Target) + calc_w_damage(ctx, Current_Target)
totalDamage = calc_q_damage(ctx, Current_Target)
if totalDamage > Current_Target.health:
Target_Killable = True
for point in range(0, 360, 15):
point_temp = math.radians(point)
pX, pY, pZ = E_Range * math.cos(point_temp) + ctx.player.pos.x, ctx.player.pos.y, E_Range * math.sin(point_temp) + ctx.player.pos.z
if Vec3(pX, pY, pZ).distance(Current_Target.pos) < lowestDistance:
if not point_has_minion(ctx, Vec3(pX, pY, pZ)) and not point_has_enemy_champ(ctx, Vec3(pX, pY, pZ)) and not ctx.is_wall_at(Vec3(pX, pY, pZ)) and not point_under_turret(ctx, Vec3(pX, pY, pZ)):
lowestDistance = Vec3(pX, pY, pZ).distance(Current_Target.pos)
bestPoint = Vec3(pX, pY, pZ)
if ctx.player.can_cast_spell(Q) and bestPoint is not None and Target_Killable:
ctx.circle(ctx.w2s(bestPoint), 10, 20, 2, Col.Green)
ctx.cast_spell(E, bestPoint)
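# Illustrative sketch (hypothetical helper, not in the original script): combo_e, the anti-melee
# block and the gap-close block below all repeat the same geometric search -- scan points on a
# circle of radius E_Range around the player and keep the best candidate that is not walled,
# not under an enemy turret and not crowded by enemy champions or minions. prefer_far=True picks
# the farthest safe point from anchor_pos (escaping), prefer_far=False picks the closest (chasing).
def _best_blink_point(ctx, anchor_pos, prefer_far, step_deg=15):
    best_point = None
    best_dist = 0 if prefer_far else 10000
    for deg in range(0, 360, step_deg):
        rad = math.radians(deg)
        candidate = Vec3(E_Range * math.cos(rad) + ctx.player.pos.x,
                         ctx.player.pos.y,
                         E_Range * math.sin(rad) + ctx.player.pos.z)
        if point_has_enemy_champ(ctx, candidate) or point_has_minion(ctx, candidate):
            continue
        if ctx.is_wall_at(candidate) or point_under_turret(ctx, candidate):
            continue
        d = candidate.distance(anchor_pos)
        if (prefer_far and d > best_dist) or (not prefer_far and d < best_dist):
            best_dist = d
            best_point = candidate
    return best_point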
def combo_r(ctx, R):
if not ctx.player.can_cast_spell(R):
return
if ctx.player.curr_casting is not None:
return
if R_Enabled:
possible_targets = get_enemy_targets(ctx, R_Max)
for cur_target in possible_targets:
distance = ctx.player.pos.distance(cur_target.pos)
W_Attached = False
Target_Killable = False
for buff in cur_target.buffs:
if 'ezrealwattach' in buff.name:
W_Attached = True
if W_Attached:
if calc_r_damage(ctx, cur_target) + calc_w_damage(ctx, cur_target) > cur_target.health:
Target_Killable = True
if calc_r_damage(ctx, cur_target) > cur_target.health:
Target_Killable = True
if cur_target is not None and not (distance < R_Min) and not (distance > R_Max) and Target_Killable:
predicted_pos = MovePrediction.predict_collision(ctx, R, cur_target, 10, True)
if predicted_pos is not None:
ctx.cast_spell(R, predicted_pos)
def valkyrie_menu(ctx: Context):
ui = ctx.ui
global Q_Enabled, W_Enabled, E_Enabled, R_Enabled, Q_Kill_Minion, R_Min, R_Max, Q_Harass, E_Gap_Close, E_Anti_Melee, E_Force_Evade, E_Safe_KS#, R_Can_Hit
ui.text('[dEzreal] Doom Ezreal Version [0.3]\nDeveloped by Luck#1337')
ui.separator()
if ui.beginmenu("Explorer Core"):
if ui.beginmenu("[Q] Mystic Shot"):
Q_Enabled = ui.checkbox('Enabled [Q]', Q_Enabled)
ui.endmenu()
if ui.beginmenu("[W] Essence Flux"):
W_Enabled = ui.checkbox('Enabled [W]', W_Enabled)
ui.endmenu()
if ui.beginmenu("[E] Arcane Shift"):
E_Enabled = ui.checkbox('Enabled [E]', E_Enabled)
E_Gap_Close = ui.checkbox('[E] Anti-Gap', E_Gap_Close)
E_Anti_Melee = ui.checkbox('[E] Anti-Melee', E_Anti_Melee)
E_Force_Evade = ui.checkbox('[E] Force Evade', E_Force_Evade)
E_Safe_KS = ui.checkbox('[E] Safe Kill Steal', E_Safe_KS)
ui.endmenu()
if ui.beginmenu("[R] Trueshot Barrage"):
R_Enabled = ui.checkbox('Enabled [R]', R_Enabled)
R_Min = ui.sliderint('Min. [R] Range', R_Min, 0, 1000)
R_Max = ui.sliderint('Max. [R] Range', R_Max, 1000, 5000)
ui.endmenu()
ui.endmenu()
ui.separator()
if ui.beginmenu("Farming Core"):
if ui.beginmenu("[Q] Mystic Shot"):
Q_Kill_Minion = ui.checkbox("[Q] Killable Minion", Q_Kill_Minion)
ui.endmenu()
ui.endmenu()
ui.separator()
if ui.beginmenu("Harass Core"):
if ui.beginmenu("[Q] Mystic Shot"):
Q_Harass = ui.checkbox("[Q] Harass", Q_Harass)
ui.endmenu()
ui.endmenu()
ui.separator()
def valkyrie_on_load(ctx: Context):
global target_selector, minion_selector
if not Orbwalker.Present:
target_selector = None
minion_selector = None
else:
target_selector = Orbwalker.SelectorChampion
minion_selector = Orbwalker.SelectorMonster
cfg = ctx.cfg
cfg.get_bool("Q", Q_Enabled)
cfg.get_bool("W", W_Enabled)
cfg.get_bool("E", E_Enabled)
cfg.get_bool("Q_KS_MINION", Q_Kill_Minion)
cfg.get_bool("E_Gap_Close", E_Gap_Close)
cfg.get_bool("E_Anti_Melee", E_Anti_Melee)
cfg.get_bool("E_Force_Evade", E_Force_Evade)
cfg.get_bool("E_Safe_KS", E_Safe_KS)
cfg.get_bool("R", R_Enabled)
cfg.get_int("R_Min", R_Min)
cfg.get_int("R_Max", R_Max)
def valkyrie_on_save(ctx: Context):
cfg = ctx.cfg
cfg.get_bool("Q", Q_Enabled)
cfg.get_bool("W", W_Enabled)
cfg.get_bool("E", E_Enabled)
cfg.get_bool("Q_KS_MINION", Q_Kill_Minion)
cfg.get_bool("E_Gap_Close", E_Gap_Close)
cfg.get_bool("E_Anti_Melee", E_Anti_Melee)
cfg.get_bool("E_Force_Evade", E_Force_Evade)
cfg.get_bool("E_Safe_KS", E_Safe_KS)
cfg.get_bool("R", R_Enabled)
cfg.get_int("R_Min", R_Min)
cfg.get_int("R_Max", R_Max)
def valkyrie_exec(ctx: Context):
global last_positions, last_pos_id
player = ctx.player
Q = ctx.player.spells[Slot.Q]
W = ctx.player.spells[Slot.W]
E = ctx.player.spells[Slot.E]
R = ctx.player.spells[Slot.R]
highestDistance = 0
bestPoint = None
ctx.pill("Ezreal", Col.Black, Col.Cyan)
if Orbwalker.Attacking:
return
if ctx.player.dead:
return
if ctx.player.curr_casting is not None:
return
if E_Force_Evade:
cols = ctx.collisions_for(ctx.player)
for col in cols:
if EvadeFlags.EvadeEndTime + 0.15 >= col.time_until_impact and EvadeFlags.CurrentEvadePriority >= 2 and ctx.player.can_cast_spell(E) and col.final:
ctx.cast_spell(E, ctx.player.pos + ((EvadeFlags.EvadePoint - ctx.player.pos).normalize() * ctx.player.pos.distance(EvadeFlags.EvadePoint)*4))
ctx.pill("Evading", Col.Black, Col.Blue)
if E_Anti_Melee:
targets_melee_range = get_enemy_targets(ctx, 250)
if len(targets_melee_range) > 0 and ctx.player.can_cast_spell(E):
for danger in targets_melee_range:
for point in range(0, 360, 20):
point_temp = math.radians(point)
pX, pY, pZ = E_Range * math.cos(point_temp) + ctx.player.pos.x, ctx.player.pos.y, E_Range * math.sin(point_temp) + ctx.player.pos.z
if Vec3(pX, pY, pZ).distance(danger.pos) > highestDistance:
if not point_has_enemy_champ(ctx, Vec3(pX, pY, pZ)) and not point_has_minion(ctx, Vec3(pX, pY, pZ)) and not ctx.is_wall_at(Vec3(pX, pY, pZ)) and not point_under_turret(ctx, Vec3(pX, pY, pZ)):
highestDistance = Vec3(pX, pY, pZ).distance(danger.pos)
bestPoint = Vec3(pX, pY, pZ)
if ctx.player.can_cast_spell(Q) and bestPoint is not None:
ctx.circle(ctx.w2s(bestPoint), 10, 20, 2, Col.Green)
ctx.cast_spell(E, bestPoint)
if E_Gap_Close:
targets_gap_range = get_enemy_targets(ctx, E_Range)
if len(targets_gap_range) > 0 and ctx.player.can_cast_spell(E):
for danger in targets_gap_range:
if danger.dashing:
for point in range(0, 360, 20):
point_temp = math.radians(point)
pX, pY, pZ = E_Range * math.cos(point_temp) + ctx.player.pos.x, ctx.player.pos.y, E_Range * math.sin(point_temp) + ctx.player.pos.z
if Vec3(pX, pY, pZ).distance(danger.pos) > highestDistance:
if not point_has_enemy_champ(ctx, Vec3(pX, pY, pZ)) and not ctx.is_wall_at(Vec3(pX, pY, pZ)) and not point_under_turret(ctx, Vec3(pX, pY, pZ)):
highestDistance = Vec3(pX, pY, pZ).distance(danger.pos)
bestPoint = Vec3(pX, pY, pZ)
if ctx.player.can_cast_spell(Q) and bestPoint is not None:
ctx.circle(ctx.w2s(bestPoint), 10, 20, 2, Col.Green)
ctx.cast_spell(E, bestPoint)
if Orbwalker.CurrentMode == Orbwalker.ModeKite:
combo_e(ctx, E, Q, W)
combo_w(ctx, W, Q)
combo_q(ctx, Q)
combo_r(ctx, R)
if Orbwalker.CurrentMode == Orbwalker.ModeLanePush:
lasthit_q(ctx, Q)
if Orbwalker.CurrentMode == Orbwalker.ModeLastHit:
harass_q(ctx, Q)
``` |
{
"source": "jpmorgan98/MCDC-TNT-2",
"score": 3
} |
#### File: numba_kernels/gpu/scatter.py
```python
import math
import numpy as np
from numba import cuda
@cuda.jit
def ScatterCuda(d_scatter_indices, p_dir_x, p_dir_y, p_dir_z, rands):
i = cuda.grid(1)
if (i < d_scatter_indices.size):
# Sample polar and azimuthal angles uniformly
mu = 2.0*rands[2*i] - 1.0
azi = 2.0*math.pi*rands[2*i+1]
# Convert to Cartesian coordinate
c = (1.0 - mu**2)**0.5
p_dir_y[d_scatter_indices[i]] = math.cos(azi)*c
p_dir_z[d_scatter_indices[i]] = math.sin(azi)*c
p_dir_x[d_scatter_indices[i]] = mu
def Scatter(scatter_indices, scat_count, p_dir_x, p_dir_y, p_dir_z, rands):
"""
NUMBA CUDA Kernel: Isotropically chooses new particle directions after a scatter event
Parameters
----------
scatter_indices : vector int
Indices into the PSV of the particles undergoing scattering.
scat_count : int
number of particles to scatter.
p_dir_y : vector double
PSV: y direction unit value of phase space particles (index is particle value).
p_dir_z : vector double
PSV: z direction unit value of phase space particles (index is particle value).
p_dir_x : vector double
PSV: x direction unit value of phase space particles (index is particle value).
rands : vector doubles
from an rng, length: 2*scat_count.
Returns
-------
None.
"""
d_scatter_indices = cuda.to_device(scatter_indices)
d_p_dir_x = cuda.to_device(p_dir_x)
d_p_dir_y = cuda.to_device(p_dir_y)
d_p_dir_z = cuda.to_device(p_dir_z)
d_p_rands = cuda.to_device(rands)
threadsperblock = 32
blockspergrid = (scat_count + (threadsperblock - 1)) // threadsperblock
ScatterCuda[blockspergrid, threadsperblock](d_scatter_indices, d_p_dir_x, d_p_dir_y, d_p_dir_z, d_p_rands)
p_dir_x = d_p_dir_x.copy_to_host()
p_dir_y = d_p_dir_y.copy_to_host()
p_dir_z = d_p_dir_z.copy_to_host()
return(p_dir_x, p_dir_y, p_dir_z)
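# A minimal pure-NumPy reference of the same isotropic sampling (not part of the original module),
# handy for cross-checking the CUDA kernel on the host. It assumes NumPy arrays and that rands
# holds the same 2*len(scatter_indices) uniform draws that are passed to Scatter.
def scatter_reference(scatter_indices, p_dir_x, p_dir_y, p_dir_z, rands):
    mu = 2.0 * rands[0::2] - 1.0            # polar cosine, uniform in [-1, 1]
    azi = 2.0 * np.pi * rands[1::2]         # azimuthal angle, uniform in [0, 2*pi)
    c = np.sqrt(1.0 - mu**2)
    p_dir_x[scatter_indices] = mu
    p_dir_y[scatter_indices] = np.cos(azi) * c
    p_dir_z[scatter_indices] = np.sin(azi) * c
    return p_dir_x, p_dir_y, p_dir_z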
def test_Scatter():
scat_count = 3
scatter_indices = np.array([0,1,4], dtype=int)
p_dir_x = np.array([1,2,0,0,4])
p_dir_y = np.array([1,2,0,0,4])
p_dir_z = np.array([1,2,0,0,4])
rands = np.array([1,1,0,0,.5,.5])
[p_dir_x, p_dir_y, p_dir_z] = Scatter(scatter_indices, scat_count, p_dir_x, p_dir_y, p_dir_z, rands)
assert(p_dir_y[0] == 0)
assert(p_dir_z[0] == 0)
assert(p_dir_x[0] == 1)
assert(p_dir_y[1] == 0)
assert(p_dir_z[1] == 0)
assert(p_dir_x[1] == -1)
assert(p_dir_y[4] == -1)
assert(np.allclose(p_dir_z[4], 0))
assert(p_dir_x[4] == 0)
if __name__ == '__main__':
test_Scatter()
```
#### File: pyk_kernels/ad_o/advance.py
```python
import math
import numpy as np
import pykokkos as pk
@pk.workunit
def Advance_cycle(i: int,
p_pos_x: pk.View1D[pk.double], p_pos_y: pk.View1D[pk.double], p_pos_z: pk.View1D[pk.double],
p_dir_y: pk.View1D[pk.double], p_dir_z: pk.View1D[pk.double], p_dir_x: pk.View1D[pk.double],
p_mesh_cell: pk.View1D[int], p_speed: pk.View1D[pk.double], p_time: pk.View1D[pk.double],
dx: pk.double, mesh_total_xsec: pk.View1D[pk.double], L: pk.double,
p_dist_travled: pk.View1D[pk.double], p_end_trans: pk.View1D[int], rands: pk.View1D[pk.double]):
#pk.printf('%d %f\n',i, p_pos_x[i])
kicker: pk.double = 1e-8
if (p_end_trans[i] == 0):
if (p_pos_x[i] < 0): #exited rhs
p_end_trans[i] = 1
elif (p_pos_x[i] >= L): #exited lhs
p_end_trans[i] = 1
else:
dist: pk.double = -math.log(rands[i]) / mesh_total_xsec[p_mesh_cell[i]]
#pk.printf('%d %f %f %f\n', i, dist, rands[i], mesh_total_xsec[p_mesh_cell[i]])
#p_dist_travled[i] = dist
x_loc: pk.double = (p_dir_x[i] * dist) + p_pos_x[i]
LB: pk.double = p_mesh_cell[i] * dx
RB: pk.double = LB + dx
if (x_loc < LB): #move particle into cell at left
p_dist_travled[i] = (LB - p_pos_x[i])/p_dir_x[i] + kicker
p_mesh_cell[i] -= 1
elif (x_loc > RB): #move particle into cell at right
p_dist_travled[i] = (RB - p_pos_x[i])/p_dir_x[i] + kicker
p_mesh_cell[i] += 1
else: #move particle in cell
p_dist_travled[i] = dist
p_end_trans[i] = 1
#pk.printf('%d: x pos before step %f\n', i, p_pos_x[i])
p_pos_x[i] = p_dir_x[i]*p_dist_travled[i] + p_pos_x[i]
p_pos_y[i] = p_dir_y[i]*p_dist_travled[i] + p_pos_y[i]
p_pos_z[i] = p_dir_z[i]*p_dist_travled[i] + p_pos_z[i]
#pk.printf('%d: x pos after step: %f should be: %f\n', i, p_pos_x[i], (temp_x))
p_time[i] += dist/p_speed[i]
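# A minimal plain-Python sketch of the per-particle logic in the Advance_cycle workunit above
# (illustrative only; the pykokkos workunit is what actually runs). xsec is the total cross
# section of the particle's current cell; the return value is
# (distance moved, next cell index, transport finished for this particle).
def _advance_one_step(x, dir_x, cell, dx, xsec, rand, kicker=1e-8):
    dist = -math.log(rand) / xsec           # sampled distance to the next collision
    x_new = x + dir_x * dist
    LB = cell * dx                          # left boundary of the current cell
    RB = LB + dx                            # right boundary of the current cell
    if x_new < LB:                          # stop at the left face and enter the cell to the left
        return (LB - x) / dir_x + kicker, cell - 1, False
    if x_new > RB:                          # stop at the right face and enter the cell to the right
        return (RB - x) / dir_x + kicker, cell + 1, False
    return dist, cell, True                 # collision happens inside the cell: this leg ends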
@pk.workload
class DistTraveled:
def __init__(self, num_part, max_mesh_index, mesh_dist_traveled_pk, mesh_dist_traveled_squared_pk, p_dist_travled, mesh, p_end_trans, clever_out):
self.num_part: int = num_part
self.max_mesh_index: int = max_mesh_index
self.mesh_dist_traveled_pk: pk.View1D[pk.double] = mesh_dist_traveled_pk
self.mesh_dist_traveled_squared_pk: pk.View1D[pk.double] = mesh_dist_traveled_squared_pk
self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled
self.mesh: pk.View1D[int] = mesh
self.p_end_trans: pk.View1D[int] = p_end_trans
self.clever_out: pk.View1D[int] = clever_out
@pk.main
def distTraveled_main(self):
end_flag: int = 1
cur_cell: int = 0
summer: int = 0
#pk.printf('1 %d\n', cur_cell)
#pk.printf('3 %f\n', mesh_dist_traveled_pk[cur_cell])
for i in range(self.num_part):
cur_cell = int(self.mesh[i])
if (0 < cur_cell) and (cur_cell < self.max_mesh_index):
self.mesh_dist_traveled_pk[cur_cell] += self.p_dist_travled[i]
self.mesh_dist_traveled_squared_pk[cur_cell] += self.p_dist_travled[i]**2
if self.p_end_trans[i] == 0:
end_flag = 0
summer += self.p_end_trans[i]
self.clever_out[0] = end_flag
self.clever_out[1] = summer
<EMAIL>
#def CellSum
# for i in range(num_parts)
#@profile
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
space = pk.ExecutionSpace.OpenMP
pk.set_default_space(space)
print(L.dtype)
max_mesh_index = int(len(mesh_total_xsec)-1)
#this is only here while developing; eventually all variables will be views
#allocate special data
p_pos_x_pk = pk.from_numpy(p_pos_x)
p_pos_y_pk = pk.from_numpy(p_pos_y)
p_pos_z_pk = pk.from_numpy(p_pos_z)
p_dir_y_pk = pk.from_numpy(p_dir_y)
p_dir_z_pk = pk.from_numpy(p_dir_z)
p_dir_x_pk = pk.from_numpy(p_dir_x)
p_mesh_cell_pk = pk.from_numpy(p_mesh_cell)
p_speed_pk = pk.from_numpy(p_speed)
p_time_pk = pk.from_numpy(p_time)
mesh_total_xsec_pk = pk.from_numpy(mesh_total_xsec)
mesh_dist_traveled_pk = pk.from_numpy(mesh_dist_traveled)
mesh_dist_traveled_squared_pk = pk.from_numpy(mesh_dist_traveled_squared)
#print(p_pos_x_pk.dtype)
#print(p_pos_y_pk.dtype)
#print(p_pos_z_pk.dtype)
#print(p_dir_y_pk.dtype)
#print(p_dir_z_pk.dtype)
#print(p_mesh_cell_pk.dtype)
#print(p_speed_pk.dtype)
#print(p_time_pk.dtype)
#print(mesh_total_xsec_pk.dtype)
#print(mesh_dist_traveled_pk.dtype)
#print(mesh_dist_traveled_squared_pk.dtype)
#print(mesh_total_xsec_pk)
#print(max_mesh_index)
#rands: pk.View1D[pk.double] = pk.View([num_part], pk.double) #allocation for rands
p_end_trans: pk.View1D[int] = pk.View([num_part], int) #flag
p_end_trans.fill(0)
clever_out: pk.View1D[int] = pk.View([4], int)
end_flag = 0
cycle_count = 0
pre_p_x = np.zeros(num_part)
post_p_x = np.zeros(num_part)
p_dist_travled: pk.View1D[pk.double] = pk.View([num_part], pk.double)
while end_flag == 0:
#allocate randoms
summer = 0
rands_np = np.random.random([num_part])
rands = pk.from_numpy(rands_np)
#vector of indices for particle transport
p = pk.RangePolicy(pk.get_default_space(), 0, num_part)
p_dist_travled.fill(0)
pre_p_mesh = p_mesh_cell_pk
pk.parallel_for(num_part, Advance_cycle,
p_pos_x=p_pos_x_pk, p_pos_y=p_pos_y_pk, p_pos_z=p_pos_z_pk,
p_dir_y=p_dir_y_pk, p_dir_z=p_dir_z_pk, p_dir_x=p_dir_x_pk,
p_mesh_cell=p_mesh_cell_pk, p_speed=p_speed_pk, p_time=p_time_pk,
dx=dx, mesh_total_xsec=mesh_total_xsec_pk, L=L, p_dist_travled=p_dist_travled,
p_end_trans=p_end_trans, rands=rands)#pk for number still in transport
pk.execute(pk.ExecutionSpace.OpenMP,
DistTraveled(num_part, max_mesh_index, mesh_dist_traveled_pk, mesh_dist_traveled_squared_pk, p_dist_travled, pre_p_mesh, p_end_trans, clever_out))
end_flag = clever_out[0]
summer = clever_out[1]
#print(cycle_count)
if (cycle_count > int(1e3)):
print("************ERROR**********")
print(" Max itter hit")
print(p_end_trans)
print()
print()
return()
cycle_count += 1
print("Advance Complete:......{1}% ".format(cycle_count, int(100*summer/num_part)), end = "\r")
print()
for i in range(num_part):
p_pos_x[i] = p_pos_x_pk[i]
p_pos_y[i] = p_pos_y_pk[i]
p_pos_z[i] = p_pos_z_pk[i]
p_mesh_cell[i] = p_mesh_cell_pk[i]
p_speed[i] = p_speed_pk[i]
p_time[i] = p_time_pk[i]
for i in range(max_mesh_index+1):
mesh_dist_traveled[i] = mesh_dist_traveled_pk[i]
mesh_dist_traveled_squared[i] = mesh_dist_traveled_squared_pk[i]
return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared)
def StillIn(p_pos_x, surface_distances, p_alive, num_part):
tally_left = 0
tally_right = 0
for i in range(num_part):
#exit at left
if p_pos_x[i] <= surface_distances[0]:
tally_left += 1
p_alive[i] = False
elif p_pos_x[i] >= surface_distances[len(surface_distances)-1]:
tally_right += 1
p_alive[i] = False
return(p_alive, tally_left, tally_right)
def test_Advance():
L = 1
dx = .25
N_m = 4
num_part = 6
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1])
p_pos_y = 2.1*np.ones(num_part)
p_pos_z = 3.4*np.ones(num_part)
p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], dtype=int)
p_dir_x = np.ones(num_part)
p_dir_x[0] = -1
p_dir_y = np.zeros(num_part)
p_dir_z = np.zeros(num_part)
p_speed = np.ones(num_part)
p_time = np.zeros(num_part)
p_alive = np.ones(num_part, bool)
p_alive[5] = False
particle_speed = 1
mesh_total_xsec = np.array([0.1,1,.1,100])
mesh_dist_traveled_squared = np.zeros(N_m)
mesh_dist_traveled = np.zeros(N_m)
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
assert (np.sum(mesh_dist_traveled) > 0)
assert (np.sum(mesh_dist_traveled_squared) > 0)
assert (p_pos_x[0] == -.01)
assert (p_pos_x[5] == 1.1)
assert (p_pos_x[1:4].all() > .75)
def test_StillIn():
num_part = 7
surface_distances = [0,.25,.75,1]
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
p_alive = np.ones(num_part, bool)
[p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
assert(p_alive[0] == False)
assert(p_alive[5] == False)
assert(tally_left == 2)
assert(tally_right == 2)
assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
test_Advance()
test_StillIn()
```
#### File: pyk_kernels/ad_o/fissions_add.py
```python
import numpy as np
def FissionsAdd(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive,
fis_count, nu_new_neutrons, fission_event_index, num_part, particle_speed, rands):
"""
Adds fission-produced neutrons to the end of the phase-space vectors after a sample event.
Parameters
----------
p_pos_x : vector double
PSV: x position of phase space particles (index is particle value).
p_pos_y : vector double
PSV: y position of phase space particles (index is particle value).
p_pos_z : vector double
PSV: z position of phase space particles (index is particle value).
p_mesh_cell : vector int
PSV: mesh cell location of a given particle.
p_dir_y : vector double
PSV: y direction unit value of phase space particles (index is particle value).
p_dir_z : vector double
PSV: z direction unit value of phase space particles (index is particle value).
p_dir_x : vector double
PSV: x direction unit value of phase space particles (index is particle value).
p_speed : vector double
PSV: speed (energy) or a particle (index is particle).
p_time : vector double
PSV: particle clock.
p_alive : vector bool
PSV: is it alive?
fis_count : int
how many fissions were recorded in the sample event.
nu_new_neutrons : int
how many neutrons produced per fission.
fission_event_index : vector int
indicies of particles that underwent fission after sample event.
num_part : int
number of particles currently under transport (indexed from 1).
particle_speed : double
speed of fissioned particles.
rands : vector double
produced from an rng, needs to be fis_count*nu*2.
Returns
-------
Phase space variables with new fissions added.
"""
k=0 #index for fission temp vectors
for i in range(fis_count):
for j in range(nu_new_neutrons):
# Position
p_pos_x[k+num_part] = p_pos_x[fission_event_index[i]]
p_mesh_cell[k+num_part] = p_mesh_cell[fission_event_index[i]]
p_pos_y[k+num_part] = p_pos_y[fission_event_index[i]]
p_pos_z[k+num_part] = p_pos_z[fission_event_index[i]]
# print("fission particle produced")
# print("from particle {0} and indexed as particle {1}".format(fission_event_index[i], k+num_part))
# print("produced at: {0}".format(p_pos_x[k+num_part]))
# Direction
# Sample polar and azimuthal angles uniformly
mu = 2.0*rands[4*i+2*j] - 1.0
azi = 2.0*np.pi*rands[4*i+2*j+1]
# Convert to Cartesian coordinate
c = (1.0 - mu**2)**0.5
p_dir_y[k+num_part] = np.cos(azi)*c
p_dir_z[k+num_part] = np.sin(azi)*c
p_dir_x[k+num_part] = mu
# Speed
p_speed[k+num_part] = particle_speed
# Time
p_time[k+num_part] = p_time[fission_event_index[i]]
# Flags
p_alive[k+num_part] = True
k+=1
return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, k)
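# Usage sketch (illustrative): the caller is expected to supply two random draws per new neutron
# and to leave room for fis_count*nu_new_neutrons extra entries at the tail of every phase-space
# vector. Note that the 4*i stride in the indexing above assumes nu_new_neutrons == 2. For example:
#
#     rands = np.random.random(2 * nu_new_neutrons * fis_count)
#     results = FissionsAdd(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x,
#                           p_speed, p_time, p_alive, fis_count, nu_new_neutrons,
#                           fission_event_index, num_part, particle_speed, rands)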
def test_FissionsAdd():
L = 1
dx = .25
N_m = 4
num_part = 3
p_pos_x = np.array([.55, 3, 5])
p_pos_y = np.array([10, 3, 5])
p_pos_z = np.array([15, 3, 5])
p_mesh_cell = np.array([2, 87, -1])
p_dir_x = np.ones(num_part)
p_dir_y = np.zeros(num_part)
p_dir_z = np.zeros(num_part)
p_speed = np.ones(num_part)
p_time = np.zeros(num_part)
p_alive = np.ones(num_part, bool)
p_alive[0] = False
fis_count = 1
nu = 2
fission_event_index = [0]
rands = [1,1,1,1]
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, k] = FissionsAdd(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, fis_count, nu, fission_event_index, 1, 1, rands)
print(p_pos_x)
print(p_pos_y)
print(p_pos_z)
assert(np.allclose(p_pos_x, [0.55, 0.55, 0.55]))
assert(np.allclose(p_pos_y, [10,10,10]))
assert(np.allclose(p_pos_z, [15,15,15]))
assert(p_dir_x.all() == 1)
assert(p_alive[1:2].all() == True)
if __name__ == '__main__':
test_FissionsAdd()
```
#### File: pyk_kernels/all/advance.py
```python
import math
import numpy as np
import pykokkos as pk
@pk.workload
class Advance_cycle:
def __init__(self, num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands):
self.p_pos_x: pk.View1D[pk.double] = p_pos_x
self.p_pos_y: pk.View1D[pk.double] = p_pos_y
self.p_pos_z: pk.View1D[pk.double] = p_pos_z
self.p_dir_y: pk.View1D[pk.double] = p_dir_y
self.p_dir_z: pk.View1D[pk.double] = p_dir_z
self.p_dir_x: pk.View1D[pk.double] = p_dir_x
self.p_mesh_cell: pk.View1D[int] = p_mesh_cell
self.p_speed: pk.View1D[pk.double] = p_speed
self.p_time: pk.View1D[pk.double] = p_time
self.dx: pk.double = dx
self.L: pk.double = L
#print(dx)
#print(L)
self.num_part: int = num_part
self.mesh_total_xsec: pk.View1D[pk.double] = mesh_total_xsec
self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled
self.p_end_trans: pk.View1D[int] = p_end_trans
self.rands: pk.View1D[pk.double] = rands
@pk.main
def run(self):
pk.parallel_for(self.num_part, self.advanceCycle_wu)
@pk.workunit
def advanceCycle_wu(self, i: int):
kicker: pk.double = 1e-8
if (self.p_end_trans[i] == 0):
if (self.p_pos_x[i] < 0): #exited rhs
self.p_end_trans[i] = 1
elif (self.p_pos_x[i] >= self.L): #exited lhs
self.p_end_trans[i] = 1
else:
dist: pk.double = -math.log(self.rands[i]) / self.mesh_total_xsec[self.p_mesh_cell[i]]
#pk.printf('%d %f %f %f\n', i, dist, rands[i], mesh_total_xsec[p_mesh_cell[i]])
#p_dist_travled[i] = dist
x_loc: pk.double = (self.p_dir_x[i] * dist) + self.p_pos_x[i]
LB: pk.double = self.p_mesh_cell[i] * self.dx
RB: pk.double = LB + self.dx
if (x_loc < LB): #move particle into cell at left
self.p_dist_travled[i] = (LB - self.p_pos_x[i])/self.p_dir_x[i] + kicker
self.p_mesh_cell[i] -= 1
elif (x_loc > RB): #move particle into cell at right
self.p_dist_travled[i] = (RB - self.p_pos_x[i])/self.p_dir_x[i] + kicker
self.p_mesh_cell[i] += 1
else: #move particle in cell
self.p_dist_travled[i] = dist
self.p_end_trans[i] = 1
#pk.printf('%d: x pos before step %f\n', i, p_pos_x[i])
self.p_pos_x[i] = self.p_dir_x[i]*self.p_dist_travled[i] + self.p_pos_x[i]
self.p_pos_y[i] = self.p_dir_y[i]*self.p_dist_travled[i] + self.p_pos_y[i]
self.p_pos_z[i] = self.p_dir_z[i]*self.p_dist_travled[i] + self.p_pos_z[i]
#pk.printf('%d: x pos after step: %f should be: %f\n', i, p_pos_x[i], (temp_x))
self.p_time[i] += dist/self.p_speed[i]
@pk.workload
class DistTraveled:
def __init__(self, num_part, max_mesh_index, mesh_dist_traveled_pk, mesh_dist_traveled_squared_pk, p_dist_travled, mesh, p_end_trans, clever_out):
self.num_part: int = num_part
self.max_mesh_index: int = max_mesh_index
self.mesh_dist_traveled_pk: pk.View1D[pk.double] = mesh_dist_traveled_pk
self.mesh_dist_traveled_squared_pk: pk.View1D[pk.double] = mesh_dist_traveled_squared_pk
self.p_dist_travled: pk.View1D[pk.double] = p_dist_travled
self.mesh: pk.View1D[int] = mesh
self.p_end_trans: pk.View1D[int] = p_end_trans
self.clever_out: pk.View1D[int] = clever_out
@pk.main
def distTraveled_main(self):
end_flag: int = 1
cur_cell: int = 0
summer: int = 0
#pk.printf('1 %d\n', cur_cell)
#pk.printf('3 %f\n', mesh_dist_traveled_pk[cur_cell])
for i in range(self.num_part):
cur_cell = int(self.mesh[i])
if (0 < cur_cell) and (cur_cell < self.max_mesh_index):
self.mesh_dist_traveled_pk[cur_cell] += self.p_dist_travled[i]
self.mesh_dist_traveled_squared_pk[cur_cell] += self.p_dist_travled[i]**2
if self.p_end_trans[i] == 0:
end_flag = 0
summer += self.p_end_trans[i]
self.clever_out[0] = end_flag
self.clever_out[1] = summer
<EMAIL>
#def CellSum
# for i in range(num_parts)
#@profile
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
max_mesh_index = int(len(mesh_total_xsec)-1)
p_end_trans: pk.View1D[int] = pk.View([num_part], int) #flag
p_end_trans.fill(0)
p_dist_travled: pk.View1D[pk.double] = pk.View([num_part], pk.double)
clever_out: pk.View1D[int] = pk.View([4], int)
end_flag = 0
cycle_count = 0
while end_flag == 0:
#allocate randoms
summer = 0
rands_np = np.random.random([num_part])
rands = pk.from_numpy(rands_np)
#vector of indices for particle transport
p = pk.RangePolicy(pk.get_default_space(), 0, num_part)
p_dist_travled.fill(0)
pre_p_mesh = p_mesh_cell
L = float(L)
#space = pk.ExecutionSpace.OpenMP
pk.execute(pk.ExecutionSpace.OpenMP, Advance_cycle(num_part, p_pos_x, p_pos_y, p_pos_z, p_dir_y, p_dir_z, p_dir_x, p_mesh_cell, p_speed, p_time, dx, mesh_total_xsec, L, p_dist_travled, p_end_trans, rands))#pk for number still in transport
pk.execute(pk.ExecutionSpace.OpenMP,
DistTraveled(num_part, max_mesh_index, mesh_dist_traveled, mesh_dist_traveled_squared, p_dist_travled, pre_p_mesh, p_end_trans, clever_out))
end_flag = clever_out[0]
summer = clever_out[1]
#print(cycle_count)
if (cycle_count > int(1e3)):
print("************ERROR**********")
print(" Max itter hit")
print(p_end_trans)
print()
print()
return()
cycle_count += 1
print("Advance Complete:......{1}% ".format(cycle_count, int(100*summer/num_part)), end = "\r")
print()
@pk.workload
class StillIn:
def __init__(self, p_pos_x, surface_distances, p_alive, num_part, clever_out):
self.p_pos_x: pk.View1D[pk.double] = p_pos_x
self.clever_out: pk.View1D[int] = clever_out
self.surface_distances: pk.View1D[pk.double] = surface_distances
self.p_alive: pk.View1D[int] = p_alive
self.num_part: int = num_part
@pk.main
def run(self):
tally_left: int = 0
tally_right: int = 0
for i in range(self.num_part):
#exit at left
if self.p_pos_x[i] <= 0:
tally_left += 1
self.p_alive[i] = 0
elif self.p_pos_x[i] >= 1:
tally_right += 1
self.p_alive[i] = 0
self.clever_out[0] = tally_left
self.clever_out[1] = tally_right
def speedTestAdvance():
# Position
num_part = int(1e8)
phase_parts = num_part
p_pos_x_np = np.zeros(phase_parts, dtype=float)
p_pos_y_np = np.zeros(phase_parts, dtype=float)
p_pos_z_np = np.zeros(phase_parts, dtype=float)
p_pos_x = pk.from_numpy(p_pos_x_np)
p_pos_y = pk.from_numpy(p_pos_y_np)
p_pos_z = pk.from_numpy(p_pos_z_np)
# Direction
p_dir_x_np = np.zeros(phase_parts, dtype=float)
p_dir_y_np = np.zeros(phase_parts, dtype=float)
p_dir_z_np = np.zeros(phase_parts, dtype=float)
p_dir_x = pk.from_numpy(p_dir_x_np)
p_dir_y = pk.from_numpy(p_dir_y_np)
p_dir_z = pk.from_numpy(p_dir_z_np)
# Speed
p_speed_np = np.zeros(phase_parts, dtype=float)
p_speed = pk.from_numpy(p_speed_np)
# Time
p_time_np = np.zeros(phase_parts, dtype=float)
p_time = pk.from_numpy(p_time_np)
# Region
p_mesh_cell_np = np.zeros(phase_parts, dtype=np.int32)
p_mesh_cell = pk.from_numpy(p_mesh_cell_np)
# Flags
p_alive_np = np.full(phase_parts, False, dtype=np.int32)
p_alive = pk.from_numpy(p_alive_np)
kernels.Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, surface_distances[len(surface_distances)-1])
"""
def test_Advance():
L = 1
dx = .25
N_m = 4
num_part = 6
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1])
p_pos_y = 2.1*np.ones(num_part)
p_pos_z = 3.4*np.ones(num_part)
p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], dtype=int)
p_dir_x = np.ones(num_part)
p_dir_x[0] = -1
p_dir_y = np.zeros(num_part)
p_dir_z = np.zeros(num_part)
p_speed = np.ones(num_part)
p_time = np.zeros(num_part)
p_alive = np.ones(num_part, bool)
p_alive[5] = False
particle_speed = 1
mesh_total_xsec = np.array([0.1,1,.1,100])
mesh_dist_traveled_squared = np.zeros(N_m)
mesh_dist_traveled = np.zeros(N_m)
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
assert (np.sum(mesh_dist_traveled) > 0)
assert (np.sum(mesh_dist_traveled_squared) > 0)
assert (p_pos_x[0] == -.01)
assert (p_pos_x[5] == 1.1)
assert (p_pos_x[1:4].all() > .75)
"""
def test_StillIn():
num_part = 7
surface_distances = [0,.25,.75,1]
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
p_alive = np.ones(num_part, bool)
[p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
assert(p_alive[0] == False)
assert(p_alive[5] == False)
assert(tally_left == 2)
assert(tally_right == 2)
assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
speedTestAdvance()
``` |
{
"source": "jpmorgan98/MCDC-TNT",
"score": 2
} |
#### File: mako_kernels/gpu/advance.py
```python
import math
import numpy as np
import numba as nb
import pycuda.autoinit
import pycuda.driver as drv
from pycuda.compiler import SourceModule
mod = SourceModule("""
__global__ void AdvanceCuda(float *p_pos_x, float *p_pos_y, float *p_pos_z,
float *p_dir_x, float *p_dir_y, float *p_dir_z,
int *p_mesh_cell, float *p_speed, float *p_time,
float *clever_in, float *mesh_total_xsec,
int *p_end_trans, float *rands,
float *mesh_dist_traveled, float *mesh_dist_traveled_squared,
int *num_dead)
{
float dx = clever_in[1];
float L = clever_in[0];
const int num_part = clever_in[2];
const int max_mesh_index = clever_in[3];
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float kicker = 1e-10;
const int init_cell = p_mesh_cell[i];
float p_dist_travled = 0.0;
int cell_next;
if (i < num_part){
if (p_end_trans[i] == 0){
if (p_pos_x[i] < 0){
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
}
else if (p_pos_x[i] >= L){
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
}
else{
float dist = -log(rands[i])/mesh_total_xsec[p_mesh_cell[i]];
float x_loc = (p_dir_x[i] * dist) + p_pos_x[i];
float LB = p_mesh_cell[i] * dx;
float RB = LB + dx;
if (x_loc < LB){
p_dist_travled = (LB - p_pos_x[i])/p_dir_x[i] + kicker; //29
cell_next = p_mesh_cell[i] - 1;
}
else if (x_loc > RB){
p_dist_travled = (RB - p_pos_x[i])/p_dir_x[i] + kicker;
cell_next = p_mesh_cell[i] + 1;
}
else{
p_dist_travled = dist;
p_end_trans[i] = 1;
atomicAdd(&num_dead[0], 1);
cell_next = p_mesh_cell[i];
}
p_pos_x[i] += p_dir_x[i]*p_dist_travled;
p_pos_y[i] += p_dir_y[i]*p_dist_travled;
p_pos_z[i] += p_dir_z[i]*p_dist_travled;
atomicAdd(&mesh_dist_traveled[init_cell], p_dist_travled);
atomicAdd(&mesh_dist_traveled_squared[init_cell], pow(p_dist_travled,2));
p_mesh_cell[i] = cell_next;
p_time[i] += p_dist_travled/p_speed[i];
}
}
}
}
""")
def Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time,
num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L):
p_end_trans = np.zeros(num_part, dtype=np.int32)
end_flag = 0
max_mesh_index = len(mesh_total_xsec)-1
cycle_count = 0
#copy data to cuda device
d_p_pos_x = drv.mem_alloc(p_pos_x.nbytes)
d_p_pos_y = drv.mem_alloc(p_pos_y.nbytes)
d_p_pos_z = drv.mem_alloc(p_pos_z.nbytes)
drv.memcpy_htod(d_p_pos_x, p_pos_x)
drv.memcpy_htod(d_p_pos_y, p_pos_y)
drv.memcpy_htod(d_p_pos_z, p_pos_z)
d_p_dir_y = drv.mem_alloc(p_dir_y.nbytes)
d_p_dir_z = drv.mem_alloc(p_dir_z.nbytes)
d_p_dir_x = drv.mem_alloc(p_dir_x.nbytes)
drv.memcpy_htod(d_p_dir_x, p_dir_x)
drv.memcpy_htod(d_p_dir_y, p_dir_y)
drv.memcpy_htod(d_p_dir_z, p_dir_z)
d_p_mesh_cell = drv.mem_alloc(p_mesh_cell.nbytes)
d_p_speed = drv.mem_alloc(p_speed.nbytes)
d_p_time = drv.mem_alloc(p_time.nbytes)
drv.memcpy_htod(d_p_mesh_cell, p_mesh_cell)
drv.memcpy_htod(d_p_speed, p_speed)
drv.memcpy_htod(d_p_time, p_time)
d_p_end_trans = drv.mem_alloc(p_end_trans.nbytes)
d_mesh_total_xsec = drv.mem_alloc(mesh_total_xsec.nbytes)
drv.memcpy_htod(d_p_end_trans, p_end_trans)
drv.memcpy_htod(d_mesh_total_xsec, mesh_total_xsec)
d_mesh_dist_traveled = drv.mem_alloc(mesh_dist_traveled.nbytes)
d_mesh_dist_traveled_squared = drv.mem_alloc(mesh_dist_traveled_squared.nbytes)
drv.memcpy_htod(d_mesh_dist_traveled, mesh_dist_traveled)
drv.memcpy_htod(d_mesh_dist_traveled_squared, mesh_dist_traveled_squared)
threadsperblock = 32
blockspergrid = (num_part + (threadsperblock - 1)) // threadsperblock
summer = num_part
number_done = np.zeros(1, dtype=np.int32)
d_number_done = drv.mem_alloc(number_done.nbytes)
drv.memcpy_htod(d_number_done, number_done)
#d_number_done = cuda.to_device(number_done)
AdvanceCuda = mod.get_function("AdvanceCuda")
clever_io = np.array([L, dx, num_part, max_mesh_index], np.float32)
while end_flag == 0 and cycle_count < 1000:
#allocate randoms
rands = np.random.random(num_part).astype(np.float32)
AdvanceCuda(d_p_pos_x, d_p_pos_y, d_p_pos_z,
d_p_dir_y, d_p_dir_z, d_p_dir_x,
d_p_mesh_cell, d_p_speed, d_p_time,
drv.In(clever_io), d_mesh_total_xsec,
d_p_end_trans, drv.In(rands), d_mesh_dist_traveled, d_mesh_dist_traveled_squared, d_number_done,
block=(threadsperblock, 1, 1), grid=(blockspergrid, 1))
drv.memcpy_dtoh(number_done, d_number_done)
summer = int(number_done[0])
if (summer == num_part):
end_flag = 1
cycle_count += 1
#print("Number done (atomics): {0} Number done (classical): {1}".format(d_number_done[0], number_done_2))
print("Advance Complete:......{0}% ({1}/{2}) cycle: {3}".format(int(100*summer/num_part), summer, num_part, cycle_count), end = "\r")
print()
drv.memcpy_dtoh(p_pos_x, d_p_pos_x)
drv.memcpy_dtoh(p_pos_y, d_p_pos_y)
drv.memcpy_dtoh(p_pos_z, d_p_pos_z)
drv.memcpy_dtoh(p_dir_x, d_p_dir_x)
drv.memcpy_dtoh(p_dir_y, d_p_dir_y)
drv.memcpy_dtoh(p_dir_z, d_p_dir_z)
drv.memcpy_dtoh(p_speed, d_p_speed)
drv.memcpy_dtoh(p_time, d_p_time)
drv.memcpy_dtoh(p_mesh_cell, d_p_mesh_cell)
drv.memcpy_dtoh(mesh_dist_traveled, d_mesh_dist_traveled)
drv.memcpy_dtoh(mesh_dist_traveled_squared, d_mesh_dist_traveled_squared)
return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared)
@nb.jit(nopython=True)
def StillIn(p_pos_x, surface_distances, p_alive, num_part):
tally_left = 0
tally_right = 0
for i in range(num_part):
#exit at left
if p_pos_x[i] <= surface_distances[0]:
tally_left += 1
p_alive[i] = False
elif p_pos_x[i] >= surface_distances[len(surface_distances)-1]:
tally_right += 1
p_alive[i] = False
return(p_alive, tally_left, tally_right)
def test_Advance():
L: float = 1
dx: float = .25
N_m: int = 4
num_part: int = 6
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1], np.float32)
p_pos_y = 2.1*np.ones(num_part, np.float32)
p_pos_z = 3.4*np.ones(num_part, np.float32)
p_mesh_cell = np.array([-1, 0, 0, 1, 3, 4], np.int32)
p_dir_x = np.ones(num_part, np.float32)
p_dir_x[0] = -1
p_dir_y = np.zeros(num_part, np.float32)
p_dir_z = np.zeros(num_part, np.float32)
p_speed = np.ones(num_part, np.float32)
p_time = np.zeros(num_part, np.float32)
p_alive = np.ones(num_part, np.int32)
p_alive[5] = 0
particle_speed = 1
mesh_total_xsec = np.array([0.1,1,.1,100], np.float32)
mesh_dist_traveled_squared = np.zeros(N_m, np.float32)
mesh_dist_traveled = np.zeros(N_m, np.float32)
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, mesh_dist_traveled, mesh_dist_traveled_squared] = Advance(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, dx, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, num_part, mesh_total_xsec, mesh_dist_traveled, mesh_dist_traveled_squared, L)
assert (np.sum(mesh_dist_traveled) > 0)
assert (np.sum(mesh_dist_traveled_squared) > 0)
assert (p_pos_x[0] == -.01)
assert (p_pos_x[5] == 1.1)
assert (p_pos_x[1:4].all() > .75)
def test_StillIn():
num_part = 7
surface_distances = [0,.25,.75,1]
p_pos_x = np.array([-.01, 0, .1544, .2257, .75, 1.1, 1])
p_alive = np.ones(num_part, bool)
[p_alive, tally_left, tally_right] = StillIn(p_pos_x, surface_distances, p_alive, num_part)
assert(p_alive[0] == False)
assert(p_alive[5] == False)
assert(tally_left == 2)
assert(tally_right == 2)
assert(p_alive[2:4].all() == True)
if __name__ == '__main__':
test_Advance()
#test_StillIn()
```
#### File: mcdc_tnt/numba_kernels/cleanup.py
```python
import numba as nb
@nb.jit(nopython=True)
def BringOutYourDead(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_time_cell, p_alive, num_part):
"""
Removes particles that died in the last round of particle transport by
overwriting their entries with those of the particles still alive.
Parameters
----------
p_pos_x : vector double
PSV: x position of phase space particles (index is particle value).
p_pos_y : vector double
PSV: y position of phase space particles (index is particle value).
p_pos_z : vector double
PSV: z position of phase space particles (index is particle value).
p_mesh_cell : vector int
PSV: mesh cell location of a given particle.
p_dir_y : vector double
PSV: y direction unit value of phase space particles (index is particle value).
p_dir_z : vector double
PSV: z direction unit value of phase space particles (index is particle value).
p_dir_x : vector double
PSV: x direction unit value of phase space particles (index is particle value).
p_speed : vector double
PSV: speed (energy) or a particle (index is particle).
p_time : vector double
PSV: particle clock.
p_alive : vector bool
PSV: is it alive?
num_part : int
number of particles currently under transport (indxed form 1).
Returns
-------
PSV ready for the next iteration of the life cycle
"""
kept = 0
for i in range(num_part):
if p_alive[i] == True:
# if p_mesh_cell[i] > 9:
# print("index from this round:")
# print(i)
# print("index for next round:")
# print(kept)
p_pos_x[kept] = p_pos_x[i]
p_pos_y[kept] = p_pos_y[i]
p_pos_z[kept] = p_pos_z[i]
# Direction
p_dir_x[kept] = p_dir_x[i]
p_dir_y[kept] = p_dir_y[i]
p_dir_z[kept] = p_dir_z[i]
# Speed
p_speed[kept] = p_speed[i]
# Time
p_time[kept] = p_time[i]
p_time_cell[kept] = p_time_cell[i]
# Regions
p_mesh_cell[kept] = p_mesh_cell[i]
# Flags
p_alive[kept] = p_alive[i]
kept +=1
return(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, kept)
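# For comparison only (the numba loop above is what the code uses): the same compaction can be
# written as boolean-mask indexing when the PSV columns are NumPy arrays, e.g.
#
#     alive = np.asarray(p_alive[:num_part], dtype=bool)
#     kept = int(alive.sum())
#     p_pos_x[:kept] = np.asarray(p_pos_x[:num_part])[alive]
#     # ...and likewise for the remaining phase-space vectors.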
def test_BOYD():
num_part = 3
p_pos_x = [1,2,3]
p_pos_y = [1,2,3]
p_pos_z = [1,2,3]
p_mesh_cell = [1,2,3]
p_dir_x = [1,2,3]
p_dir_y = [1,2,3]
p_dir_z = [1,2,3]
p_speed = [1,2,3]
p_time = [1,2,3]
p_time_cell = [1,2,3]
p_alive = [False,True,False]
[p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_alive, kept] = BringOutYourDead(p_pos_x, p_pos_y, p_pos_z, p_mesh_cell, p_dir_y, p_dir_z, p_dir_x, p_speed, p_time, p_time_cell, p_alive, num_part)
assert(kept == 1)
assert(p_dir_x[0] == 2)
assert(p_dir_y[0] == 2)
assert(p_dir_z[0] == 2)
assert(p_pos_x[0] == 2)
assert(p_pos_y[0] == 2)
assert(p_pos_z[0] == 2)
assert(p_speed[0] == 2)
assert(p_time[0] == 2)
assert(p_alive[0] == True)
if __name__ == '__main__':
test_BOYD()
```
#### File: MCDC-TNT/mcdc_tnt/run.py
```python
import numpy as np
import sys
import argparse
import mcdc_tnt
import matplotlib.pyplot as plt
def flatLinePlot(x, y, lab):
for i in range(y.size):
xx = x[i:i+2]
yy = [y[i], y[i]]
plt.plot(xx, yy, label=lab)
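# Usage sketch (illustrative): flatLinePlot draws y as a piecewise-constant line, so x is expected
# to hold the N+1 cell edges for the N values in y, e.g.
#
#     edges = np.linspace(0.0, 1.0, 5)        # 4 cells
#     values = np.array([1.0, 2.0, 1.5, 0.5])
#     flatLinePlot(edges, values, "flux")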
def run(input_file, output_file=None, hard_targ=None):
"""
main function to run a single generation and plot the output
Returns
-------
Plots and output tables if requested.
"""
with open("title_print.txt", "r", encoding="utf-8") as file:
for line in file:
print(line.strip())
[comp_parms, sim_perams, mesh_cap_xsec, mesh_scat_xsec, mesh_fis_xsec, mesh_total_xsec, surface_distances] = mcdc_tnt.SimulationSetup(input_file)
if hard_targ != None:
comp_parms['hard_targ'] = hard_targ
if comp_parms['hard_targ'] == 'pp':
from mcdc_tnt.generations import Generations
print('>>>Running Prue Python kernels (slow)')
elif comp_parms['hard_targ'] == 'nb_cpu':
from mcdc_tnt.generations import Generations
print('>>>Running Numba CPU kernels')
elif comp_parms['hard_targ'] == 'nb_gpu':
from mcdc_tnt.generations import Generations
print('>>>Running Numba GPU kernels (slow)')
elif comp_parms['hard_targ'] == 'pyk_cpu':
from mcdc_tnt.generations import Generations
print('>>>Running PyKokkos CPU kernels')
print('   ensure the correct conda environment is loaded!')
elif comp_parms['hard_targ'] == 'pyk_gpu':
print('>>>Feature not yet implemented, running pyk cpu kernels')
from mcdc_tnt.generations_pyk import Generations
print('>>>Running PyKokkos CPU kernels')
print('   ensure the correct conda environment is loaded!')
else:
print()
print('>>FATAL ERROR: NO HARDWARE TARGET<<')
print()
return()
print()
[scalar_flux, standard_deviation_flux] = Generations(comp_parms, sim_perams, mesh_cap_xsec, mesh_scat_xsec, mesh_fis_xsec, mesh_total_xsec, surface_distances)
print()
print('Simulation complete')
print()
x_mesh = np.linspace(0,sim_perams['L_slab'],80)#len(scalar_flux))
X = np.linspace(0, sim_perams['L_slab'], 80+1)#int(scalar_flux.size+1))
#print(scalar_flux)
#scalar_flux /= np.max(scalar_flux)
np.set_printoptions(threshold=np.inf)
if comp_parms['output file'] == True:
if (output_file == None):
output_file = 'output.out'
with open(output_file, 'w') as f:
print(comp_parms['sim name'],'output file', file=f)
for i in range(scalar_flux.shape[1]):
print((scalar_flux[:,i]), file=f)
#print('cell, center x, normalized scalar flux, associated error', file=f)
#for i in range(len(scalar_flux)):
# print('{0},{1},{2},{3}'.format(i, x_mesh[i], scalar_flux[i], standard_deviation_flux[i]), file=f)
print('Output written to',output_file)
print()
else:
print('No file outputs requested, Simulation Complete')
'''
if comp_parms['plot error'] == True:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(x_mesh, standard_deviation_flux, '-k')
plt.title(["$σ^2$ ",comp_parms['sim name']])
plt.ylabel("$σ^2$")
plt.xlabel("x [cm]")
plt.savefig('error.png', dpi=500, facecolor='w', edgecolor='k',orientation='portrait')
print('Error figure printed to error.png')
print()
#np.sum(scalar_flux, axis=1)
'''
#print(scalar_flux)
#print()
#print()
#print(scalar_flux[0,:])
#print()
#print()
#print(scalar_flux[:,0])
import matplotlib.pyplot as plt
plt.figure(2)
for i in range(scalar_flux.shape[0]):
plt.plot(x_mesh, scalar_flux[i,:], label=i)
plt.show()
'''
if comp_parms['plot flux'] == True:
import matplotlib.pyplot as plt
plt.figure(2)
print(scalar_flux.shape)
for i in range(scalar_flux.shape[1]):
plt.plot(x_mesh, scalar_flux[:,i], label=i)
#flatLinePlot(X, scalar_flux[:, i], i)
#plt.ylim([0,2])
plt.grid(True)
plt.title(["Scalar Flux: ",comp_parms['sim name']])
plt.ylabel("$\phi [cm^{-2}s^{-1}]$")
plt.xlabel("x [cm]")
plt.legend(loc='right')
plt.savefig('sflux.pdf', dpi=500, facecolor='w', edgecolor='k',orientation='portrait')
print('Flux figure printed to sflux.png')
print()
'''
#print(scalar_flux.shape)
#print()
#plt.figure(3)
#plt.plot(x_mesh, scalar_flux[:, 0])
#plt.show()
#for i in range(scalar_flux.shape[1]):
# print(sum(scalar_flux[:, i]))
#print()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Main file to run MC/DC-TNT')
parser.add_argument('-i', '--input', required=True,
help='input file in a .yaml format (see InputDeck.py)')
parser.add_argument('-o', '--output', required=False,
help='output file, if none then output.txt')
parser.add_argument('-t', '--target', required=False,
help='hardware target, if none then use one listed in input.yaml (pp = pure python, nb_cpu = numba cpu)')
args = parser.parse_args(sys.argv[1:])
input_file = args.input
output_file = args.output
hard_targ = args.target
run(input_file, output_file, hard_targ)
```
#### File: tests/integration/tests_hardware.py
```python
import numpy as np
import math
import mcdc_tnt
from timeit import default_timer as timer
def error(sim, bench):
error = np.linalg.norm(sim - bench) / np.linalg.norm(bench)
return(error)
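# Note: this is the relative L2 error ||sim - bench||_2 / ||bench||_2; for example
# error(np.array([1.0, 2.0]), np.array([1.0, 1.0])) is 1/sqrt(2), roughly 0.707.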
if __name__ == '__main__':
print()
print('ATTENTION')
print('Entering Hardware Test Suite')
print('Ensure the proper conda environment is enabled')
print('Test Schedule ([x] will run, [c] can run (must be manually set)):')
print(' -[x] pure python')
print(' -[x] numba cpu')
print(' -[ ] numba gpu')
print(' -[c] pykokkos cpu')
print(' -[ ] pykokkos gpu')
print(' -[c] pyomp cpu')
print('This can take a while; it is recommended not to run it under Pytest')
print()
start_o = timer()
print('Entering Pure Python')
input_file = 'tc_1_pp.yaml'
output_file = 'pp.out'
start = timer()
mcdc_tnt.run(input_file, output_file, None)
end = timer()
time_pp = end-start
print()
print('Entering Numba CPU')
input_file = 'tc_1_numba_cpu.yaml'
output_file = 'numba_cpu.out'
start = timer()
mcdc_tnt.run(input_file, output_file, None)
end = timer()
time_nbc = end-start
#print()
#print('Entering Numba GPU')
#input_file = 'tc_1_numba_gpu.yaml'
#output_file = 'numba_gpu.out'
#start = timer()
#mcdc_tnt.run(input_file, output_file)
#end = timer()
#time_nbg = end-start
#print()
#print('Entering PyKokkos CPU')
#input_file = 'tc_1_pyk_cpu.yaml'
#output_file = 'pyk_cpu.out'
#start = timer()
#mcdc_tnt.run(input_file, output_file)
#end = timer()
#time_pykc = end-start
end_o = timer()
sf_actual = np.loadtxt('anwser.pout', comments='#', delimiter=',', skiprows=2)
sf_pp = np.loadtxt('pp.out', comments='#', delimiter=',', skiprows=2)
sf_nbc = np.loadtxt('numba_cpu.out', comments='#', delimiter=',', skiprows=2)
#sf_nbg = np.loadtxt('numba_gpu.out', comments='#', delimiter=',', skiprows=2)
#sf_pykc = np.loadtxt('pyk_cpu.out', comments='#', delimiter=',', skiprows=2)
assert(np.allclose(sf_actual[:,2], sf_pp[:,2], rtol=1e-01))
assert(np.allclose(sf_actual[:,2], sf_nbc[:,2], rtol=1e-01))
#assert(np.allclose(sf_actual[:,2], sf_nbg[:,2]))
#assert(np.allclose(sf_actual[:,2], sf_pykc[:,2], rtol=1e-01))
print()
print('Test Complete and all Passed!')
print('Total time to completion:')
print(' -pure python.....{0}'.format(time_pp))
print(' -numba cpu.......{0}'.format(time_nbc))
#print(' -numba gpu.......{0}'.format(time_nbg))
#print(' -pykokkos cpu....{0}'.format(time_pykc))
print()
print(' -total...........{0}'.format(end_o-start_o))
print()
print('Produced Errors Between Solutions')
print(' -pure python............{0}'.format(error(sf_actual, sf_pp)))
print(' -numba threading........{0}'.format(error(sf_actual, sf_nbc)))
#print(' -numba pyomp............{0}'.format(error(sf_actual, sf_pyomp)))
#print(' -pyk ompenmp............{0}'.format(error(sf_actual, sf_pykc)))
print()
import matplotlib.pyplot as plt
plt.figure(1)
f = plt.plot(sf_actual[:,1], sf_actual[:,2], '-b',
sf_pp[:,1], sf_pp[:,2], '-r',
sf_nbc[:,1], sf_nbc[:,2], 'g-')
plt.title("Scalar Flux")
plt.ylabel("$\phi [cm^{-2}s^{-1}]$")
plt.xlabel("x [cm]")
plt.legend(f, ['Actual','Pure Python','Numba CPU','Pyk CPU'])
plt.savefig('sflux.png', dpi=500, facecolor='w', edgecolor='k',orientation='portrait')
print('Flux figure printed to sflux.png')
print()
#sf_pykc[:,1], sf_pykc[:,2], 'k-')
print()
``` |
{
"source": "jpmorganchase/ABIDES-gym",
"score": 3
} |
#### File: agents/market_makers/adaptive_market_maker_agent.py
```python
import logging
from math import floor, ceil
from typing import Dict, List, Optional, Tuple
import numpy as np
from abides_core import Message, NanosecondTime
from ...utils import sigmoid
from ...messages.marketdata import (
MarketDataMsg,
L2SubReqMsg,
BookImbalanceDataMsg,
BookImbalanceSubReqMsg,
MarketDataEventMsg,
)
from ...messages.query import QuerySpreadResponseMsg, QueryTransactedVolResponseMsg
from ...orders import Side
from ..trading_agent import TradingAgent
ANCHOR_TOP_STR = "top"
ANCHOR_BOTTOM_STR = "bottom"
ANCHOR_MIDDLE_STR = "middle"
ADAPTIVE_SPREAD_STR = "adaptive"
INITIAL_SPREAD_VALUE = 50
logger = logging.getLogger(__name__)
class AdaptiveMarketMakerAgent(TradingAgent):
"""This class implements a modification of the Chakraborty-Kearns `ladder` market-making strategy, wherein the
the size of order placed at each level is set as a fraction of measured transacted volume in the previous time
period.
Can skew orders to size of current inventory using beta parameter, whence beta == 0 represents inventory being
ignored and beta == infinity represents all liquidity placed on one side of book.
ADAPTIVE SPREAD: the market maker's spread can be set either as a fixed value or adaptively from observed spreads.
"""
def __init__(
self,
id: int,
symbol: str,
starting_cash: int,
name: Optional[str] = None,
type: Optional[str] = None,
random_state: Optional[np.random.RandomState] = None,
pov: float = 0.05,
min_order_size: int = 20,
window_size: float = 5,
anchor: str = ANCHOR_MIDDLE_STR,
num_ticks: int = 20,
level_spacing: float = 0.5,
wake_up_freq: NanosecondTime = 1_000_000_000, # 1 second
poisson_arrival: bool = True,
subscribe: bool = False,
subscribe_freq: float = 10e9,
subscribe_num_levels: int = 1,
cancel_limit_delay: int = 50,
skew_beta=0,
price_skew_param=None,
spread_alpha: float = 0.85,
backstop_quantity: int = 0,
log_orders: bool = False,
min_imbalance=0.9,
) -> None:
super().__init__(id, name, type, random_state, starting_cash, log_orders)
self.is_adaptive: bool = False
self.symbol: str = symbol # Symbol traded
self.pov: float = (
pov # fraction of transacted volume placed at each price level
)
self.min_order_size: int = (
min_order_size # minimum size order to place at each level, if pov <= min
)
self.anchor: str = self.validate_anchor(
anchor
) # anchor either top of window or bottom of window to mid-price
self.window_size: float = self.validate_window_size(
window_size
) # Size in ticks (cents) of how wide the window around mid price is. If equal to
# string 'adaptive' then ladder starts at best bid and ask
self.num_ticks: int = num_ticks # number of ticks on each side of window in which to place liquidity
self.level_spacing: float = (
level_spacing # level spacing as a fraction of the spread
)
self.wake_up_freq: NanosecondTime = wake_up_freq  # Frequency of agent wake up
self.poisson_arrival: bool = (
poisson_arrival # Whether to arrive as a Poisson process
)
if self.poisson_arrival:
self.arrival_rate = self.wake_up_freq
self.subscribe: bool = subscribe # Flag to determine whether to subscribe to data or use polling mechanism
self.subscribe_freq: float = subscribe_freq # Frequency in nanoseconds^-1 at which to receive market updates
# in subscribe mode
self.min_imbalance = min_imbalance
self.subscribe_num_levels: int = (
subscribe_num_levels # Number of orderbook levels in subscription mode
)
self.cancel_limit_delay: int = cancel_limit_delay # delay in nanoseconds between order cancellations and new limit order placements
self.skew_beta = (
skew_beta # parameter for determining order placement imbalance
)
self.price_skew_param = (
price_skew_param # parameter determining how much to skew price level.
)
self.spread_alpha: float = spread_alpha # parameter for exponentially weighted moving average of spread. 1 corresponds to ignoring old values, 0 corresponds to no updates
self.backstop_quantity: int = backstop_quantity # how many orders to place at outside order level, to prevent liquidity dropouts. If None then place same as at other levels.
self.log_orders: float = log_orders
self.has_subscribed = False
## Internal variables
self.subscription_requested: bool = False
self.state: Dict[str, bool] = self.initialise_state()
self.buy_order_size: int = self.min_order_size
self.sell_order_size: int = self.min_order_size
self.last_mid: Optional[int] = None # last observed mid price
self.last_spread: float = (
INITIAL_SPREAD_VALUE # last observed spread moving average
)
self.tick_size: Optional[int] = (
None if self.is_adaptive else ceil(self.last_spread * self.level_spacing)
)
self.LIQUIDITY_DROPOUT_WARNING: str = (
f"Liquidity dropout for agent {self.name}."
)
self.two_side: bool = (
False if self.price_skew_param is None else True
) # switch to control self.get_transacted_volume
# method
def initialise_state(self) -> Dict[str, bool]:
"""Returns variables that keep track of whether spread and transacted volume have been observed."""
if self.subscribe:
return {"AWAITING_MARKET_DATA": True, "AWAITING_TRANSACTED_VOLUME": True}
else:
return {"AWAITING_SPREAD": True, "AWAITING_TRANSACTED_VOLUME": True}
def validate_anchor(self, anchor: str) -> str:
"""Checks that input parameter anchor takes allowed value, raises ``ValueError`` if not.
Arguments:
anchor:
Returns:
The anchor if validated.
"""
if anchor not in [ANCHOR_TOP_STR, ANCHOR_BOTTOM_STR, ANCHOR_MIDDLE_STR]:
raise ValueError(
f"Variable anchor must take the value `{ANCHOR_BOTTOM_STR}`, `{ANCHOR_MIDDLE_STR}` or "
f"`{ANCHOR_TOP_STR}`"
)
else:
return anchor
def validate_window_size(self, window_size: float) -> Optional[int]:
"""Checks that input parameter window_size takes allowed value, raises ``ValueError`` if not.
Arguments:
window_size:
Returns:
The window_size if validated
"""
try: # fixed window size specified
return int(window_size)
except:
if window_size.lower() == "adaptive":
self.is_adaptive = True
self.anchor = ANCHOR_MIDDLE_STR
return None
else:
raise ValueError(
f"Variable window_size must be of type int or string {ADAPTIVE_SPREAD_STR}."
)
def kernel_starting(self, start_time: NanosecondTime) -> None:
super().kernel_starting(start_time)
def wakeup(self, current_time: NanosecondTime):
"""Agent wakeup is determined by self.wake_up_freq."""
can_trade = super().wakeup(current_time)
if not self.has_subscribed:
super().request_data_subscription(
BookImbalanceSubReqMsg(
symbol=self.symbol,
min_imbalance=self.min_imbalance,
)
)
self.last_time_book_order = current_time
self.has_subscribed = True
if self.subscribe and not self.subscription_requested:
super().request_data_subscription(
L2SubReqMsg(
symbol=self.symbol,
freq=self.subscribe_freq,
depth=self.subscribe_num_levels,
)
)
self.subscription_requested = True
self.get_transacted_volume(self.symbol, lookback_period=self.subscribe_freq)
self.state = self.initialise_state()
elif can_trade and not self.subscribe:
self.cancel_all_orders()
self.delay(self.cancel_limit_delay)
self.get_current_spread(self.symbol, depth=self.subscribe_num_levels)
self.get_transacted_volume(self.symbol, lookback_period=self.wake_up_freq)
self.initialise_state()
def receive_message(
self, current_time: NanosecondTime, sender_id: int, message: Message
) -> None:
"""Processes message from exchange.
Main function is to update orders in orderbook relative to mid-price.
Arguments:
current_time: Simulation current time.
message: Message received by self from ExchangeAgent.
"""
super().receive_message(current_time, sender_id, message)
mid = None
if self.last_mid is not None:
mid = self.last_mid
if self.last_spread is not None and self.is_adaptive:
self._adaptive_update_window_and_tick_size()
if (
isinstance(message, QueryTransactedVolResponseMsg)
and self.state["AWAITING_TRANSACTED_VOLUME"] is True
):
self.update_order_size()
self.state["AWAITING_TRANSACTED_VOLUME"] = False
if isinstance(message, BookImbalanceDataMsg):
if message.stage == MarketDataEventMsg.Stage.START:
try:
self.place_orders(mid)
self.last_time_book_order = current_time
except:
pass
if not self.subscribe:
if (
isinstance(message, QuerySpreadResponseMsg)
and self.state["AWAITING_SPREAD"] is True
):
bid, _, ask, _ = self.get_known_bid_ask(self.symbol)
if bid and ask:
mid = int((ask + bid) / 2)
self.last_mid = mid
if self.is_adaptive:
spread = int(ask - bid)
self._adaptive_update_spread(spread)
self.state["AWAITING_SPREAD"] = False
else:
logger.debug("SPREAD MISSING at time {}", current_time)
self.state[
"AWAITING_SPREAD"
] = False # use last mid price and spread
if (
self.state["AWAITING_SPREAD"] is False
and self.state["AWAITING_TRANSACTED_VOLUME"] is False
and mid is not None
):
self.place_orders(mid)
self.state = self.initialise_state()
self.set_wakeup(current_time + self.get_wake_frequency())
else: # subscription mode
if (
isinstance(message, MarketDataMsg)
and self.state["AWAITING_MARKET_DATA"] is True
):
bid = (
self.known_bids[self.symbol][0][0]
if self.known_bids[self.symbol]
else None
)
ask = (
self.known_asks[self.symbol][0][0]
if self.known_asks[self.symbol]
else None
)
if bid and ask:
mid = int((ask + bid) / 2)
self.last_mid = mid
if self.is_adaptive:
spread = int(ask - bid)
self._adaptive_update_spread(spread)
self.state["AWAITING_MARKET_DATA"] = False
else:
logger.debug("SPREAD MISSING at time {}", current_time)
self.state["AWAITING_MARKET_DATA"] = False
if (
self.state["MARKET_DATA"] is False
and self.state["AWAITING_TRANSACTED_VOLUME"] is False
):
self.place_orders(mid)
self.state = self.initialise_state()
def _adaptive_update_spread(self, spread) -> None:
"""Update internal spread estimate with exponentially weighted moving average.
Arguments:
spread
"""
spread_ewma = (
self.spread_alpha * spread + (1 - self.spread_alpha) * self.last_spread
)
self.window_size = spread_ewma
self.last_spread = spread_ewma
def _adaptive_update_window_and_tick_size(self) -> None:
"""Update window size and tick size relative to internal spread estimate."""
self.window_size = self.last_spread
self.tick_size = round(self.level_spacing * self.window_size)
if self.tick_size == 0:
self.tick_size = 1
def update_order_size(self) -> None:
"""Updates size of order to be placed."""
buy_transacted_volume = self.transacted_volume[self.symbol][0]
sell_transacted_volume = self.transacted_volume[self.symbol][1]
total_transacted_volume = buy_transacted_volume + sell_transacted_volume
qty = round(self.pov * total_transacted_volume)
if self.skew_beta == 0: # ignore inventory
self.buy_order_size = (
qty if qty >= self.min_order_size else self.min_order_size
)
self.sell_order_size = (
qty if qty >= self.min_order_size else self.min_order_size
)
else:
holdings = self.get_holdings(self.symbol)
proportion_sell = sigmoid(holdings, self.skew_beta)
sell_size = ceil(proportion_sell * qty)
buy_size = floor((1 - proportion_sell) * qty)
self.buy_order_size = (
buy_size if buy_size >= self.min_order_size else self.min_order_size
)
self.sell_order_size = (
sell_size if sell_size >= self.min_order_size else self.min_order_size
)
def compute_orders_to_place(self, mid: int) -> Tuple[List[int], List[int]]:
"""Given a mid price, computes the orders that need to be removed from
orderbook, and adds these orders to bid and ask deques.
Arguments:
mid: Mid price.
"""
if self.price_skew_param is None:
mid_point = mid
else:
buy_transacted_volume = self.transacted_volume[self.symbol][0]
sell_transacted_volume = self.transacted_volume[self.symbol][1]
if (buy_transacted_volume == 0) and (sell_transacted_volume == 0):
mid_point = mid
else:
# trade imbalance, +1 means all transactions are buy, -1 means all transactions are sell
trade_imbalance = (
2
* buy_transacted_volume
/ (buy_transacted_volume + sell_transacted_volume)
) - 1
mid_point = int(mid + (trade_imbalance * self.price_skew_param))
if self.anchor == ANCHOR_MIDDLE_STR:
highest_bid = int(mid_point) - floor(0.5 * self.window_size)
lowest_ask = int(mid_point) + ceil(0.5 * self.window_size)
elif self.anchor == ANCHOR_BOTTOM_STR:
highest_bid = int(mid_point - 1)
lowest_ask = int(mid_point + self.window_size)
elif self.anchor == ANCHOR_TOP_STR:
highest_bid = int(mid_point - self.window_size)
lowest_ask = int(mid_point + 1)
lowest_bid = highest_bid - ((self.num_ticks - 1) * self.tick_size)
highest_ask = lowest_ask + ((self.num_ticks - 1) * self.tick_size)
bids_to_place = [
price
for price in range(lowest_bid, highest_bid + self.tick_size, self.tick_size)
]
asks_to_place = [
price
for price in range(lowest_ask, highest_ask + self.tick_size, self.tick_size)
]
return bids_to_place, asks_to_place
def place_orders(self, mid: int) -> None:
"""Given a mid-price, compute new orders that need to be placed, then
send the orders to the Exchange.
Arguments:
mid: Mid price.
"""
bid_orders, ask_orders = self.compute_orders_to_place(mid)
orders = []
if self.backstop_quantity != 0:
bid_price = bid_orders[0]
logger.debug(
"{}: Placing BUY limit order of size {} @ price {}",
self.name,
self.backstop_quantity,
bid_price,
)
orders.append(
self.create_limit_order(
self.symbol, self.backstop_quantity, Side.BID, bid_price
)
)
bid_orders = bid_orders[1:]
ask_price = ask_orders[-1]
logger.debug(
"{}: Placing SELL limit order of size {} @ price {}",
self.name,
self.backstop_quantity,
ask_price,
)
orders.append(
self.create_limit_order(
self.symbol, self.backstop_quantity, Side.ASK, ask_price
)
)
ask_orders = ask_orders[:-1]
for bid_price in bid_orders:
logger.debug(
"{}: Placing BUY limit order of size {} @ price {}",
self.name,
self.buy_order_size,
bid_price,
)
orders.append(
self.create_limit_order(
self.symbol, self.buy_order_size, Side.BID, bid_price
)
)
for ask_price in ask_orders:
logger.debug(
"{}: Placing SELL limit order of size {} @ price {}",
self.name,
self.sell_order_size,
ask_price,
)
orders.append(
self.create_limit_order(
self.symbol, self.sell_order_size, Side.ASK, ask_price
)
)
self.place_multiple_orders(orders)
def get_wake_frequency(self) -> NanosecondTime:
if not self.poisson_arrival:
return self.wake_up_freq
else:
delta_time = self.random_state.exponential(scale=self.arrival_rate)
return int(round(delta_time))
```
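The ladder construction in `compute_orders_to_place` above can be illustrated with a small standalone sketch; the mid price, window size, tick size and number of ticks below are made-up example values rather than the agent's defaults.
```python
from math import ceil, floor

def ladder_prices(mid: int, window_size: int, tick_size: int, num_ticks: int):
    # Mirrors the ANCHOR_MIDDLE branch of compute_orders_to_place above.
    highest_bid = mid - floor(0.5 * window_size)
    lowest_ask = mid + ceil(0.5 * window_size)
    lowest_bid = highest_bid - (num_ticks - 1) * tick_size
    highest_ask = lowest_ask + (num_ticks - 1) * tick_size
    bids = list(range(lowest_bid, highest_bid + tick_size, tick_size))
    asks = list(range(lowest_ask, highest_ask + tick_size, tick_size))
    return bids, asks

# Example: mid of $100.00 (10_000 cents), 10-cent window, 2-cent spacing, 3 levels per side.
print(ladder_prices(10_000, 10, 2, 3))
# -> ([9991, 9993, 9995], [10005, 10007, 10009])
```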
#### File: abides_markets/agents/utils.py
```python
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from ..price_level import PriceLevel
################## STATE MANIPULATION ###############################
def list_dict_flip(ld: List[Dict[str, Any]]) -> Dict[str, List[Any]]:
"""
    Utility that flips a list of dictionaries into a dictionary of lists
    Arguments:
        - ld: list of dictionaries
    Returns:
        - flipped: dictionary of lists
Example:
- ld = [{"a":1, "b":2}, {"a":3, "b":4}]
- flipped = {'a': [1, 3], 'b': [2, 4]}
"""
flipped = dict((k, []) for (k, v) in ld[0].items())
for rs in ld:
for k in flipped.keys():
flipped[k].append(rs[k])
return flipped
def identity_decorator(func):
"""
    identity decorator: takes a function and returns that same function
Arguments:
- func: function
Returns:
- wrapper_identity_decorator: function
"""
def wrapper_identity_decorator(*args, **kvargs):
return func(*args, **kvargs)
return wrapper_identity_decorator
def ignore_mkt_data_buffer_decorator(func):
"""
Decorator for function that takes as input self and raw_state.
Applies the given function while ignoring the buffering in the market data.
    Only the last element of the market data buffer is kept.
    Arguments:
        - func: function
    Returns:
        - wrapper_mkt_data_buffer_decorator: function
"""
def wrapper_mkt_data_buffer_decorator(self, raw_state):
raw_state_copy = deepcopy(raw_state)
for i in range(len(raw_state)):
raw_state[i]["parsed_mkt_data"] = raw_state_copy[i]["parsed_mkt_data"][-1]
raw_state[i]["parsed_volume_data"] = raw_state_copy[i][
"parsed_volume_data"
][-1]
raw_state2 = list_dict_flip(raw_state)
flipped = dict((k, list_dict_flip(v)) for (k, v) in raw_state2.items())
return func(self, flipped)
return wrapper_mkt_data_buffer_decorator
def ignore_buffers_decorator(func):
"""
Decorator for function that takes as input self and raw_state.
Applies the given function while ignoring the buffering in both the market data and the general raw state.
    Only the last elements are kept.
    Arguments:
        - func: function
    Returns:
        - wrapper_ignore_buffers_decorator: function
"""
def wrapper_ignore_buffers_decorator(self, raw_state):
raw_state = raw_state[-1]
if len(raw_state["parsed_mkt_data"]) == 0:
pass
else:
raw_state["parsed_mkt_data"] = raw_state["parsed_mkt_data"][-1]
if raw_state["parsed_volume_data"]:
raw_state["parsed_volume_data"] = raw_state["parsed_volume_data"][-1]
return func(self, raw_state)
return wrapper_ignore_buffers_decorator
################# ORDERBOOK PRIMITIVES ######################
def get_mid_price(
bids: List[PriceLevel], asks: List[PriceLevel], last_transaction: int
) -> int:
"""
Utility that computes the mid price from the snapshot of bid and ask side
Arguments:
- bids: list of list snapshot of bid side
- asks: list of list snapshot of ask side
        - last_transaction: last transaction in the market, used for corner cases when one side of the OB is empty
Returns:
- mid_price value
"""
if len(bids) == 0 and len(asks) == 0:
return last_transaction
elif len(bids) == 0:
return asks[0][0]
elif len(asks) == 0:
return bids[0][0]
else:
return (bids[0][0] + asks[0][0]) / 2
def get_val(book: List[PriceLevel], level: int) -> Tuple[int, int]:
"""
    utility to compute the price and volume at the level-th level of the order book
Arguments:
- book: side of the order book (bid or ask)
- level: level of interest in the OB side (index starts at 0 for best bid/ask)
Returns:
        - tuple (price, volume) at the requested level
"""
if book == []:
return 0, 0
else:
try:
price = book[level][0]
volume = book[level][1]
return price, volume
except:
return 0, 0
def get_last_val(book: List[PriceLevel], mid_price: int) -> int:
"""
utility to compute the price of the deepest placed order in the side of the order book
Arguments:
- book: side of the order book (bid or ask)
- mid_price: current mid price used for corner cases
Returns:
        - price of the deepest order on that side, or mid_price if the side is empty
"""
if book == []:
return mid_price
else:
return book[-1][0]
def get_volume(book: List[PriceLevel], depth: Optional[int] = None) -> int:
"""
utility to compute the volume placed between the top of the book (depth 0) and the depth
Arguments:
- book: side of the order book (bid or ask)
- depth: depth used to compute sum of the volume
Returns:
- volume placed
"""
if depth is None:
return sum([v[1] for v in book])
else:
return sum([v[1] for v in book[:depth]])
def get_imbalance(
bids: List[PriceLevel],
asks: List[PriceLevel],
direction: str = "BUY",
depth: Optional[int] = None,
) -> float:
"""
    utility to compute the order book imbalance between the top of the book and the given depth
Arguments:
- bids: list of list snapshot of bid side
- asks: list of list snapshot of ask side
- direction: side used to compute the numerator in the division
- depth: depth used to compute sum of the volume
Returns:
- imbalance
"""
# None corresponds to the whole book depth
if (bids == []) and (asks == []):
return 0.5
elif bids == []:
if direction == "BUY":
return 0
else:
return 1
elif asks == []:
if direction == "BUY":
return 1
else:
return 0
else:
        if depth is None:
bid_vol = sum([v[1] for v in bids])
ask_vol = sum([v[1] for v in asks])
else:
bid_vol = sum([v[1] for v in bids[:depth]])
ask_vol = sum([v[1] for v in asks[:depth]])
if direction == "BUY":
return bid_vol / (bid_vol + ask_vol)
else:
return ask_vol / (bid_vol + ask_vol)
```
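A minimal usage sketch for the order book primitives above, assuming the `abides_markets` package is importable; the toy price levels (price in cents, then volume) are illustrative only.
```python
from abides_markets.agents.utils import (
    get_imbalance,
    get_mid_price,
    get_volume,
    list_dict_flip,
)

bids = [[10_000, 50], [9_999, 30]]  # (price, volume) levels, best first
asks = [[10_002, 20]]

print(get_mid_price(bids, asks, last_transaction=10_001))  # (10000 + 10002) / 2 -> 10001.0
print(get_volume(bids))                                    # 50 + 30 -> 80
print(get_imbalance(bids, asks, direction="BUY"))          # 80 / (80 + 20) -> 0.8
print(list_dict_flip([{"a": 1, "b": 2}, {"a": 3, "b": 4}]))  # {'a': [1, 3], 'b': [2, 4]}
```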
#### File: tests/orderbook/__init__.py
```python
from typing import List, Tuple
from abides_core import Message
from abides_markets.order_book import OrderBook
from abides_markets.orders import LimitOrder, Side
SYMBOL = "X"
TIME = 0
class FakeExchangeAgent:
def __init__(self):
self.messages = []
self.current_time = TIME
self.mkt_open = TIME
self.book_logging = None
self.stream_history = 10
def reset(self):
self.messages = []
def send_message(self, recipient_id: int, message: Message, _: int = 0):
self.messages.append((recipient_id, message))
def logEvent(self, *args, **kwargs):
pass
def setup_book_with_orders(
bids: List[Tuple[int, List[int]]] = [], asks: List[Tuple[int, List[int]]] = []
) -> Tuple[OrderBook, FakeExchangeAgent, List[LimitOrder]]:
agent = FakeExchangeAgent()
book = OrderBook(agent, SYMBOL)
orders = []
for price, quantities in bids:
for quantity in quantities:
order = LimitOrder(1, TIME, SYMBOL, quantity, Side.BID, price)
book.handle_limit_order(order)
orders.append(order)
for price, quantities in asks:
for quantity in quantities:
order = LimitOrder(1, TIME, SYMBOL, quantity, Side.ASK, price)
book.handle_limit_order(order)
orders.append(order)
agent.reset()
return book, agent, orders
``` |
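A hedged sketch of how this helper might be used in a new test, assuming the repository's `tests` package and `abides_markets` are importable (e.g. when running from the repository root); the price levels are arbitrary.
```python
from tests.orderbook import setup_book_with_orders

# Two bid price levels (price, list of order quantities) and one ask level.
book, agent, orders = setup_book_with_orders(
    bids=[(100, [10, 20]), (99, [5])],
    asks=[(102, [15])],
)
print(len(book.bids), len(book.asks))  # 2 1
print(len(orders))                     # 4 limit orders were placed in total
print(agent.messages)                  # [] -- the fake agent is reset after setup
```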
{
"source": "jpmorganchase/ABIDES-jpmc-gym",
"score": 4
} |
#### File: abides-core/abides_core/generators.py
```python
from abc import abstractmethod, ABC
from typing import Generic, Optional, TypeVar
import numpy as np
T = TypeVar("T")
class BaseGenerator(ABC, Generic[T]):
"""
This is an abstract base class defining the interface for Generator objects in
ABIDES. This class is not used directly and is instead inherited from child classes.
Generators should produce an infinite amount of values.
"""
@abstractmethod
def next(self) -> T:
"""
Produces the next value from the generator.
"""
raise NotImplementedError
@abstractmethod
def mean(self) -> T:
"""
Returns the average of the distribution of values generated.
"""
raise NotImplementedError
class InterArrivalTimeGenerator(BaseGenerator[float], ABC):
"""
    General class for time generation. These generators are used to generate a delta time between the current time and the next wakeup of the agent.
"""
pass
class ConstantTimeGenerator(InterArrivalTimeGenerator):
"""
Generates constant delta time of length step_duration
Arguments:
step_duration: length of the delta time in ns
"""
def __init__(self, step_duration: float) -> None:
self.step_duration: float = step_duration
def next(self) -> float:
"""
returns constant time delta for next wakeup
"""
return self.step_duration
def mean(self) -> float:
"""
time delta is constant
"""
return self.step_duration
class PoissonTimeGenerator(InterArrivalTimeGenerator):
"""
    Lambda must be specified either in seconds through lambda_time or in seconds^-1
through lambda_freq.
Arguments:
random_generator: configuration random generator
lambda_freq: frequency (in s^-1)
lambda_time: period (in seconds)
"""
def __init__(
self,
random_generator: np.random.RandomState,
lambda_freq: Optional[float] = None,
lambda_time: Optional[float] = None,
) -> None:
self.random_generator: np.random.RandomState = random_generator
assert (lambda_freq is None and lambda_time is not None) or (
lambda_time is None and lambda_freq is not None
), "specify lambda in frequency OR in time"
self.lambda_s: float = lambda_freq or 1 / lambda_time
def next(self) -> Optional[float]:
"""
        returns the time delta to the next wakeup, in nanoseconds, drawn from an exponential distribution (Poisson arrival process)
"""
seconds = self.random_generator.exponential(1 / self.lambda_s)
return seconds * 1_000_000_000 if seconds is not None else None
def mean(self) -> float:
"""
        returns the mean inter-arrival time, 1/lambda (expressed in seconds)
"""
return 1 / self.lambda_s
```
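A short usage sketch for the generators above, assuming `abides_core` is installed; the step duration and lambda values are arbitrary examples.
```python
import numpy as np
from abides_core.generators import ConstantTimeGenerator, PoissonTimeGenerator

constant = ConstantTimeGenerator(step_duration=60 * 1_000_000_000)  # 60 seconds in ns
print(constant.next(), constant.mean())  # 60000000000 60000000000

poisson = PoissonTimeGenerator(
    random_generator=np.random.RandomState(seed=1),
    lambda_freq=0.5,  # on average one arrival every 2 seconds
)
print(poisson.next())  # a random inter-arrival time, in nanoseconds
print(poisson.mean())  # 1 / lambda_freq = 2.0 (note: expressed in seconds)
```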
#### File: abides_gym/envs/markets_environment.py
```python
from copy import deepcopy
from abc import abstractmethod, ABC
from typing import Any, Callable, Dict, List, Optional, Tuple
import gym
import numpy as np
from gym.utils import seeding
import abides_markets.agents.utils as markets_agent_utils
from abides_core import Kernel, NanosecondTime
from abides_core.generators import InterArrivalTimeGenerator
from abides_core.utils import subdict
from abides_markets.utils import config_add_agents
from .core_environment import AbidesGymCoreEnv
from ..experimental_agents.financial_gym_agent import FinancialGymAgent
class AbidesGymMarketsEnv(AbidesGymCoreEnv, ABC):
"""
    Abstract class for market gyms to inherit from in order to create usable, specific ABIDES Gym environments
    Arguments:
        - background_config_pair: tuple consisting of the background config builder function and the inputs to use
        - wakeup_interval_generator: generator used to compute delta time wakeup for the gym experimental agent
        - starting_cash: cash of the agents at the beginning of the simulation
        - state_buffer_length: length of the raw state buffer
- market_data_buffer_length: length of the market data buffer
- first_interval: how long the simulation is run before the first wake up of the gym experimental agent
- raw_state_pre_process: decorator used to pre-process raw_state
"""
raw_state_pre_process = markets_agent_utils.identity_decorator
def __init__(
self,
background_config_pair: Tuple[Callable, Optional[Dict[str, Any]]],
wakeup_interval_generator: InterArrivalTimeGenerator,
starting_cash: int,
state_buffer_length: int,
market_data_buffer_length: int,
first_interval: Optional[NanosecondTime] = None,
raw_state_pre_process=markets_agent_utils.identity_decorator,
) -> None:
super().__init__(
background_config_pair,
wakeup_interval_generator,
state_buffer_length,
first_interval=first_interval,
gymAgentConstructor=FinancialGymAgent,
)
self.starting_cash: int = starting_cash
self.market_data_buffer_length: int = market_data_buffer_length
self.extra_gym_agent_kvargs = {
"starting_cash": self.starting_cash,
"market_data_buffer_length": self.market_data_buffer_length,
}
self.extra_background_config_kvargs = {
"exchange_log_orders": False,
"book_logging": False, # may need to set to True if wants to return OB in terminal state when episode ends (gym2)
"log_orders": None,
}
```
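`AbidesGymMarketsEnv` is abstract and is only instantiated through concrete subclasses; the sketch below merely illustrates the expected shape of `background_config_pair` and `wakeup_interval_generator`, using a hypothetical stub builder in place of a real ABIDES background config.
```python
from abides_core.generators import ConstantTimeGenerator

def build_config_stub(**kwargs):
    # Hypothetical stand-in for a background config builder such as
    # abides_markets.configs.rmsc04.build_config; it only shows the call shape.
    return {"agents": [], **kwargs}

background_config_pair = (build_config_stub, {"end_time": "16:00:00"})
wakeup_interval_generator = ConstantTimeGenerator(step_duration=60 * 1_000_000_000)
# A concrete subclass would forward these (plus starting_cash, buffer lengths, ...)
# to AbidesGymMarketsEnv.__init__ via super().__init__(...).
```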
#### File: abides_gym/envs/markets_execution_environment_v0.py
```python
import importlib
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, List
from abc import ABC
import gym
import numpy as np
import abides_markets.agents.utils as markets_agent_utils
from abides_core import NanosecondTime
from abides_core.utils import str_to_ns
from abides_core.generators import ConstantTimeGenerator
from .markets_environment import AbidesGymMarketsEnv
class SubGymMarketsExecutionEnv_v0(AbidesGymMarketsEnv):
"""
    Execution V0 environment. It defines one of the ABIDES-Gym-markets environments.
    This environment presents an example of the algorithmic order execution problem.
    The agent either has an initial inventory of the stock it tries to trade out of, or has no initial inventory and
    tries to acquire a target number of shares. The goal is to realize this task while minimizing transaction cost from spreads
    and market impact. It does so by splitting the parent order into several smaller child orders.
    Arguments:
        - background_config: the handcrafted agents configuration used for the environment
        - mkt_close: time the market day ends
        - timestep_duration: how long between two wake-ups of the gym experimental agent
        - starting_cash: cash of the agents at the beginning of the simulation
        - order_fixed_size: size of the order placed by the experimental gym agent
        - state_history_length: length of the raw state buffer
        - market_data_buffer_length: length of the market data buffer
        - first_interval: how long the simulation is run before the first wake up of the gym experimental agent
        - parent_order_size: total size the agent has to execute (either buy or sell)
        - execution_window: time length the agent is given to proceed with the parent_order_size execution
        - direction: direction of the parent order (buy or sell)
        - not_enough_reward_update: constant penalty per non-executed share at the end of the time window
        - just_quantity_reward_update: update reward if all of the order is completed
        - reward_mode: can use a dense or sparse reward formulation
        - done_ratio: ratio (mark2market_t/starting_cash) that defines when an episode is done (if the agent has lost too much mark to market value)
        - debug_mode: argument to change the info dictionary (lighter version if performance is an issue)
- background_config_extra_kvargs: dictionary of extra key value arguments passed to the background config builder function
    Execution V0:
- Action Space:
- MKT order_fixed_size
- LMT order_fixed_size
- Hold
- State Space:
- holdings_pct
- time_pct
- diff_pct
- imbalance_all
- imbalance_5
- price_impact
- spread
- direction
- returns
"""
raw_state_pre_process = markets_agent_utils.ignore_buffers_decorator
raw_state_to_state_pre_process = (
markets_agent_utils.ignore_mkt_data_buffer_decorator
)
@dataclass
class CustomMetricsTracker(ABC):
"""
Data Class used to track custom metrics that are output to rllib
"""
slippage_reward: float = 0
late_penalty_reward: float = 0 # at the end of the episode
executed_quantity: int = 0 # at the end of the episode
remaining_quantity: int = 0 # at the end of the episode
action_counter: Dict[str, int] = field(default_factory=dict)
holdings_pct: float = 0
time_pct: float = 0
diff_pct: float = 0
imbalance_all: float = 0
imbalance_5: float = 0
price_impact: int = 0
spread: int = 0
direction_feature: float = 0
num_max_steps_per_episode: float = 0
def __init__(
self,
background_config: Any = "rmsc04",
mkt_close: str = "16:00:00",
timestep_duration: str = "60s",
starting_cash: int = 1_000_000,
order_fixed_size: int = 10,
state_history_length: int = 4,
market_data_buffer_length: int = 5,
first_interval: str = "00:00:30",
parent_order_size: int = 1000,
execution_window: str = "00:10:00",
direction: str = "BUY",
not_enough_reward_update: int = -1000,
too_much_reward_update: int = -100,
just_quantity_reward_update: int = 0,
debug_mode: bool = False,
background_config_extra_kvargs: Dict[str, Any] = {},
) -> None:
self.background_config: Any = importlib.import_module(
"abides_markets.configs.{}".format(background_config), package=None
)
self.mkt_close: NanosecondTime = str_to_ns(mkt_close)
self.timestep_duration: NanosecondTime = str_to_ns(timestep_duration)
self.starting_cash: int = starting_cash
self.order_fixed_size: int = order_fixed_size
self.state_history_length: int = state_history_length
self.market_data_buffer_length: int = market_data_buffer_length
self.first_interval: NanosecondTime = str_to_ns(first_interval)
self.parent_order_size: int = parent_order_size
        self.execution_window: NanosecondTime = str_to_ns(execution_window)
self.direction: str = direction
self.debug_mode: bool = debug_mode
self.too_much_reward_update: int = too_much_reward_update
self.not_enough_reward_update: int = not_enough_reward_update
self.just_quantity_reward_update: int = just_quantity_reward_update
self.entry_price: int = 1
self.far_touch: int = 1
self.near_touch: int = 1
self.step_index: int = 0
self.custom_metrics_tracker = (
self.CustomMetricsTracker()
) # init the custom metric tracker
##################
# CHECK PROPERTIES
assert background_config in [
"rmsc03",
"rmsc04",
"smc_01",
], "Select rmsc03 or rmsc04 as config"
assert (self.first_interval <= str_to_ns("16:00:00")) & (
self.first_interval >= str_to_ns("00:00:00")
), "Select authorized FIRST_INTERVAL delay"
assert (self.mkt_close <= str_to_ns("16:00:00")) & (
self.mkt_close >= str_to_ns("09:30:00")
), "Select authorized market hours"
assert (self.timestep_duration <= str_to_ns("06:30:00")) & (
self.timestep_duration >= str_to_ns("00:00:00")
), "Select authorized timestep_duration"
assert (type(self.starting_cash) == int) & (
self.starting_cash >= 0
), "Select positive integer value for starting_cash"
assert (type(self.order_fixed_size) == int) & (
self.order_fixed_size >= 0
), "Select positive integer value for order_fixed_size"
        assert (type(self.state_history_length) == int) & (
            self.state_history_length >= 0
        ), "Select positive integer value for state_history_length"
        assert (type(self.market_data_buffer_length) == int) & (
            self.market_data_buffer_length >= 0
        ), "Select positive integer value for market_data_buffer_length"
assert self.debug_mode in [
True,
False,
], "debug_mode needs to be True or False"
assert self.direction in [
"BUY",
"SELL",
], "direction needs to be BUY or SELL"
        assert (type(self.parent_order_size) == int) & (
            self.parent_order_size >= 0
        ), "Select positive integer value for parent_order_size"
assert (self.execution_window <= str_to_ns("06:30:00")) & (
self.execution_window >= str_to_ns("00:00:00")
), "Select authorized execution_window"
assert (
type(self.too_much_reward_update) == int
), "Select integer value for too_much_reward_update"
assert (
type(self.not_enough_reward_update) == int
), "Select integer value for not_enough_reward_update"
assert (
type(self.just_quantity_reward_update) == int
), "Select integer value for just_quantity_reward_update"
background_config_args = {"end_time": self.mkt_close}
background_config_args.update(background_config_extra_kvargs)
super().__init__(
background_config_pair=(
self.background_config.build_config,
background_config_args,
),
wakeup_interval_generator=ConstantTimeGenerator(
step_duration=self.timestep_duration
),
starting_cash=self.starting_cash,
state_buffer_length=self.state_history_length,
market_data_buffer_length=self.market_data_buffer_length,
first_interval=self.first_interval,
)
# Action Space
# MKT order_fixed_size | LMT order_fixed_size | Hold
self.num_actions: int = 3
self.action_space: gym.Space = gym.spaces.Discrete(self.num_actions)
# instantiate the action counter
for i in range(self.num_actions):
self.custom_metrics_tracker.action_counter[f"action_{i}"] = 0
num_ns_episode = self.first_interval + self.execution_window
step_length = self.timestep_duration
num_max_steps_per_episode = num_ns_episode / step_length
self.custom_metrics_tracker.num_max_steps_per_episode = (
num_max_steps_per_episode
)
# State Space
# [holdings, imbalance,spread, direction_feature] + padded_returns
self.num_state_features: int = 8 + self.state_history_length - 1
# construct state space "box"
# holdings_pct, time_pct, diff_pct, imbalance_all, imbalance_5, price_impact, spread, direction, returns
self.state_highs: np.ndarray = np.array(
[
2, # holdings_pct
2, # time_pct
4, # diff_pct
1, # imbalance_all
1, # imbalance_5
np.finfo(np.float32).max, # price_impact
np.finfo(np.float32).max, # spread
np.finfo(np.float32).max,
]
            + (self.state_history_length - 1)  # direction
* [np.finfo(np.float32).max], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.state_lows: np.ndarray = np.array(
[
-2, # holdings_pct
-2, # time_pct
-4, # diff_pct
0, # imbalance_all
0, # imbalance_5
np.finfo(np.float32).min, # price_impact
np.finfo(np.float32).min, # spread
np.finfo(np.float32).min,
]
+ (self.state_history_length - 1) # direction
* [np.finfo(np.float32).min], # returns
dtype=np.float32,
).reshape(self.num_state_features, 1)
self.observation_space: gym.Space = gym.spaces.Box(
self.state_lows,
self.state_highs,
shape=(self.num_state_features, 1),
dtype=np.float32,
)
# initialize previous_marked_to_market to starting_cash (No holding at the beginning of the episode)
self.previous_marked_to_market: int = self.starting_cash
def _map_action_space_to_ABIDES_SIMULATOR_SPACE(
self, action: int
) -> List[Dict[str, Any]]:
"""
        utility function that maps the OpenAI Gym action definition (integers) to the environment API action definition (list of dictionaries)
The action space ranges [0, 1, 2] where:
- `0` MKT direction order_fixed_size
- '1' LMT direction order_fixed_size
- '2' DO NOTHING
Arguments:
- action: integer representation of the different actions
Returns:
- action_list: list of the corresponding series of action mapped into abides env apis
"""
self.custom_metrics_tracker.action_counter[
f"action_{action}"
] += 1 # increase counter
if action == 0:
return [
{"type": "CCL_ALL"},
{
"type": "MKT",
"direction": self.direction,
"size": self.order_fixed_size,
},
]
elif action == 1:
return [
{"type": "CCL_ALL"},
{
"type": "LMT",
"direction": self.direction,
"size": self.order_fixed_size,
"limit_price": self.near_touch,
},
]
elif action == 2:
return []
else:
raise ValueError(
f"Action {action} is not part of the actions supported by the function."
)
@raw_state_to_state_pre_process
def raw_state_to_state(self, raw_state: Dict[str, Any]) -> np.ndarray:
"""
method that transforms a raw state into a state representation
Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - state: state representation defining the MDP for the execution v0 environment
"""
# 0) Preliminary
bids = raw_state["parsed_mkt_data"]["bids"]
asks = raw_state["parsed_mkt_data"]["asks"]
last_transactions = raw_state["parsed_mkt_data"]["last_transaction"]
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
holdings_pct = holdings[-1] / self.parent_order_size
# 2) Timing
# 2)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"][-1]
# 2)b) time from beginning of execution (parent arrival)
current_time = raw_state["internal_data"]["current_time"][-1]
time_from_parent_arrival = current_time - mkt_open - self.first_interval
assert (
current_time >= mkt_open + self.first_interval
), "Agent has woken up earlier than its first interval"
# 2)c) time limit
time_limit = self.execution_window
# 2)d) compute percentage time advancement
time_pct = time_from_parent_arrival / time_limit
# 3) Advancement Comparison
diff_pct = holdings_pct - time_pct
# 3) Imbalance
imbalances_all = [
markets_agent_utils.get_imbalance(b, a, depth=None)
for (b, a) in zip(bids, asks)
]
imbalance_all = imbalances_all[-1]
imbalances_5 = [
markets_agent_utils.get_imbalance(b, a, depth=5)
for (b, a) in zip(bids, asks)
]
imbalance_5 = imbalances_5[-1]
# 4) price_impact
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
mid_price = mid_prices[-1]
        if self.step_index == 0:  # no order has been executed yet
self.entry_price = mid_price
entry_price = self.entry_price
book = (
raw_state["parsed_mkt_data"]["bids"][-1]
if self.direction == "BUY"
else raw_state["parsed_mkt_data"]["asks"][-1]
)
self.near_touch = book[0][0] if len(book) > 0 else last_transactions[-1]
# Compute the price impact
price_impact = (
np.log(mid_price / entry_price)
if self.direction == "BUY"
else np.log(entry_price / mid_price)
)
# 5) Spread
best_bids = [
bids[0][0] if len(bids) > 0 else mid
for (bids, mid) in zip(bids, mid_prices)
]
best_asks = [
asks[0][0] if len(asks) > 0 else mid
for (asks, mid) in zip(asks, mid_prices)
]
spreads = np.array(best_asks) - np.array(best_bids)
spread = spreads[-1]
# 6) direction feature
direction_features = np.array(mid_prices) - np.array(last_transactions)
direction_feature = direction_features[-1]
        # 7) returns (computed from the mid prices)
mid_prices = [
markets_agent_utils.get_mid_price(b, a, lt)
for (b, a, lt) in zip(bids, asks, last_transactions)
]
returns = np.diff(mid_prices)
padded_returns = np.zeros(self.state_history_length - 1)
padded_returns[-len(returns) :] = (
returns if len(returns) > 0 else padded_returns
)
# log custom metrics to tracker
self.custom_metrics_tracker.holdings_pct = holdings_pct
self.custom_metrics_tracker.time_pct = time_pct
self.custom_metrics_tracker.diff_pct = diff_pct
self.custom_metrics_tracker.imbalance_all = imbalance_all
self.custom_metrics_tracker.imbalance_5 = imbalance_5
self.custom_metrics_tracker.price_impact = price_impact
self.custom_metrics_tracker.spread = spread
self.custom_metrics_tracker.direction_feature = direction_feature
# 8) Computed State
computed_state = np.array(
[
holdings_pct,
time_pct,
diff_pct,
imbalance_all,
imbalance_5,
price_impact,
spread,
direction_feature,
]
+ padded_returns.tolist(),
dtype=np.float32,
)
#
self.step_index += 1
return computed_state.reshape(self.num_state_features, 1)
@raw_state_pre_process
def raw_state_to_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the reward obtained during the step
Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - reward: immediate reward computed at each step for the execution v0 environment
"""
        # here we define the reward as the slippage PnL of the executed child orders versus the entry price, normalized by parent_order_size
# 1) entry_price
entry_price = self.entry_price
# 2) inter_wakeup_executed_orders
inter_wakeup_executed_orders = raw_state["internal_data"][
"inter_wakeup_executed_orders"
]
# 3) Compute PNL of the orders
if len(inter_wakeup_executed_orders) == 0:
pnl = 0
else:
pnl = (
sum(
(entry_price - order.fill_price) * order.quantity
for order in inter_wakeup_executed_orders
)
if self.direction == "BUY"
else sum(
(order.fill_price - entry_price) * order.quantity
for order in inter_wakeup_executed_orders
)
)
self.pnl = pnl
# 4) normalization
reward = pnl / self.parent_order_size
# log custom metrics to tracker
self.custom_metrics_tracker.slippage_reward = reward
return reward
@raw_state_pre_process
def raw_state_to_update_reward(self, raw_state: Dict[str, Any]) -> float:
"""
method that transforms a raw state into the final step reward update (if needed)
Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - reward: update reward computed at the end of the episode for the execution v0 environment
"""
# can update with additional reward at end of episode depending on scenario normalized by parent_order_size
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) Compute update_reward
if (self.direction == "BUY") and (holdings >= parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed buy too much
elif (self.direction == "BUY") and (holdings < parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed buy not enough
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.too_much_reward_update
) # executed sell too much
elif (self.direction == "SELL") and (holdings > -parent_order_size):
update_reward = (
abs(holdings - parent_order_size) * self.not_enough_reward_update
) # executed sell not enough
else:
update_reward = self.just_quantity_reward_update
# 4) Normalization
update_reward = update_reward / self.parent_order_size
self.custom_metrics_tracker.late_penalty_reward = update_reward
return update_reward
@raw_state_pre_process
def raw_state_to_done(self, raw_state: Dict[str, Any]) -> bool:
"""
        method that transforms a raw state into the flag indicating whether the episode is done
        Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - done: flag that describes whether the episode is terminated or not for the execution v0 environment
"""
# episode can stop because market closes or because some condition is met
# here the condition is parent order fully executed
# 1) Holdings
holdings = raw_state["internal_data"]["holdings"]
# 2) parent_order_size
parent_order_size = self.parent_order_size
# 3) current time
current_time = raw_state["internal_data"]["current_time"]
# 4) time_limit
# 4)a) mkt_open
mkt_open = raw_state["internal_data"]["mkt_open"]
# 4)b time_limit
time_limit = mkt_open + self.first_interval + self.execution_window
# 5) conditions
if (self.direction == "BUY") and (holdings >= parent_order_size):
done = True # Buy parent order executed
elif (self.direction == "SELL") and (holdings <= -parent_order_size):
done = True # Sell parent order executed
elif current_time >= time_limit:
done = True # Mkt Close
else:
done = False
self.custom_metrics_tracker.executed_quantity = (
holdings if self.direction == "BUY" else -holdings
)
self.custom_metrics_tracker.remaining_quantity = (
parent_order_size - self.custom_metrics_tracker.executed_quantity
)
return done
@raw_state_pre_process
def raw_state_to_info(self, raw_state: Dict[str, Any]) -> Dict[str, Any]:
"""
        method that transforms a raw state into an info dictionary
        Arguments:
            - raw_state: dictionary that contains raw simulation information obtained from the gym experimental agent
        Returns:
            - info: info dictionary computed at each step for the execution v0 environment
"""
# Agent cannot use this info for taking decision
# only for debugging
# 1) Last Known Market Transaction Price
last_transaction = raw_state["parsed_mkt_data"]["last_transaction"]
# 2) Last Known best bid
bids = raw_state["parsed_mkt_data"]["bids"]
best_bid = bids[0][0] if len(bids) > 0 else last_transaction
# 3) Last Known best ask
asks = raw_state["parsed_mkt_data"]["asks"]
best_ask = asks[0][0] if len(asks) > 0 else last_transaction
# 4) Current Time
current_time = raw_state["internal_data"]["current_time"]
# 5) Holdings
holdings = raw_state["internal_data"]["holdings"]
if self.debug_mode == True:
return {
"last_transaction": last_transaction,
"best_bid": best_bid,
"best_ask": best_ask,
"current_time": current_time,
"holdings": holdings,
"parent_size": self.parent_order_size,
"pnl": self.pnl,
"reward": self.pnl / self.parent_order_size,
}
else:
return asdict(self.custom_metrics_tracker)
```
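The end-of-episode logic in `raw_state_to_update_reward` can be reproduced as a small pure function for illustration; the penalty constants below are the constructor defaults shown above, and the result is normalized by the parent order size exactly as in the method.
```python
def final_reward_update(
    holdings: int,
    parent_order_size: int,
    direction: str,
    too_much: int = -100,
    not_enough: int = -1000,
    just_right: int = 0,
) -> float:
    # Mirrors raw_state_to_update_reward above: penalize over- or under-execution.
    if direction == "BUY" and holdings >= parent_order_size:
        update = abs(holdings - parent_order_size) * too_much
    elif direction == "BUY" and holdings < parent_order_size:
        update = abs(holdings - parent_order_size) * not_enough
    elif direction == "SELL" and holdings <= -parent_order_size:
        update = abs(holdings - parent_order_size) * too_much
    elif direction == "SELL" and holdings > -parent_order_size:
        update = abs(holdings - parent_order_size) * not_enough
    else:
        update = just_right
    return update / parent_order_size

print(final_reward_update(900, 1000, "BUY"))   # 100 unexecuted shares -> -100.0
print(final_reward_update(1000, 1000, "BUY"))  # exactly filled -> 0.0 (zero excess)
```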
#### File: abides-markets/abides_markets/generators.py
```python
from abc import abstractmethod, ABC
import numpy as np
from abides_core.generators import BaseGenerator
################## ORDER SIZE MODEL ###############################
class OrderSizeGenerator(BaseGenerator[int], ABC):
pass
class ConstantOrderSizeGenerator(OrderSizeGenerator):
def __init__(self, order_size: int) -> None:
self.order_size: int = order_size
def next(self) -> int:
return self.order_size
def mean(self) -> int:
return self.order_size
class UniformOrderSizeGenerator(OrderSizeGenerator):
def __init__(
self,
order_size_min: int,
order_size_max: int,
random_generator: np.random.RandomState,
) -> None:
self.order_size_min: int = order_size_min
self.order_size_max: int = order_size_max + 1
self.random_generator: np.random.RandomState = random_generator
def next(self) -> int:
return self.random_generator.randint(self.order_size_min, self.order_size_max)
def mean(self) -> float:
return (self.order_size_max - self.order_size_min - 1) / 2
################## ORDER DEPTH MODEL ###############################
class OrderDepthGenerator(BaseGenerator[int], ABC):
pass
class ConstantDepthGenerator(OrderDepthGenerator):
def __init__(self, order_depth: int) -> None:
self.order_depth: int = order_depth
def next(self) -> int:
return self.order_depth
def mean(self) -> int:
return self.order_depth
class UniformDepthGenerator(OrderDepthGenerator):
def __init__(
self,
order_depth_min: int,
order_depth_max: int,
random_generator: np.random.RandomState,
) -> None:
self.random_generator: np.random.RandomState = random_generator
self.order_depth_min: int = order_depth_min
self.order_depth_max: int = order_depth_max + 1
def next(self) -> int:
return self.random_generator.randint(self.order_depth_min, self.order_depth_max)
def mean(self) -> float:
return (self.order_depth_max - self.order_depth_min - 1) / 2
```
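A quick usage sketch of the size generators, assuming `abides_markets` is installed; the bounds are arbitrary examples.
```python
import numpy as np
from abides_markets.generators import (
    ConstantOrderSizeGenerator,
    UniformOrderSizeGenerator,
)

constant = ConstantOrderSizeGenerator(order_size=100)
print(constant.next(), constant.mean())  # 100 100

uniform = UniformOrderSizeGenerator(
    order_size_min=10,
    order_size_max=50,
    random_generator=np.random.RandomState(seed=0),
)
print(uniform.next())  # a random integer in [10, 50]
```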
#### File: abides-markets/abides_markets/orders.py
```python
import sys
from abc import ABC, abstractmethod
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, Optional
from abides_core import NanosecondTime
from abides_core.utils import fmt_ts
from .utils import dollarize
class Side(Enum):
BID = "BID"
ASK = "ASK"
def is_bid(self) -> bool:
return self == Side.BID
def is_ask(self) -> bool:
return self == Side.ASK
class Order(ABC):
"""A basic Order type used by an Exchange to conduct trades or maintain an order book.
This should not be confused with order Messages agents send to request an Order.
Specific order types will inherit from this (like LimitOrder).
"""
_order_id_counter: int = 0
@abstractmethod
def __init__(
self,
agent_id: int,
time_placed: NanosecondTime,
symbol: str,
quantity: int,
side: Side,
order_id: Optional[int] = None,
tag: Any = None,
) -> None:
"""
Arguments:
agent_id: The ID of the agent that created this order.
time_placed: Time at which the order was created by the agent.
symbol: Equity symbol for the order.
quantity: Number of equity units affected by the order.
side: Indicates if an order is on the BID or ASK side of the market.
order_id: Either self generated or assigned. Should only be self
generated by the OrderBook class.
tag: A free-form user-defined field that can contain any information
relevant to the entity placing the order. Recommend keeping it
alphanumeric rather than shoving in objects, as it will be there
taking memory for the lifetime of the order and in all logging
mechanisms. Intent: for strategy agents to set tags to help keep
track of the intent of particular orders, to simplify their code.
"""
self.agent_id: int = agent_id
self.time_placed: NanosecondTime = time_placed
self.symbol: str = symbol
self.quantity: int = quantity
self.side: Side = side
if order_id is None:
order_id = Order._order_id_counter
Order._order_id_counter += 1
self.order_id: int = order_id
# Create placeholder fields that don't get filled in until certain events happen.
self.fill_price: Optional[int] = None
self.tag: Optional[Any] = tag
def to_dict(self) -> Dict[str, Any]:
as_dict = deepcopy(self).__dict__
as_dict["time_placed"] = fmt_ts(self.time_placed)
return as_dict
def __eq__(self, other):
return type(other) is type(self) and self.__dict__ == other.__dict__
def __deepcopy__(self, memodict={}):
raise NotImplementedError
class LimitOrder(Order):
"""
LimitOrder class that inherits from Order class and adds a limit price and a
hidden order flag.
These are the Orders that typically go in an Exchange's OrderBook.
"""
def __init__(
self,
agent_id: int,
time_placed: NanosecondTime,
symbol: str,
quantity: int,
side: Side,
limit_price: int,
is_hidden: bool = False,
is_price_to_comply: bool = False,
insert_by_id: bool = False,
is_post_only=False,
order_id: Optional[int] = None,
tag: Optional[Any] = None,
) -> None:
super().__init__(
agent_id, time_placed, symbol, quantity, side, order_id, tag=tag
)
# The limit price is the minimum price the agent will accept (for a sell order) or
# the maximum price the agent will pay (for a buy order).
self.limit_price: int = limit_price
self.is_hidden: bool = is_hidden
self.is_price_to_comply: bool = is_price_to_comply
self.insert_by_id: bool = insert_by_id
self.is_post_only: bool = is_post_only
def __str__(self) -> str:
filled = ""
if self.fill_price:
filled = " (filled @ {})".format(dollarize(self.fill_price))
# Until we make explicit market orders, we make a few assumptions that EXTREME prices on limit
# orders are trying to represent a market order. This only affects printing - they still hit
# the order book like limit orders, which is wrong.
return "(Agent {} @ {}{}) : {} {} {} @ {}{}".format(
self.agent_id,
fmt_ts(self.time_placed),
f" [{self.tag}]" if self.tag is not None else "",
self.side.value,
self.quantity,
self.symbol,
dollarize(self.limit_price)
if abs(self.limit_price) < sys.maxsize
else "MKT",
filled,
)
def __repr__(self) -> str:
return self.__str__()
def __deepcopy__(self, memodict={}) -> "LimitOrder":
tag = None if self.tag is None else deepcopy(self.tag)
order = LimitOrder(
self.agent_id,
self.time_placed,
self.symbol,
self.quantity,
self.side,
self.limit_price,
self.is_hidden,
self.is_price_to_comply,
self.insert_by_id,
order_id=self.order_id,
is_post_only=self.is_post_only,
tag=tag,
)
order.fill_price = self.fill_price
return order
class MarketOrder(Order):
"""MarketOrder class, inherits from Order class."""
def __init__(
self,
agent_id: int,
time_placed: NanosecondTime,
symbol: str,
quantity: int,
side: Side,
order_id: Optional[int] = None,
tag: Optional[Any] = None,
) -> None:
super().__init__(
agent_id, time_placed, symbol, quantity, side, order_id=order_id, tag=tag
)
def __str__(self) -> str:
return "(Agent {} @ {}) : MKT Order {} {} {}".format(
self.agent_id,
fmt_ts(self.time_placed),
self.side.value,
self.quantity,
self.symbol,
)
def __repr__(self) -> str:
return self.__str__()
def __deepcopy__(self, memodict={}) -> "MarketOrder":
tag = None if self.tag is None else deepcopy(self.tag)
order = MarketOrder(
self.agent_id,
self.time_placed,
self.symbol,
self.quantity,
self.side,
order_id=self.order_id,
tag=tag,
)
order.fill_price = self.fill_price
return order
```
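A short sketch constructing the order types above, assuming `abides_markets` is installed; the symbol and prices are placeholders (prices in cents).
```python
from abides_markets.orders import LimitOrder, MarketOrder, Side

bid = LimitOrder(
    agent_id=1,
    time_placed=0,
    symbol="ABM",
    quantity=100,
    side=Side.BID,
    limit_price=10_000,  # $100.00 expressed in cents
)
mkt = MarketOrder(agent_id=2, time_placed=0, symbol="ABM", quantity=50, side=Side.ASK)

print(bid.side.is_bid(), mkt.side.is_ask())  # True True
print(bid.order_id != mkt.order_id)          # True -- ids are auto-incremented
print(bid)                                   # human-readable form via __str__ and dollarize
```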
#### File: abides_markets/utils/__init__.py
```python
import datetime
import sys
import traceback
import warnings
from contextlib import contextmanager
from typing import List, Union
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, squareform
from abides_core import LatencyModel
# Utility method to flatten nested lists.
def delist(list_of_lists):
return [x for b in list_of_lists for x in b]
def numeric(s):
"""Returns numeric type from string, stripping commas from the right.
Adapted from https://stackoverflow.com/a/379966.
"""
s = s.rstrip(",")
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
def get_value_from_timestamp(s: pd.Series, ts: datetime.datetime):
"""Get the value of s corresponding to closest datetime to ts.
Arguments:
s: Pandas Series with pd.DatetimeIndex.
ts: Timestamp at which to retrieve data.
"""
ts_str = ts.strftime("%Y-%m-%d %H:%M:%S")
s = s.loc[~s.index.duplicated(keep="last")]
locs = s.index.get_loc(ts_str, method="nearest")
out = (
s[locs][0]
if (isinstance(s[locs], np.ndarray) or isinstance(s[locs], pd.Series))
else s[locs]
)
return out
@contextmanager
def ignored(warning_str, *exceptions):
"""Context manager that wraps the code block in a try except statement, catching
specified exceptions and printing warning supplied by user.
Arguments:
warning_str: Warning statement printed when exception encountered.
exceptions: An exception type, e.g. ``ValueError``.
https://stackoverflow.com/a/15573313
"""
try:
yield
except exceptions:
warnings.warn(warning_str, UserWarning, stacklevel=1)
print(warning_str)
def generate_uniform_random_pairwise_dist_on_line(
left: float, right: float, num_points: int, random_state: np.random.RandomState
) -> np.ndarray:
"""Uniformly generate points on an interval, and return numpy array of pairwise
distances between points.
Arguments:
left: Left endpoint of interval.
right: Right endpoint of interval.
num_points: Number of points to use.
random_state: ``np.random.RandomState`` object.
"""
x_coords = random_state.uniform(low=left, high=right, size=num_points)
x_coords = x_coords.reshape((x_coords.size, 1))
out = pdist(x_coords, "euclidean")
return squareform(out)
def meters_to_light_ns(x):
"""Converts x in units of meters to light nanoseconds."""
x_lns = x / 299792458e-9
x_lns = x_lns.astype(int)
return x_lns
def validate_window_size(s):
"""Check if s is integer or string 'adaptive'."""
try:
return int(s)
except ValueError:
if s.lower() == "adaptive":
return s.lower()
else:
raise ValueError(f'String {s} must be integer or string "adaptive".')
def sigmoid(x, beta):
"""Numerically stable sigmoid function.
    Adapted from https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
"""
if x >= 0:
z = np.exp(-beta * x)
return 1 / (1 + z)
else:
# if x is less than zero then z will be small, denom can't be
# zero because it's 1+z.
z = np.exp(beta * x)
return z / (1 + z)
def subdict(d, keys):
return dict((k, v) for k, v in d.items() if k in keys)
def restrictdict(d, keys):
inter = [k for k in d.keys() if k in keys]
return subdict(d, inter)
def dollarize(cents: Union[List[int], int]) -> Union[List[str], str]:
"""Dollarizes int-cents prices for printing.
Defined outside the class for utility access by non-agent classes.
Arguments:
cents:
"""
if isinstance(cents, list):
return [dollarize(x) for x in cents]
elif isinstance(cents, (int, np.int64)):
return "${:0.2f}".format(cents / 100)
else:
# If cents is already a float, there is an error somewhere.
raise ValueError(
f"dollarize(cents) called without int or list of ints: {cents} (got type '{type(cents)}')"
)
# LATENCY
def generate_latency_model(agent_count, latency_type="deterministic"):
assert latency_type in [
"deterministic",
"no_latency",
], "Please select a correct latency_type"
latency_rstate = np.random.RandomState(seed=np.random.randint(low=0, high=2 ** 32))
pairwise = (agent_count, agent_count)
if latency_type == "deterministic":
# All agents sit on line from Seattle to NYC
nyc_to_seattle_meters = 3866660
pairwise_distances = generate_uniform_random_pairwise_dist_on_line(
0.0, nyc_to_seattle_meters, agent_count, random_state=latency_rstate
)
pairwise_latencies = meters_to_light_ns(pairwise_distances)
else: # latency_type == "no_latency"
pairwise_latencies = np.zeros(pairwise, dtype=int)
latency_model = LatencyModel(
latency_model="deterministic",
random_state=latency_rstate,
connected=True,
min_latency=pairwise_latencies,
)
return latency_model
def config_add_agents(orig_config_state, agents):
agent_count = len(orig_config_state["agents"])
orig_config_state["agents"] = orig_config_state["agents"] + agents
# adding an agent to the config implies modifying the latency model #TODO: tell aymeric
lat_mod = generate_latency_model(agent_count + len(agents))
orig_config_state["agent_latency_model"] = lat_mod
return orig_config_state
```
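A few of the helpers above in action, assuming `abides_markets` is installed; the inputs are toy values.
```python
from abides_markets.utils import dollarize, sigmoid, subdict

print(dollarize(10_050))          # $100.50
print(dollarize([99, 10_000]))    # ['$0.99', '$100.00']
print(sigmoid(0, beta=1.0))       # 0.5
print(subdict({"a": 1, "b": 2, "c": 3}, ["a", "c"]))  # {'a': 1, 'c': 3}
```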
#### File: tests/orderbook/test_price_to_comply.py
```python
from copy import deepcopy
from abides_markets.messages.orderbook import OrderAcceptedMsg, OrderExecutedMsg
from abides_markets.order_book import OrderBook
from abides_markets.orders import LimitOrder, MarketOrder, Side
from . import FakeExchangeAgent, SYMBOL, TIME
def test_create_price_to_comply_order():
order = LimitOrder(
agent_id=1,
time_placed=TIME,
symbol=SYMBOL,
quantity=10,
side=Side.BID,
is_price_to_comply=True,
limit_price=100,
)
agent = FakeExchangeAgent()
book = OrderBook(agent, SYMBOL)
book.handle_limit_order(deepcopy(order))
hidden_half = deepcopy(order)
hidden_half.is_hidden = True
hidden_half.limit_price += 1
visible_half = order
assert len(book.asks) == 0
assert len(book.bids) == 2
assert book.bids[0].hidden_orders == [
(hidden_half, dict(ptc_hidden=True, ptc_other_half=visible_half))
]
assert book.bids[0].visible_orders == []
assert book.bids[1].hidden_orders == []
assert book.bids[1].visible_orders == [
(visible_half, dict(ptc_hidden=False, ptc_other_half=hidden_half))
]
def test_fill_price_to_comply_order():
order = LimitOrder(
agent_id=1,
time_placed=TIME,
symbol=SYMBOL,
quantity=10,
side=Side.BID,
is_price_to_comply=True,
limit_price=100,
)
agent = FakeExchangeAgent()
book = OrderBook(agent, SYMBOL)
book.handle_limit_order(order)
hidden_half = deepcopy(order)
hidden_half.is_hidden = True
hidden_half.limit_price += 1
visible_half = order
market_order = MarketOrder(
agent_id=2,
time_placed=TIME,
symbol=SYMBOL,
quantity=10,
side=Side.ASK,
)
book.handle_market_order(market_order)
assert len(book.asks) == 0
assert len(book.bids) == 0
assert len(agent.messages) == 3
assert agent.messages[0][0] == 1
assert isinstance(agent.messages[0][1], OrderAcceptedMsg)
assert agent.messages[0][1].order.agent_id == 1
assert agent.messages[0][1].order.side == Side.BID
assert agent.messages[0][1].order.quantity == 10
assert agent.messages[1][0] == 1
assert isinstance(agent.messages[1][1], OrderExecutedMsg)
assert agent.messages[1][1].order.agent_id == 1
assert agent.messages[1][1].order.side == Side.BID
assert agent.messages[1][1].order.fill_price == 101
assert agent.messages[1][1].order.quantity == 10
assert agent.messages[2][0] == 2
assert isinstance(agent.messages[2][1], OrderExecutedMsg)
assert agent.messages[2][1].order.agent_id == 2
assert agent.messages[2][1].order.side == Side.ASK
assert agent.messages[2][1].order.fill_price == 101
assert agent.messages[2][1].order.quantity == 10
def test_cancel_price_to_comply_order():
order = LimitOrder(
agent_id=1,
time_placed=TIME,
symbol=SYMBOL,
quantity=10,
side=Side.BID,
is_price_to_comply=True,
limit_price=100,
)
agent = FakeExchangeAgent()
book = OrderBook(agent, SYMBOL)
book.handle_limit_order(order)
assert book.cancel_order(order) == True
assert len(book.asks) == 0
assert len(book.bids) == 0
def test_modify_price_to_comply_order():
pass
# TODO
def test_replace_price_to_comply_order():
old_order = LimitOrder(
agent_id=1,
time_placed=TIME,
symbol=SYMBOL,
quantity=10,
side=Side.BID,
is_price_to_comply=True,
limit_price=100,
)
agent = FakeExchangeAgent()
book = OrderBook(agent, SYMBOL)
book.handle_limit_order(old_order)
assert len(book.asks) == 0
assert len(book.bids) == 2
new_order = LimitOrder(
agent_id=1,
time_placed=TIME,
symbol=SYMBOL,
quantity=10,
side=Side.ASK,
is_price_to_comply=False,
limit_price=100,
)
book.replace_order(1, old_order, new_order)
assert len(book.asks) == 1
assert len(book.bids) == 0
```
#### File: ABIDES-jpmc-gym/profiling/profile_rmsc.py
```python
import logging
import coloredlogs
import numpy as np
import datetime as dt
from abides_core import Kernel
from abides_core.utils import subdict
from abides_markets.configs.rmsc04 import build_config
logger = logging.getLogger("profile_rmsc")
coloredlogs.install(
level="INFO", fmt="[%(process)d] %(levelname)s %(name)s %(message)s"
)
# from memory_profiler import profile
# @profile
def run(
config,
log_dir="",
kernel_seed=np.random.randint(low=0, high=2 ** 32, dtype="uint64"),
):
print()
print("╔═══════════════════════════════════════════════════════════╗")
print("║ ABIDES: Agent-Based Interactive Discrete Event Simulation ║")
print("╚═══════════════════════════════════════════════════════════╝")
print()
kernel = Kernel(
random_state=np.random.RandomState(seed=kernel_seed),
log_dir="",
**subdict(
config,
[
"start_time",
"stop_time",
"agents",
"agent_latency_model",
"default_computation_delay",
"custom_properties",
],
),
)
sim_start_time = dt.datetime.now()
logger.info(f"Simulation Start Time: {sim_start_time}")
end_state = kernel.run()
sim_end_time = dt.datetime.now()
logger.info(f"Simulation End Time: {sim_end_time}")
logger.info(f"Time taken to run simulation: {sim_end_time - sim_start_time}")
return end_state
if __name__ == "__main__":
run(build_config(seed=1, book_logging=False, end_time="16:00:00"))
# import os
# import subprocess
# from profilehooks import profile
# @profile(stdout=False, immediate=True, filename="rmsc03.prof")
# def _run():
# run(build_config(seed=1, book_freq=None, end_time="16:00:00"))
# _run()
# subprocess.call(
# f"gprof2dot rmsc03.prof -f pstats > rmsc03.dot",
# shell=True,
# )
# subprocess.call(
# f"dot -Tsvg -o rmsc03.svg rmsc03.dot",
# shell=True,
# )
# os.remove("rmsc03.dot")
# os.remove("rmsc03.prof")
```
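The commented-out block above sketches one profiling workflow (profilehooks + gprof2dot + dot). For reference, a minimal alternative sketch using only the standard library's cProfile and pstats is shown below; it assumes the run() and build_config names defined in profile_rmsc.py are in scope, and the shortened end_time is an arbitrary choice to keep the profiled run quick.

```python
# Minimal profiling sketch using the standard library's cProfile/pstats.
# Assumes run() and build_config from profile_rmsc.py above are in scope;
# the shortened end_time is an arbitrary choice for a quicker run.
import cProfile
import pstats

def profile_run():
    profiler = cProfile.Profile()
    profiler.enable()
    run(build_config(seed=1, book_logging=False, end_time="10:00:00"))
    profiler.disable()
    stats = pstats.Stats(profiler).sort_stats("cumulative")
    stats.print_stats(25)  # print the 25 most expensive call sites
```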
#### File: ABIDES-jpmc-gym/version_testing/test_config.py
```python
import os
import pandas as pd
import datetime as dt
import numpy as np
from multiprocessing import Pool
import psutil
import pathlib
from tqdm import tqdm
from p_tqdm import p_map
import itertools
def get_path(level):
path = pathlib.Path(__file__).parent.absolute()
path = str(path)
if level == 0:
return path
else:
path = path.split("/")[:-level]
return ("/").join(path)
root_path_abides = get_path(1)
root_path_ec2 = get_path(3)
os.chdir(root_path_abides)
import sys
sys.path.insert(0, root_path_abides)
import version_testing.runasof as runasof
# TODO: use different end time in the new config
def get_paths(parameters):
specific_path = f'{parameters["new"]["config"]}/{parameters["shared"]["end-time"].replace(":", "-")}/{parameters["shared"]["seed"]}' # can add as many as there are parameters
specific_path_underscore = f'{parameters["new"]["config"]}_{parameters["shared"]["end-time"].replace(":", "-")}_{parameters["shared"]["seed"]}' # TODO: maybe something better
return specific_path, specific_path_underscore
def run_test(test_):
parameters, old_new_flag = test_
    # run the test for one parameter dictionary
specific_path, specific_path_underscore = get_paths(parameters)
# compute a unique stamp for log folder
now = dt.datetime.now()
stamp = now.strftime("%Y%m%d%H%M%S")
    # run the command at the requested commit sha (old or new)
time = runasof.run_command(
parameters["command"][old_new_flag],
commit_sha=parameters[old_new_flag]["sha"],
specific_path_underscore=specific_path_underscore,
git_path=root_path_abides,
old_new_flag=old_new_flag,
pass_logdir_sha=(
"--log_dir",
lambda x: root_path_ec2
+ f"/tmp/{old_new_flag}_{stamp}/"
+ x
+ "/"
+ specific_path,
),
)
# output = parameters
output = {}
output["sha"] = parameters[old_new_flag]["sha"]
output["config"] = parameters[old_new_flag]["config"]
output["end-time"] = parameters["shared"]["end-time"]
output["seed"] = parameters["shared"]["seed"]
output["time"] = time
## compare order book logs from the simulations
if parameters["with_log"]:
path_to_ob = (
root_path_ec2
+ f"/tmp/{old_new_flag}_{stamp}/{parameters[old_new_flag]['sha']}/{specific_path}/ORDERBOOK_ABM_FULL.bz2"
)
else:
path_to_ob = "no_log"
output["path_to_ob"] = path_to_ob
output["flag"] = old_new_flag
return output
def compute_ob(path_old, path_new):
ob_old = pd.read_pickle(path_old)
ob_new = pd.read_pickle(path_new)
if ob_old.equals(ob_new):
return 0
else:
return 1
def run_tests(LIST_PARAMETERS, varying_parameters):
old_new_flags = ["old", "new"]
tests = list(itertools.product(LIST_PARAMETERS, old_new_flags))
# test_ = tests[0]
# run_test(test_)
outputs = p_map(run_test, tests)
df = pd.DataFrame(outputs)
df_old = df[df["flag"] == "old"]
df_new = df[df["flag"] == "new"]
print(f"THERE ARE {len(df_new)} TESTS RESULTS.")
if LIST_PARAMETERS[0]["with_log"]:
path_olds = list(df_old["path_to_ob"])
path_news = list(df_new["path_to_ob"])
# compute_ob(path_olds[0], path_news[0])
ob_comps = p_map(compute_ob, path_olds, path_news)
        if sum(ob_comps) == 0:
            print("ALL TESTS PASSED!")
        else:
            print(f"ALERT: {sum(ob_comps)} TEST FAILURES")
df_old = df_old[varying_parameters + ["seed", "time"]].set_index(
varying_parameters + ["seed"]
)
df_new = df_new[varying_parameters + ["seed", "time"]].set_index(
varying_parameters + ["seed"]
)
df_diff = df_old - df_new # /df_old
df_results = df_diff.groupby(["config", "end-time"])["time"].describe()[
["mean", "std"]
]
df_diff_pct = 100 * (df_old - df_new) / df_old
df_results_pct = df_diff_pct.groupby(["config", "end-time"])["time"].describe()[
["mean", "std"]
]
print("*********************************************")
print("*********************************************")
print("OLD RUNNING TIME")
# with pd.option_context('display.float_format', '{:0.2f}'.format):
print(df_old.groupby(["config", "end-time"])["time"].describe()[["mean", "std"]])
print("*********************************************")
print("*********************************************")
print("NEW RUNNING TIME")
with pd.option_context("display.float_format", "{:0.2f}".format):
print(
df_new.groupby(["config", "end-time"])["time"].describe()[["mean", "std"]]
)
print("*********************************************")
print("*********************************************")
print("TIME DIFFERENCE in seconds")
with pd.option_context("display.float_format", "{:0.2f}".format):
df_results["mean"] = df_results["mean"].dt.total_seconds()
df_results["std"] = df_results["std"].dt.total_seconds()
print(df_results)
print("*********************************************")
print("*********************************************")
print("TIME DIFFERENCE in %")
with pd.option_context("display.float_format", "{:0.2f}".format):
print(df_results_pct)
``` |
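compute_ob() above decides whether the old and new runs are equivalent by comparing the pickled ORDERBOOK_ABM_FULL logs with DataFrame.equals. The standalone sketch below shows the same comparison idea; the two paths are hypothetical placeholders, and DataFrame.compare is used only to localise a mismatch when the strict equality check fails (it requires both frames to share index and columns).

```python
# Standalone sketch of the order-book comparison used in compute_ob();
# the two pickle paths are hypothetical placeholders.
import pandas as pd

old_ob = pd.read_pickle("tmp/old_run/ORDERBOOK_ABM_FULL.bz2")
new_ob = pd.read_pickle("tmp/new_run/ORDERBOOK_ABM_FULL.bz2")

if old_ob.equals(new_ob):
    print("order book logs are identical")
else:
    # only valid when both frames share index/columns; shows differing cells
    diff = old_ob.compare(new_ob)
    print(f"order book logs differ in {len(diff)} rows")
```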
{
"source": "jpmorganchase/kallisti-core",
"score": 2
} |
#### File: kallisticore/lib/action.py
```python
import importlib
import inspect
from copy import deepcopy
from typing import Dict, Callable, Any, List, Optional
from kallisticore.exceptions import UnknownModuleName, CouldNotFindFunction
from kallisticore.lib.credential import Credential
from kallisticore.lib.expectation import Expectation
from kallisticore.models.step import Step
from kallisticore.utils.singleton import Singleton
class KallistiFunctionCache(metaclass=Singleton):
def __init__(self):
self.functions = {}
def add(self, module_name: str, function_name: str,
function_implementation: Callable) -> None:
key = self._function_key(function_name, module_name)
self.functions[key] = function_implementation
def get(self, module_name, function_name) -> Callable:
        # use the same key order as add() so cached functions are actually found
        key = self._function_key(function_name, module_name)
return self.functions.get(key, None)
def _function_key(self, function_name: str, module_name: str):
return module_name, function_name
class FunctionLoader:
def __init__(self, module_map: dict, module_name: str):
"""
:param module_map: map of action modules
:param module_name: the name of the module to search
e.g. "cf".
"""
self._functions = KallistiFunctionCache()
self._module_path = module_name
self.module = FunctionLoader.get_module(module_map, self._module_path)
def get_function(self, function_name: str) -> Callable:
""" Get the function based on the type_name.
Caches the results for previous findings, and search the cache
before searching the modules.
:param function_name: the name of the function to search
e.g. "map_route_to_app".
        :returns the function found, or raises an exception if no function
        can be found.
"""
function_implementation = self._functions.get(self._module_path,
function_name)
if not function_implementation:
function_implementation = self._find_function(function_name)
self._functions.add(self._module_path, function_name,
function_implementation)
return function_implementation
def _find_function(self, function_name: str) -> Callable:
modules_to_search = self._get_modules_to_search()
for module in modules_to_search:
if hasattr(module, "__all__"):
declared_action_names = getattr(module, "__all__")
if function_name in declared_action_names:
function_implementation = getattr(module, function_name)
return function_implementation
raise CouldNotFindFunction(self._module_path + "." + function_name)
def _get_modules_to_search(self) -> list:
modules_to_search = [self.module]
if hasattr(self.module, "__actions_modules__"):
sub_modules = self._get_sub_modules_to_search()
modules_to_search.extend(sub_modules)
return modules_to_search
def _get_sub_modules_to_search(self) -> list:
sub_module_names = getattr(self.module, "__actions_modules__")
return [importlib.import_module(module_name) for module_name in
sub_module_names]
@staticmethod
def get_module(module_map: dict, namespace: str):
module_name = module_map.get(namespace)
if not module_name:
raise UnknownModuleName(namespace)
module = importlib.import_module(module_name)
return module
class Action:
func_loader_class = FunctionLoader
@classmethod
def build(cls, step: Step, action_module_map: dict,
credential_class_map: dict):
"""
:param step: Object of type Step
:param action_module_map: Map of action modules
:param credential_class_map: Map of credential classes
"""
description = step.description
arguments = deepcopy(step.where)
module_name = step.get_namespace()
function_name = step.get_function_name()
func_loader = cls.func_loader_class(action_module_map, module_name)
module_func = func_loader.get_function(function_name)
credential = None
if 'credentials' in arguments:
cred_dict = arguments.pop('credentials')
credential = Credential.build(credential_class_map, cred_dict)
expectations = []
for expect_spec in step.expect:
expectations.append(Expectation.build(expect_spec))
return cls(module_func, arguments, expectations, description,
credential)
def __init__(self, module_func: Callable, arguments: Dict,
expectations: Optional[List[Expectation]] = None,
name: str = None, credential: Credential = None):
"""
:param module_func: Action module function
:param arguments: Arguments required by function to be executed
:param expectations: Expectation of action's result
:param name: Description for the action to be execute
:param credential: Holds credential required for action to be executed
:type credential: Credential
"""
self.expectations = expectations if expectations else []
self.name = name
self.func = module_func
if inspect.isclass(self.func):
self.func = self.func(**arguments).execute
self.arguments = {}
else:
self.arguments = arguments
self.credential = credential
def execute(self) -> Any:
""" Execute the action, captures the exception if any.
:return True if the action has been executed successfully:
"""
result = self.func(**self.arguments)
self.check_result_for_expectations(result)
return result
def check_result_for_expectations(self, result):
for expect_spec in self.expectations:
expect_spec.execute(result)
def make_action(step: Step, action_module_map: dict,
credential_class_map: dict) -> Action:
""" Create Action based on the action type specified in action_spec.
:param step: the action specification in json
    e.g. {"step":"", "do":"", "where":{}}
:param action_module_map: Action module map
:param credential_class_map: Credential class map
:returns a Kallisti Action object.
"""
namespace = step.get_namespace()
module = FunctionLoader.get_module(action_module_map, namespace)
action_class = getattr(module, '__action_class__', Action)
return action_class.build(step, action_module_map, credential_class_map)
```
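A minimal usage sketch for make_action(), assuming the kallisticore package is importable; the module map and step dictionary mirror the ones used in this repo's tests for the examples module.

```python
# Usage sketch for make_action(); module map and step mirror the repo's
# tests for the examples module, and assume kallisticore is importable.
from kallisticore.lib.action import make_action
from kallisticore.models.step import Step

MODULE_MAP = {'eg': 'kallisticore.modules.examples.sample_module1'}

step = Step.build({'step': 'increment a counter',
                   'do': 'eg.increment',
                   'where': {'a': 1}})
action = make_action(step, MODULE_MAP, credential_class_map={})
print(action.execute())  # -> 2
```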
#### File: kallisticore/models/singleton_manager.py
```python
from django.db import models
class SingletonManager(models.Manager):
def get_queryset(self, **kwargs):
return super(SingletonManager, self).get_queryset().filter(id=1)
```
#### File: kallisticore/models/step.py
```python
import json
from typing import Dict, List, Tuple
from django.core.exceptions import ValidationError
from jinja2 import Template, Environment, meta
class Step:
ACTION_KEY = "do"
DESC_KEY = "step"
WHERE_KEY = "where"
EXPECT_KEY = "expect"
@classmethod
def build(cls, step_dict: Dict) -> "Step":
return Step(step_dict.get(cls.ACTION_KEY),
step_dict.get(cls.DESC_KEY),
step_dict.get(cls.WHERE_KEY),
step_dict.get(cls.EXPECT_KEY))
def __init__(self, action: str, description: str, where: Dict,
expect: List[Dict] = None):
self.action = action
self.description = description
self.where = where
self.expect = expect if expect else []
def is_valid(self) -> bool:
if self.action and self.where:
return True
return False
def interpolate_with_parameters(self, parameters):
where_template = Template(json.dumps(self.where))
self.where = json.loads(where_template.render(parameters))
def get_where_clause_template_variables(self):
env = Environment()
ast = env.parse(json.dumps(self.where))
return meta.find_undeclared_variables(ast)
def __eq__(self, o: "Step") -> bool:
return self.action == o.action and self.description == o.description \
and self.where == o.where
def items(self) -> List[Tuple]:
list_of_tuples = []
if self.description:
list_of_tuples.append((Step.DESC_KEY, self.description))
list_of_tuples += [(Step.ACTION_KEY, self.action),
(Step.WHERE_KEY, self.where)]
return list_of_tuples
def to_dict(self) -> Dict:
step_dict = {Step.ACTION_KEY: self.action, Step.WHERE_KEY: self.where}
if self.description:
step_dict[Step.DESC_KEY] = self.description
if self.expect:
step_dict[Step.EXPECT_KEY] = self.expect
return step_dict
@staticmethod
def encode_step(step: "Step") -> Dict:
if isinstance(step, Step):
return step.to_dict()
else:
type_name = step.__class__.__name__
raise TypeError(f"Object of type '{type_name}' is not JSON "
f"serializable using Step.encode_step")
@staticmethod
def convert_to_steps(steps_list: List[Dict]) -> List["Step"]:
steps = []
invalid_steps = []
for step_dict in steps_list:
step = Step.build(step_dict)
if step.is_valid():
steps.append(step)
else:
invalid_steps.append(step_dict)
if invalid_steps:
raise ValidationError(
message="Invalid Steps: Some steps provided are invalid. "
"Invalid Steps: " + json.dumps(invalid_steps),
code="invalid")
return steps
def get_namespace(self):
return self.action.split('.')[0]
def get_function_name(self):
parts = self.action.split('.')
return '.'.join(parts[1:])
```
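A short sketch of Step's Jinja2-based parameter handling, assuming kallisticore is importable: get_where_clause_template_variables() lists the undeclared template variables in the where clause, and interpolate_with_parameters() renders them in place. The values used are illustrative only.

```python
# Sketch of Step's Jinja2 parameter interpolation; values are illustrative.
from kallisticore.models.step import Step

step = Step.build({'step': 'stop the app',
                   'do': 'cf.stop_app',
                   'where': {'app_name': '{{app_name}}',
                             'org_name': '{{org}}'}})
print(step.get_where_clause_template_variables())  # {'app_name', 'org'}
step.interpolate_with_parameters({'app_name': 'hello-world', 'org': 'my-org'})
print(step.where)  # {'app_name': 'hello-world', 'org_name': 'my-org'}
```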
#### File: modules/cloud_foundry/actions.py
```python
import random
from chaoscf.actions import terminate_app_instance, \
terminate_some_random_instance
from chaoscf.api import get_apps_for_org, get_app_instances
from chaoslib import Configuration, Secrets
from chaoslib.exceptions import FailedActivity
__all__ = ['get_app_states_by_org', 'terminate_random_app_instance',
'terminate_some_random_instances']
def get_app_states_by_org(org_name: str, configuration: Configuration,
secrets: Secrets):
apps = get_apps_for_org(org_name, configuration, secrets)['resources']
if not apps:
raise FailedActivity(
"no app was found under org: '{o}'.".format(o=org_name))
result = []
for app in apps:
result.append({
'name': app['entity']['name'],
'state': app['entity']['state']
})
return result
def terminate_random_app_instance(org_name: str, configuration: Configuration,
secrets: Secrets):
"""
Terminate a random instance under a randomly picked app for a specified
org name.
"""
apps = get_apps_for_org(org_name, configuration, secrets)
app_names = [app['entity']['name'] for app in apps['resources']]
app_name = random.choice(app_names)
terminate_some_random_instance(app_name, configuration, secrets, org_name)
def terminate_some_random_instances(app_name: str,
configuration: Configuration,
secrets: Secrets, count: int = 0,
percentage: int = 0, org_name: str = None,
space_name: str = None):
"""
Terminate random instances under a specified app.
The number of instances to terminate can be specified by count or
    percentage. When both count and percentage are specified, percentage
    overrides count. When the number of instances to terminate is larger than
    the number of existing instances, all instances are terminated.
"""
instances = get_app_instances(
app_name, configuration, secrets, org_name=org_name,
space_name=space_name)
indices = [idx for idx in instances.keys()]
instance_count = len(indices)
if percentage > 0:
count = int(instance_count * percentage / 100)
indices_to_terminate = random.sample(indices, min(count, instance_count))
for idx in indices_to_terminate:
terminate_app_instance(
app_name, idx, configuration, secrets, org_name, space_name)
```
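The docstring of terminate_some_random_instances() explains how count and percentage interact; the standalone sketch below reproduces just that selection arithmetic without touching the Cloud Foundry API.

```python
# Standalone sketch of the instance-selection logic described in
# terminate_some_random_instances(); no Cloud Foundry calls are made.
import random

def pick_instances_to_terminate(indices, count=0, percentage=0):
    instance_count = len(indices)
    if percentage > 0:
        # percentage overrides count when both are supplied
        count = int(instance_count * percentage / 100)
    # never sample more instances than actually exist
    return random.sample(indices, min(count, instance_count))

print(pick_instances_to_terminate(list(range(10)), percentage=30))  # 3 indices
```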
#### File: modules/common/__init__.py
```python
import base64
import json
import time
from json import JSONDecodeError
from typing import Dict, Optional
import requests
from django.conf import settings
from kallisticore import exceptions
from kallisticore.lib.credential import Credential, TokenCredential, \
UsernamePasswordCredential
__all__ = ["http_probe", "http_request", "wait"]
def wait(time_in_seconds: int):
if type(time_in_seconds) is not int:
raise exceptions.FailedAction(
"Expected integer for argument 'time_in_seconds' "
"(got %s)" % type(time_in_seconds).__name__)
time.sleep(time_in_seconds)
def http_request(url: str, method: str = "GET",
request_body: Optional[Dict] = None,
headers: Optional[Dict] = None,
authentication: Optional[Dict] = None) -> Dict:
headers = extract_authentication_headers(authentication, headers)
method = method.upper()
if method in ["GET", "DELETE"]:
response = requests.request(method, url=url, headers=headers)
elif method in ["POST", "PATCH", "PUT"]:
response = requests.request(method, url=url,
data=json.dumps(request_body),
headers=headers)
else:
raise exceptions.InvalidHttpRequestMethod(
"Invalid method: {}. Please specify a valid HTTP request "
"method".format(method))
duration = response.elapsed.total_seconds()
return _append_parsed_json_response(
{'status_code': response.status_code, 'response_text': response.text,
'response_headers': response.headers,
'response_time_in_seconds': duration})
def http_probe(url: str, method: str = "GET",
request_body: Optional[Dict] = None,
headers: Optional[Dict] = None,
authentication: Optional[Dict] = None) -> Dict:
headers = extract_authentication_headers(authentication, headers)
method = method.upper()
if method == "GET":
response = requests.get(url=url, headers=headers)
elif method == "POST":
response = requests.post(url=url, data=json.dumps(request_body),
headers=headers)
else:
raise exceptions.InvalidHttpProbeMethod(
"Invalid method: {}. "
"HTTP Probe allows only GET and POST methods".format(method))
duration = response.elapsed.total_seconds()
if response.status_code < 400:
return _append_parsed_json_response(
{'status_code': response.status_code,
'response_text': response.text,
'response_headers': response.headers,
'response_time_in_seconds': duration})
raise exceptions.FailedAction(
"Http probe failed after {} seconds for url {} with status code {}. "
"Details: {}".format(duration, url, response.status_code,
response.text))
def _append_parsed_json_response(result: dict) -> Dict:
try:
result['response'] = json.loads(result['response_text'])
return result
except (ValueError, KeyError, JSONDecodeError):
return result
def _get_oauth_token_for_http_request_auth_header(config: Dict) -> str:
response_token_key = 'access_token'
if 'token_key' in config:
response_token_key = config['token_key']
cred_class_map = getattr(settings, 'KALLISTI_CREDENTIAL_CLASS_MAP', {})
credential = Credential.build(cred_class_map, config['credentials'])
credential.fetch()
if isinstance(credential, TokenCredential):
return _format_oauth_token(credential.token)
if isinstance(credential, UsernamePasswordCredential):
request_body = {
'grant_type': 'password',
'username': credential.username,
            'password': credential.password
}
if 'resource' in config:
request_body['resource'] = config['resource']
client_secret = config['client']['secret'] \
if 'secret' in config['client'] else ''
client_base64 = base64.b64encode('{}:{}'.format(
config['client']['id'], client_secret).encode()).decode('utf-8')
headers = {'Authorization': 'Basic {}'.format(client_base64)}
response = requests.post(config['url'], request_body, headers=headers)
if response.status_code >= 400:
raise exceptions.FailedAction(
"Authentication for http request failed with status code {}. "
"Details: {}".format(response.status_code, response.text))
response_body = response.json()
return _format_oauth_token(response_body[response_token_key])
raise exceptions.InvalidCredentialType(credential.__class__.__name__)
def _format_oauth_token(token: str) -> str:
auth_token_prefix = 'Bearer'
return '{} {}'.format(auth_token_prefix, token)
def extract_authentication_headers(authentication, headers):
if authentication:
if authentication['type'] == 'oauth2_token' and headers:
headers['Authorization'] = \
_get_oauth_token_for_http_request_auth_header(authentication)
elif authentication['type'] == 'oauth2_token' and not headers:
headers = {
'Authorization': _get_oauth_token_for_http_request_auth_header(
authentication)}
return headers
```
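A usage sketch for http_probe(), mocked with requests_mock (the same library this repo's tests use) so no real network call is made; the URL is a placeholder and kallisticore is assumed to be importable.

```python
# Usage sketch for http_probe(), mocked with requests_mock; the URL is a
# placeholder and no real HTTP call is made.
import json
import requests_mock
from kallisticore.modules.common import http_probe

with requests_mock.Mocker() as m:
    m.get('http://app.test/-/status/health', text=json.dumps({'status': 'UP'}))
    result = http_probe(url='http://app.test/-/status/health')

print(result['status_code'])  # 200
print(result['response'])     # {'status': 'UP'}
```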
#### File: modules/examples/sample_module1.py
```python
__all__ = ["increment", "subtract", "multiply"]
def increment(a):
a += 1
return a
def subtract(a, b):
return a - b
def multiply(a, b):
return a * b
```
#### File: examples/sample_module2/increment_action.py
```python
__all__ = ["increment", "Add"]
def increment(a):
a += 1
return a
class Add:
def __init__(self, a, b):
self.a = a
self.b = b
def execute(self):
return self.a + self.b
```
#### File: modules/prometheus/__init__.py
```python
from typing import Callable, Dict, List, Optional
from kallisticore.lib.action import Action
from kallisticore.lib.credential import Credential
from kallisticore.lib.expectation import Expectation
class PrometheusAction(Action):
def __init__(self, module_func: Callable, arguments: Dict,
                 expectations: Optional[List[Expectation]] = None,
name: str = None, credential: Credential = None):
super(PrometheusAction, self).__init__(
module_func=module_func, arguments=arguments,
expectations=expectations, name=name, credential=credential)
self.arguments['configuration'] = {
'prometheus_base_url': arguments.pop('base_url')}
__action_class__ = PrometheusAction
__actions_modules__ = ['chaosprometheus.probes']
```
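PrometheusAction repacks the step's base_url argument into the configuration dict that the chaosprometheus probes expect. The sketch below shows that transformation with a stand-in probe function; it assumes kallisticore is importable and contacts no Prometheus server.

```python
# Sketch of PrometheusAction's argument repacking; fake_probe is a stand-in
# for a chaosprometheus probe and no Prometheus server is contacted.
from kallisticore.modules.prometheus import PrometheusAction

def fake_probe(query, configuration):
    return {'query': query,
            'base_url': configuration['prometheus_base_url']}

action = PrometheusAction(module_func=fake_probe,
                          arguments={'base_url': 'http://prom.test',
                                     'query': 'up'})
print(action.execute())  # -> {'query': 'up', 'base_url': 'http://prom.test'}
```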
#### File: kallisti-core/kallisticore/serializers.py
```python
from collections import OrderedDict
from typing import List, Dict
from kallisticore.models import Trial
from kallisticore.models.experiment import Experiment
from kallisticore.models.notification import Notification
from kallisticore.models.step import Step
from kallisticore.models.trial_schedule import TrialSchedule, \
validate_recurrence_pattern
from rest_framework import serializers
class ListOfDictsField(serializers.ListField):
child = serializers.DictField()
def validate_step(value: List[Dict]):
"""
:param value: List of step dict
:return: None
:raise ValidationError when invalid step structure is passed
"""
Step.convert_to_steps(value)
def _get_kallisti_current_user_id(validated_data, key):
user = validated_data.pop(key, None)
if user is not None:
return getattr(user, 'user_id', None)
return None
class ExperimentSerializer(serializers.ModelSerializer):
description = serializers.CharField(required=False, allow_blank=True)
parameters = serializers.DictField(required=False)
metadata = serializers.DictField(required=False)
pre_steps = ListOfDictsField(required=False, validators=[validate_step])
steps = ListOfDictsField(validators=[validate_step])
post_steps = ListOfDictsField(required=False, validators=[validate_step])
creator = serializers.HiddenField(default=serializers.CurrentUserDefault())
created_by = serializers.CharField(read_only=True)
class Meta:
model = Experiment
fields = ('id', 'name', 'description', 'metadata', 'parameters',
'pre_steps', 'steps', 'post_steps', 'created_by', 'creator')
def create(self, validated_data):
validated_data['created_by'] = _get_kallisti_current_user_id(
validated_data, 'creator')
return super(ExperimentSerializer, self).create(validated_data)
class TrialSerializer(serializers.ModelSerializer):
trial_record = serializers.SerializerMethodField()
parameters = serializers.DictField(default={})
metadata = serializers.DictField(default={}, required=False)
ticket = serializers.DictField(default={})
status = serializers.CharField(read_only=True)
executed_at = serializers.DateTimeField(read_only=True)
completed_at = serializers.DateTimeField(read_only=True)
initiator = serializers.HiddenField(
default=serializers.CurrentUserDefault())
initiated_by = serializers.CharField(read_only=True)
def get_trial_record(self, instance: Trial) -> OrderedDict:
records = OrderedDict()
pre_steps = instance.records.get('pre_steps', None)
if pre_steps:
records['pre_steps'] = pre_steps
steps = instance.records.get('steps', None)
if steps:
records['steps'] = steps
post_steps = instance.records.get('post_steps', None)
if post_steps:
records['post_steps'] = post_steps
result = instance.records.get('result', None)
if result:
records['result'] = result
return records
def create(self, validated_data):
validated_data['initiated_by'] = _get_kallisti_current_user_id(
validated_data, 'initiator')
return super(TrialSerializer, self).create(validated_data)
class Meta:
model = Trial
fields = ('id', 'experiment', 'metadata', 'parameters', 'ticket',
'trial_record', 'status', 'executed_at', 'completed_at',
'initiated_by', 'initiator', 'initiated_from')
class TrialForReportSerializer(TrialSerializer):
trial_record = serializers.SerializerMethodField()
def get_trial_record(self, instance: Trial) -> dict:
if not instance or not instance.records:
return {}
else:
return instance.records
class Meta:
model = Trial
exclude = ('experiment',)
class ReportSerializer(serializers.ModelSerializer):
description = serializers.CharField(required=False)
parameters = serializers.DictField(required=False)
metadata = serializers.DictField(default={}, required=False)
pre_steps = ListOfDictsField(validators=[validate_step])
steps = ListOfDictsField(validators=[validate_step])
post_steps = ListOfDictsField(required=False, validators=[validate_step])
trials = serializers.SerializerMethodField()
def get_trials(self, experiment):
if self.context.get('trial_id'):
trial = experiment.trials.get(id=self.context.get('trial_id'))
return TrialForReportSerializer(many=True, instance=[trial]).data
return TrialForReportSerializer(many=True,
instance=experiment.trials).data
class Meta:
model = Experiment
fields = ('id', 'name', 'description', 'metadata', 'parameters',
'pre_steps', 'steps', 'post_steps', 'trials')
class TrialStatusSerializer(serializers.ModelSerializer):
class Meta:
model = Trial
fields = ('id', 'status', 'executed_at')
class TrialScheduleSerializer(serializers.ModelSerializer):
parameters = serializers.DictField(default={})
metadata = serializers.DictField(default={}, required=False)
ticket = serializers.DictField(default={})
recurrence_count = serializers.IntegerField(default=None, allow_null=True)
recurrence_left = serializers.IntegerField(read_only=True)
recurrence_pattern = serializers.CharField(required=True, validators=[
validate_recurrence_pattern])
creator = serializers.HiddenField(default=serializers.CurrentUserDefault())
created_by = serializers.CharField(read_only=True)
created_at = serializers.DateTimeField(read_only=True)
trials = serializers.SerializerMethodField()
def create(self, validated_data):
validated_data['created_by'] = _get_kallisti_current_user_id(
validated_data, 'creator')
assert Experiment.objects.get(
id=self.context.get('experiment_id')).deleted_at is None
validated_data['experiment_id'] = self.context.get('experiment_id')
return super(TrialScheduleSerializer, self).create(validated_data)
def get_trials(self, trial_schedule: TrialSchedule):
sorted_trials = trial_schedule.trials.order_by('executed_at')
return TrialStatusSerializer(sorted_trials, read_only=True,
many=True).data
class Meta:
model = TrialSchedule
fields = ('id', 'experiment_id', 'parameters', 'metadata', 'ticket',
'recurrence_pattern', 'recurrence_count', 'recurrence_left',
'creator', 'created_by', 'created_at', 'trials')
class NotificationSerializer(serializers.ModelSerializer):
emails = serializers.ListField(required=True)
class Meta:
model = Notification
fields = ('emails',)
```
#### File: kallisticore/views/experiment.py
```python
from django.conf import settings
from django.db.models.query import QuerySet
from kallisticore.models.experiment import Experiment
from kallisticore.serializers import ExperimentSerializer
from rest_framework import viewsets
from rest_framework.decorators import authentication_classes
@authentication_classes((settings.KALLISTI_API_AUTH_CLASS,))
class ExperimentViewSet(viewsets.ModelViewSet):
queryset = Experiment.objects.all()
serializer_class = ExperimentSerializer
permission_classes = (settings.KALLISTI_API_PERMISSION_CLASS,)
def get_queryset(self):
assert self.queryset is not None, (
"'%s' should either include a `queryset` attribute, or "
"override the `get_queryset()` method."
% self.__class__.__name__
)
if 'pk' in self.kwargs:
# Get all including deleted experiments if queried by primary key
queryset = Experiment.objects.get_queryset_all(**self.kwargs)
else:
queryset = self.queryset.filter(**self.kwargs)
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request
queryset = queryset.all()
return queryset
```
#### File: lib/observe/test_subject.py
```python
import unittest
from unittest.mock import Mock
from kallisticore.lib.observe.observer import Observer
from kallisticore.lib.observe.subject import Subject
class TestSubject(unittest.TestCase):
class ConcreteSubject(Subject):
pass
def setUp(self) -> None:
self.mock_logger = Mock()
self.sub = self.ConcreteSubject()
self.sub.logger = self.mock_logger
def test_initialize(self):
self.assertEqual([], self.sub._observers)
def test_attach(self):
observer = Mock(spec=Observer)
self.sub.attach(observer)
self.assertEqual([observer], self.sub._observers)
def test_detach(self):
observer = Mock(spec=Observer)
self.sub._observers = [observer]
self.sub.detach(observer)
self.assertEqual([], self.sub._observers)
def test_notify(self):
observer = Mock(spec=Observer)
self.sub._observers = [observer]
self.sub.notify()
observer.update.assert_called_once_with()
def test_notify_should_pass_the_kwargs_to_observer(self):
observer = Mock(spec=Observer)
self.sub._observers = [observer]
self.sub.notify(key='value')
observer.update.assert_called_once_with(key='value')
def test_notify_should_call_all_observers_despite_failures(self):
observer1 = Mock(spec=Observer)
observer1.update.side_effect = Exception('something went wrong')
observer2 = Mock(spec=Observer)
self.sub._observers = [observer1, observer2]
self.sub.notify()
observer1.update.assert_called_once_with()
observer2.update.assert_called_once_with()
self.mock_logger.error.assert_called_once_with("something went wrong")
```
#### File: kallisticore/lib/test_action.py
```python
import os
from unittest.mock import patch
import kallisticore.lib.action
import kallisticore.modules.common
import kallisticore.modules.examples.sample_module1 as sample_module
import kallisticore.modules.examples.sample_module2.increment_action as \
sample_module2_increment
import kallisticore.modules.examples.sample_module2.subtract_action as \
sample_module2_subtract
from django.conf import settings
from django.test import TestCase
from kallisticore.exceptions import CouldNotFindFunction, UnknownModuleName
from kallisticore.lib.action import FunctionLoader, Action
from kallisticore.lib.action import make_action
from kallisticore.lib.credential import EnvironmentUserNamePasswordCredential
from kallisticore.lib.expectation import OperatorExpectation
from kallisticore.models.step import Step
from kallisticore.modules.cloud_foundry.cloud_foundry_action import \
CloudFoundryAction
class TestFunctionLoader(TestCase):
MODULE_MAP = {'eg': 'kallisticore.modules.examples.sample_module1',
'eg2': 'kallisticore.modules.examples.sample_module2'}
def test_single_module(self):
# Action functions are defined in single module
# with __all__ defined, and no __actions_modules__
# Check we can load the correct function.
action_function = FunctionLoader(self.MODULE_MAP, 'eg')\
.get_function('increment')
self.assertEqual(action_function, sample_module.increment)
def test_single_module_no_export(self):
# Action functions are defined in single module
# and __all__ and __actions_modules__ are undefined.
# Check raised exception.
module_path = 'kallisticore.modules.examples.sample_module1.__all__'
with patch(module_path, []), \
self.assertRaises(CouldNotFindFunction) as cm:
FunctionLoader(self.MODULE_MAP, 'eg').get_function('increment')
self.assertEqual(cm.exception.args[0], 'eg.increment')
def test_multiple_modules_with_actions_and_action_modules(self):
# Action functions are defined in multiple modules
# with __all__ and __actions_modules__ defined.
func = FunctionLoader(self.MODULE_MAP, 'eg2').get_function('increment')
self.assertEqual(func, sample_module2_increment.increment)
func = FunctionLoader(self.MODULE_MAP, 'eg2').get_function('Add')
self.assertEqual(func, sample_module2_increment.Add)
func = FunctionLoader(self.MODULE_MAP, 'eg2').get_function('subtract')
self.assertEqual(func, sample_module2_subtract.subtract)
def test_multiple_modules_with_only_action_modules(self):
# Action functions are defined in multiple modules
# with __all__ undefined in module file,
# and __actions_modules__ defined.
module_path = 'kallisticore.modules.examples.sample_module2.' \
'increment_action.__all__'
with patch(module_path, []):
func_loader = FunctionLoader(self.MODULE_MAP, 'eg2')
func = func_loader.get_function('subtract')
self.assertEqual(func, sample_module2_subtract.subtract)
with self.assertRaises(CouldNotFindFunction) as cm:
func_loader.get_function('increment')
self.assertEqual(cm.exception.args[0], 'eg2.increment')
def test_no_module_error(self):
# Kallisti module name defined in the experiment does not exist in
# ACTION_NAMESPACE.
with self.assertRaises(UnknownModuleName) as cm:
func_loader = FunctionLoader(self.MODULE_MAP,
'unknown_kallisti_module')
func_loader.get_function('function_name')
self.assertEqual(cm.exception.args[0], 'unknown_kallisti_module')
def test_no_function_error(self):
# One of the function name in the experiment does not exist.
with self.assertRaises(CouldNotFindFunction) as cm:
FunctionLoader(self.MODULE_MAP, 'eg').get_function('function_name')
self.assertEqual(cm.exception.args[0], 'eg.function_name')
class TestAction(TestCase):
MODULE_MAP = {'eg': 'kallisticore.modules.examples.sample_module1',
'eg2': 'kallisticore.modules.examples.sample_module2'}
CRED_CLS_MAP = {'ENV_VAR_USERNAME_PASSWORD':
'kallisticore.lib.credential.'
'EnvironmentUserNamePasswordCredential'}
def test_action_build(self):
# Check for action implemented as function.
action_arguments = {'a': 1}
step = Step.build({'step': 'increment',
'do': 'eg.increment',
'where': action_arguments})
action = Action.build(step, self.MODULE_MAP, {})
self.assertIsInstance(action, Action)
self.assertEqual('increment', action.name)
self.assertEqual(
getattr(kallisticore.modules.examples.sample_module1, 'increment'),
action.func)
self.assertEqual(action_arguments, action.arguments)
self.assertEqual(None, action.credential)
self.assertEqual([], action.expectations)
def test_action_build_with_expectations(self):
# Check for action implemented as function.
action_arguments = {'a': 1}
step = Step.build({'step': 'increment',
'do': 'eg.increment',
'where': action_arguments,
'expect': [{'operator': 'eq', 'value': 2}]})
action = Action.build(step, self.MODULE_MAP, {})
self.assertIsInstance(action, Action)
self.assertEqual('increment', action.name)
self.assertEqual(
getattr(kallisticore.modules.examples.sample_module1, 'increment'),
action.func)
self.assertEqual(action_arguments, action.arguments)
self.assertEqual(None, action.credential)
self.assertEqual(1, len(action.expectations))
self.assertIsInstance(action.expectations[0], OperatorExpectation)
def test_action_build_with_credentials_initialized(self):
username_key = 'TEST_USER'
password_key = 'TEST_PASSWORD'
os.environ[username_key] = 'my-username'
os.environ[password_key] = 'my-secret'
action_arguments = {
'a': 1,
'credentials': {'type': 'ENV_VAR_USERNAME_PASSWORD',
'username_key': username_key,
'password_key': password_key}}
step = Step.build({'step': 'increment',
'do': 'eg.increment',
'where': action_arguments})
action = Action.build(step, self.MODULE_MAP, self.CRED_CLS_MAP)
self.assertIsInstance(action.credential,
EnvironmentUserNamePasswordCredential)
self.assertEqual(username_key, action.credential.username_key)
self.assertEqual(password_key, action.credential.password_key)
def test_action_initialize_sets_up_the_attributes(self):
action_arguments = {'a': 1}
expect = {'operator': 'eq', 'value': 2}
step = Step.build({'step': 'increment',
'do': 'eg.increment',
'where': action_arguments,
'expect': [expect]})
action = Action.build(step, self.MODULE_MAP, {})
self.assertIsInstance(action, Action)
self.assertEqual('increment', action.name)
self.assertEqual(
getattr(kallisticore.modules.examples.sample_module1, 'increment'),
action.func)
self.assertEqual(action_arguments, action.arguments)
self.assertEqual(None, action.credential)
self.assertIsInstance(action.expectations[0], OperatorExpectation)
def test_execute_function_action(self):
# Check for action implemented as function.
step = Step.build({'step': 'increment',
'do': 'eg.increment',
'where': {'a': 1}})
action = Action.build(step, self.MODULE_MAP, {})
self.assertEqual(2, action.execute())
def test_execute_class_action(self):
step = Step.build({'step': 'add',
'do': 'eg2.Add',
'where': {'a': 1, 'b': 2}})
action = Action.build(step, self.MODULE_MAP, {})
self.assertEqual(3, action.execute())
class TestMakeAction(TestCase):
MODULE_MAP = {'eg': 'kallisticore.modules.examples.sample_module1',
'eg2': 'kallisticore.modules.examples.sample_module2'}
def test_unknown_module_name(self):
step = Step.build({'step': 'increment',
'do': 'unknown_module.increment',
'where': {'a': None}})
with self.assertRaises(UnknownModuleName) as cm:
make_action(step, self.MODULE_MAP, {})
self.assertEqual(cm.exception.args[0], 'unknown_module')
def test_with_factory_function_for_cloud_foundry(self):
module_map = {'cf': 'kallisticore.modules.cloud_foundry'}
step = Step.build({'step': 'get_app_stats',
'do': 'cf.get_app_stats',
'where': {'cf_api_url': 'https://cf-api.test',
'a': 1, 'b': 2}})
action = make_action(step, module_map, {})
self.assertIsInstance(action, CloudFoundryAction)
def test_default_action(self):
step = Step.build({'step': 'increment',
'do': 'eg.increment',
'where': {'a': 1}})
action = make_action(step, self.MODULE_MAP, {})
self.assertIsInstance(action, Action)
class TestActionNamespace(TestCase):
def test_action_namespace(self):
action_namespace = {'cf': 'kallisticore.modules.cloud_foundry',
'cm': 'kallisticore.modules.common',
'k8s': 'kallisticore.modules.kubernetes',
'istio': 'kallisticore.modules.kubernetes',
'prom': 'kallisticore.modules.prometheus',
'aws': 'kallisticore.modules.aws'}
default_module_map = getattr(settings, 'KALLISTI_MODULE_MAP', {})
self.assertEqual(action_namespace, default_module_map)
```
#### File: kallisticore/lib/test_credential.py
```python
import os
import unittest
from unittest.mock import patch
from kallisticore.exceptions import InvalidCredentialType
from kallisticore.lib.credential import Credential, \
EnvironmentUserNamePasswordCredential
class TestCredential(unittest.TestCase):
CRED_CLS_MAP = {
'ENV_VAR_USERNAME_PASSWORD': 'kallisticore.lib.credential.'
'EnvironmentUserNamePasswordCredential'}
def test_build_throws_an_error_when_dict_type_not_present(self):
credential_dict = {}
with self.assertRaises(InvalidCredentialType) as error:
Credential.build({}, credential_dict)
self.assertEqual("Invalid credential type: None",
error.exception.message)
def test_build_throws_an_error_when_dict_type_is_invalid(self):
credential_dict = {'type': 'INVALID_CRED'}
with self.assertRaises(InvalidCredentialType) as error:
Credential.build({}, credential_dict)
self.assertEqual("Invalid credential type: INVALID_CRED",
error.exception.message)
def test_build_env_var_username_password(self):
username_key = "USERNAME"
password_key = "PASSWORD"
credential_dict = {"type": "ENV_VAR_USERNAME_PASSWORD",
"username_key": username_key,
"password_key": password_key}
cred = Credential.build(self.CRED_CLS_MAP, credential_dict)
self.assertIsInstance(cred, Credential)
self.assertIsInstance(cred, EnvironmentUserNamePasswordCredential)
self.assertEqual(username_key, cred.username_key)
self.assertEqual(password_key, cred.password_key)
@patch.multiple(Credential, __abstractmethods__=set())
def test_fetch(self):
with self.assertRaises(NotImplementedError):
Credential().fetch()
class TestEnvironmentUserNamePasswordCredential(unittest.TestCase):
user_name = 'A111111'
user_pword = '<PASSWORD>'
def setUp(self):
self.username_key = "ENV_VAR_USERNAME"
self.password_key = "ENV_VAR_PASSWORD"
self.username = TestEnvironmentUserNamePasswordCredential.user_name
        self.password = TestEnvironmentUserNamePasswordCredential.user_pword
os.environ[self.username_key] = self.username
os.environ[self.password_key] = self.password
self.credentials = EnvironmentUserNamePasswordCredential(
username_key=self.username_key,
password_key=self.password_key)
def tearDown(self):
os.environ.pop(self.username_key)
os.environ.pop(self.password_key)
def test_initialize(self):
self.assertEqual(self.username_key, self.credentials.username_key)
self.assertEqual(self.password_key, self.credentials.password_key)
def test_fetch(self):
self.credentials.fetch()
self.assertEqual(self.username, self.credentials.username)
self.assertEqual(self.password, self.credentials.password)
```
#### File: kallisticore/lib/test_trial_log_recorder.py
```python
import datetime
import json
import time
from collections import OrderedDict
from unittest import mock
from django.test import TestCase
from kallisticore.lib.trial_log_recorder import TrialLogRecord, \
TrialStepLogRecord, TrialLogRecorder
class TestTrialLogRecorder(TestCase):
def setUp(self):
self.trial_id = 'test-trial-id'
self.trial_log_recorder = TrialLogRecorder(self.trial_id)
def test_trial_log_recorder_init(self):
self.assertEqual(self.trial_log_recorder.trial_id, self.trial_id)
self.assertEqual(self.trial_log_recorder.trial_record, {})
def test_trial_log_append(self):
timestamp = time.time()
timestamp_string = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y-%m-%dT%H:%M:%SZ')
with mock.patch('time.time') as mock_time:
mock_time.return_value = timestamp
level = 'INFO'
message = 'Test message'
trial_log = TrialLogRecord('steps')
trial_log.append(level, message)
self.assertEqual(trial_log.logs, [
'[{} - {}] {}'.format(timestamp_string, level, message)
])
def test_trial_log_recorder_append_multiple_logs(self):
timestamp = time.time()
timestamp_string = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y-%m-%dT%H:%M:%SZ')
with mock.patch('time.time') as mock_time:
mock_time.return_value = timestamp
level = 'INFO'
message = 'Test message'
trial_stage = 'steps'
trial_log = TrialLogRecord(trial_stage)
trial_log.append(level, message)
trial_log.append(level, message)
self.assertEqual(trial_log.trial_stage, trial_stage)
self.assertEqual(trial_log.logs, [
'[{} - {}] {}'.format(timestamp_string, level, message),
'[{} - {}] {}'.format(timestamp_string, level, message)
])
def test_trial_log_recorder_make(self):
timestamp = time.time()
timestamp_string = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y-%m-%dT%H:%M:%SZ')
with mock.patch('time.time') as mock_time:
mock_time.return_value = timestamp
level = 'INFO'
message = 'Test message'
trial_stage = 'steps'
trial_log = TrialLogRecord(trial_stage)
trial_log.append(level, message)
trial_log.append(level, message)
self.assertEqual(trial_log.trial_stage, trial_stage)
self.assertEqual(trial_log.make(), OrderedDict(
[
('logs',
['[{} - INFO] Test message'.format(timestamp_string),
'[{} - INFO] Test message'.format(timestamp_string)])
]
))
def test_trial_step_log_recorder_make(self):
timestamp = time.time()
timestamp_string = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y-%m-%dT%H:%M:%SZ')
with mock.patch('time.time') as mock_time:
mock_time.return_value = timestamp
level = 'INFO'
message = 'Test message'
trial_stage = 'steps'
trial_log = TrialStepLogRecord(trial_stage, 'test name',
{'key': 'value'})
trial_log.append(level, message)
trial_log.append(level, message)
self.assertEqual(trial_log.trial_stage, trial_stage)
self.assertEqual(trial_log.make(), OrderedDict(
[
('step_name', 'test name'),
('step_parameters', {'key': 'value'}),
('logs',
['[{} - INFO] Test message'.format(timestamp_string),
'[{} - INFO] Test message'.format(timestamp_string)])
]
))
def test_trial_log_recorder_commit_logs(self):
timestamp = time.time()
timestamp_string = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y-%m-%dT%H:%M:%SZ')
with mock.patch('time.time') as mock_time, \
mock.patch('kallisticore.models.trial.Trial.objects.filter')\
as mock_trial_object_filter:
mock_filter = mock.Mock()
mock_filter.update = mock.Mock()
mock_trial_object_filter.return_value = mock_filter
mock_time.return_value = timestamp
level = 'INFO'
message = 'Test message'
trial_stage = 'steps'
trial_log = TrialLogRecord(trial_stage)
trial_log.append(level, message)
trial_log.append(level, message)
self.trial_log_recorder.commit(trial_log)
            # trial_record shouldn't be reset after commit
self.assertEqual({trial_stage: [OrderedDict(
[('logs', ['[{} - INFO] Test message'.format(timestamp_string),
'[{} - INFO] Test message'.format(
timestamp_string)])]
)]}, self.trial_log_recorder.trial_record)
self.assertEqual(trial_log.trial_stage, trial_stage)
mock_trial_object_filter.assert_called_once_with(pk=self.trial_id)
mock_filter.update.assert_called_once_with(
records=json.dumps(self.trial_log_recorder.trial_record))
def test_trial_log_recorder_commit_trial_step_logs(self):
timestamp = time.time()
timestamp_string = datetime.datetime.fromtimestamp(timestamp).strftime(
'%Y-%m-%dT%H:%M:%SZ')
with mock.patch('time.time') as mock_time, \
mock.patch('kallisticore.models.trial.Trial.objects.filter')\
as mock_trial_object_filter:
mock_filter = mock.Mock()
mock_filter.update = mock.Mock()
mock_trial_object_filter.return_value = mock_filter
mock_time.return_value = timestamp
level = 'INFO'
message = 'Test message'
trial_stage = 'steps'
trial_log = TrialStepLogRecord(trial_stage, 'test-step',
{'key': 'value'})
trial_log.append(level, message)
self.trial_log_recorder.commit(trial_log)
            # trial_record shouldn't be reset after commit
self.assertEqual({trial_stage: [OrderedDict(
[
('step_name', 'test-step'),
('step_parameters', {'key': 'value'}),
('logs',
['[{} - INFO] Test message'.format(timestamp_string)])
]
)]}, self.trial_log_recorder.trial_record)
mock_trial_object_filter.assert_called_once_with(pk=self.trial_id)
mock_filter.update.assert_called_once_with(
records=json.dumps(self.trial_log_recorder.trial_record))
def test_trial_log_recorder_commit_logs_exception(self):
with mock.patch('kallisticore.models.trial.Trial.objects.filter')\
as mock_trial_object_filter, mock.patch(
'kallisticore.lib.trial_log_recorder.TrialLogRecorder.'
'logger.warning') as mock_logger:
expected_exception = Exception('test error')
mock_trial_object_filter.side_effect = Exception('test error')
level = 'INFO'
message = 'Test message'
trial_stage = 'steps'
trial_log = TrialLogRecord(trial_stage)
trial_log.append(level, message)
trial_log.append(level, message)
self.trial_log_recorder.commit(trial_log)
self.assertEqual(trial_log.trial_stage, trial_stage)
mock_logger.assert_called_once_with(
"Failed to update 'records' column for trial {}, {}"
.format(self.trial_id, expected_exception))
```
#### File: kallisticore/models/test_experiment.py
```python
from django.test import TestCase
from kallisticore.models.experiment import Experiment
from kallisticore.models.step import Step
class TestExperiment(TestCase):
def setUp(self):
self.metadata = {"test_id": "123456"}
self.parameters = {"org": "my-org",
"app_name": "hello-world",
"url": "https://app.test"}
self.pre_steps = [{"step": "name",
"do": "cm.http_probe",
"where": {"url": "{{url}}"}}]
self.steps = [{"step": "name",
"do": "cf.stop_app",
"where": {"app_name": "{{app_name}}",
"org_name": "{{org}}"}}]
self.post_steps = [{"step": "name",
"do": "cf.start_app",
"where": {"app_name": "{{app_name}}",
"org_name": "{{org}}"}}]
self.experiment = Experiment.create(
name='one-action',
description='one action description',
metadata=self.metadata,
pre_steps=Step.convert_to_steps(self.pre_steps),
steps=Step.convert_to_steps(self.steps),
post_steps=Step.convert_to_steps(self.post_steps),
parameters=self.parameters
)
def test_experiment_fetch_all(self):
experiments = Experiment.objects.all()
self.assertEqual(len(experiments), 1)
experiment = experiments[0]
self.assertIsNotNone(experiment.id)
self.assertEqual(experiment.description, "one action description")
self.assertEqual(Step.convert_to_steps(self.steps), experiment.steps)
self.assertEqual(Step.convert_to_steps(self.pre_steps),
experiment.pre_steps)
self.assertEqual(Step.convert_to_steps(self.post_steps),
experiment.post_steps)
self.assertEqual(experiment.parameters, self.parameters)
self.assertEqual(experiment.metadata, self.metadata)
self.assertEqual(experiment.created_by, "unknown")
def test_experiment_soft_delete_fetch_all(self):
go_web_experiments = Experiment.objects.all()
self.assertEqual(len(go_web_experiments), 1)
Experiment.objects.get(name='one-action').delete()
go_web_experiments = Experiment.objects.all()
self.assertEqual(len(go_web_experiments), 0)
def test_experiment_fetch(self):
experiment = Experiment.objects.get(name='one-action')
self.assertIsNotNone(experiment.id)
self.assertEqual(experiment.description, "one action description")
self.assertEqual(Step.convert_to_steps(self.pre_steps),
experiment.pre_steps)
self.assertEqual(Step.convert_to_steps(self.steps), experiment.steps)
self.assertEqual(Step.convert_to_steps(self.post_steps),
experiment.post_steps)
self.assertEqual(experiment.parameters, self.parameters)
self.assertEqual(experiment.metadata, self.metadata)
self.assertEqual(experiment.created_by, "unknown")
def test_experiment_soft_delete_fetch(self):
experiment = Experiment.objects.get(name='one-action')
self.assertIsNotNone(experiment.id)
experiment.delete()
        # objects.get should raise a DoesNotExist error
with self.assertRaises(Experiment.DoesNotExist):
Experiment.objects.get(id=experiment.id)
# objects.get_queryset_all().get() should return the deleted experiment
experiment = Experiment.objects.get_queryset_all().get(
id=experiment.id)
self.assertIsNotNone(experiment.id)
self.assertEqual(experiment.description, "one action description")
self.assertEqual(Step.convert_to_steps(self.pre_steps),
experiment.pre_steps)
self.assertEqual(Step.convert_to_steps(self.steps), experiment.steps)
self.assertEqual(Step.convert_to_steps(self.post_steps),
experiment.post_steps)
self.assertEqual(experiment.parameters, self.parameters)
self.assertEqual(experiment.metadata, self.metadata)
self.assertEqual(experiment.created_by, "unknown")
```
#### File: kallisticore/models/test_notification.py
```python
from django.test import TestCase
from kallisticore.models.notification import Notification
def update_notification_list():
notification = Notification.objects.get(pk=1)
notification.emails = ['<EMAIL>']
notification.save()
class TestNotification(TestCase):
def test_notification_fetch_for_empty_list(self):
notifications = Notification.objects.all()
notification = notifications[0]
self.assertEqual(notification.emails, [])
def test_notification_fetch_for_list(self):
update_notification_list()
notifications = Notification.objects.all()
notification = notifications[0]
self.assertEqual(notification.emails, ['<EMAIL>'])
def test_notification_delete(self):
update_notification_list()
self.assertNotEqual(len(Notification.objects.all()), 0)
Notification.delete()
self.assertEqual(len(Notification.objects.all()), 0)
```
#### File: modules/common/test_common.py
```python
import base64
import json
from unittest import TestCase, mock
from unittest.mock import Mock, mock_open
import requests_mock
from kallisticore.exceptions import FailedAction, InvalidHttpProbeMethod, \
InvalidCredentialType, InvalidHttpRequestMethod
from kallisticore.lib.credential import \
EnvironmentUserNamePasswordCredential, \
KubernetesServiceAccountTokenCredential
from kallisticore.modules import common
from kallisticore.modules.common import wait, http_probe, http_request
class TestCommonModule(TestCase):
def test_exported_functions(self):
self.assertListEqual(
['http_probe', 'http_request', 'wait'],
common.__all__)
class TestHttpProbe(TestCase):
test_uname = 'test-username'
test_pw = '<PASSWORD>'
def setUp(self):
self._url = "http://go.test/-/status/health"
self._headers = {"Content-type": "text/html"}
def test_exception_for_invalid_method(self):
method = "PUT"
with self.assertRaises(InvalidHttpProbeMethod) as error:
http_probe(url=self._url, method=method)
self.assertEqual(
"Invalid method: {}. HTTP Probe allows only GET and POST methods"
.format(method),
error.exception.message)
@requests_mock.mock()
def test_empty_response_without_request_headers(self, mock_request):
data = {'status': 'UP'}
response = json.dumps(data)
mock_request.get(url=self._url, text=response)
result = http_probe(url=self._url)
self.assertEqual(response, result['response_text'])
self.assertEqual(data, result['response'])
self.assertEqual(200, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_non_json_response(self, mock_request):
mock_request.get(self._url, text='non-json response')
result = http_probe(url=self._url)
self.assertNotIn('response', result)
@requests_mock.mock()
def test_response_headers_with_request_headers(self, mock_request):
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response, headers=self._headers)
result = http_probe(url=self._url)
self.assertEqual(self._headers, result['response_headers'])
def test_exception_for_4xx_or_5xx_status_code_without_headers(self):
text = 'Not Found'
status_code = 404
mock_duration = 1
with self.assertRaises(FailedAction) as error:
with mock.patch('requests.get') as mock_get:
mock_get.return_value.status_code = status_code
mock_get.return_value.text = text
mock_get.return_value.elapsed.total_seconds.return_value = \
mock_duration
http_probe(url=self._url)
self.assertEqual(
"Http probe failed after {} seconds for url {} with status "
"code {}. Details: {}".format(mock_duration,
self._url, status_code, text),
error.exception.message)
@requests_mock.mock()
def test_response_with_headers(self, mock_request):
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response, headers=self._headers)
result = http_probe(url=self._url, headers=self._headers)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_empty_response_headers_with_request_headers(self, mock_request):
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response)
result = http_probe(url=self._url, headers=self._headers)
self.assertEqual({}, result['response_headers'])
def test_exception_for_4xx_or_5xx_with_headers(self):
text = 'Not Found'
status_code = 404
mock_duration = 1
with self.assertRaises(FailedAction) as error:
with mock.patch('requests.get') as mock_get:
mock_get.return_value.status_code = status_code
mock_get.return_value.elapsed.total_seconds.return_value = \
mock_duration
mock_get.return_value.text = text
http_probe(url=self._url, headers=self._headers)
self.assertEqual("Http probe failed after {} seconds for url {} with "
"status code {}. Details: {}"
.format(mock_duration, self._url, status_code, text),
error.exception.message)
@requests_mock.mock()
def test_post_empty_response_headers(self, mock_request):
method = "POST"
response = json.dumps({'status': 'UP'})
mock_request.post(url=self._url, text=response)
result = http_probe(url=self._url, method=method)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_post_request_body_with_response_headers(self, mock_request):
method = "POST"
text = json.dumps({'key': 'value'})
mock_request.post(url=self._url, text=json.dumps(text),
headers=self._headers)
result = http_probe(url=self._url, request_body=text, method=method)
self.assertEqual(self._headers, result['response_headers'])
def test_post_exception_for_4xx_or_5xx_(self):
text = 'Not Found'
method = "POST"
status_code = 404
mock_duration = 1
with self.assertRaises(FailedAction) as error:
with mock.patch('requests.post') as mock_post:
mock_post.return_value.status_code = status_code
mock_post.return_value.elapsed.total_seconds.return_value = \
mock_duration
mock_post.return_value.text = text
http_probe(url=self._url, method=method)
self.assertEqual("Http probe failed after {} seconds for url {} "
"with status code {}. Details: {}"
.format(mock_duration, self._url, status_code, text),
error.exception.message)
@requests_mock.mock()
def test_post_with_headers(self, mock_request):
method = "POST"
response = json.dumps({'status': 'UP'})
mock_request.post(url=self._url, text=response, headers=self._headers)
result = http_probe(url=self._url, method=method,
headers=self._headers)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_post_empty_response_header(self, mock_request):
method = "POST"
response = json.dumps({'status': 'UP'})
mock_request.post(url=self._url, text=response)
result = http_probe(url=self._url, method=method,
headers=self._headers)
self.assertEqual({}, result['response_headers'])
def test_post_exception_for_4xx_or_5xx_with_headers(self):
text = 'Not Found'
method = "POST"
status_code = 404
mock_duration = 1
with self.assertRaises(FailedAction) as error:
with mock.patch('requests.post') as mock_post:
mock_post.return_value.status_code = status_code
mock_post.return_value.elapsed.total_seconds.return_value = \
mock_duration
mock_post.return_value.text = text
http_probe(url=self._url, method=method, headers=self._headers)
self.assertEqual("Http probe failed after {} seconds for url {} "
"with status code {}. Details: {}"
.format(mock_duration, self._url, status_code, text),
error.exception.message)
@requests_mock.mock()
def test_k8s_auth_with_header(self, mock_request):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module, \
mock.patch('builtins.open', mock_open(read_data='test-token')):
mock_k8s_creds = KubernetesServiceAccountTokenCredential()
mock_credential_module.build.return_value = mock_k8s_creds
auth_config = {
'type': 'oauth2_token',
'credentials': {}
}
expected_headers = {'Authorization': 'test-token', **self._headers}
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response,
headers=expected_headers)
result = http_probe(url=self._url, headers=self._headers,
authentication=auth_config)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
@requests_mock.mock()
def test_env_pw_auth_without_header(self, mock_request):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
mock_credential.username.return_value = TestHttpProbe.test_uname
mock_credential.password.return_value = TestHttpProbe.test_pw
mock_credential.fetch.side_effects = None
mock_credential_module.build.return_value = mock_credential
test_auth_url = 'https://test-auth.com'
auth_config = {
'type': 'oauth2_token',
'url': test_auth_url,
'credentials': {},
'client': {
'id': 'test-client-id',
'secret': 'test-client-secret'
}
}
expected_base64 = base64.b64encode(
(auth_config['client']['id'] + ':' +
auth_config['client']['secret']).encode()).decode('utf-8')
auth_expected_headers = {
'Authorization': 'Basic %s' % expected_base64}
auth_mock_response = json.dumps({'access_token': 'test-token'})
mock_request.post(url=test_auth_url, text=auth_mock_response,
headers=auth_expected_headers)
probe_expected_headers = {'Authorization': 'test-token'}
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response,
headers=probe_expected_headers)
result = http_probe(url=self._url, authentication=auth_config)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
@requests_mock.mock()
def test_env_pw_auth_with_resource(self, mock_request):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
mock_credential.username.return_value = TestHttpProbe.test_uname
mock_credential.password.return_value = TestHttpProbe.test_pw
mock_credential.fetch.side_effects = None
mock_credential_module.build.return_value = mock_credential
test_auth_url = 'https://test-auth.com'
auth_config = {
'type': 'oauth2_token',
'url': test_auth_url,
'credentials': {},
'client': {
'id': 'test-client-id',
'secret': 'test-client-secret'
},
'resource': 'test-resource-value',
'token_key': 'different_token_key'
}
expected_base64 = base64.b64encode(
(auth_config['client']['id'] + ':' +
auth_config['client']['secret']).encode()).decode('utf-8')
auth_expected_headers = {
'Authorization': 'Basic %s' % expected_base64}
auth_mock_response = json.dumps(
{'access_token': 'test-token',
'different_token_key': 'different-token'})
mock_auth_post = mock_request.post(url=test_auth_url,
text=auth_mock_response,
headers=auth_expected_headers)
probe_expected_headers = {'Authorization': 'different-token'}
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response,
headers=probe_expected_headers)
result = http_probe(url=self._url, authentication=auth_config)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertTrue('resource=test-resource-value' in
mock_auth_post.last_request.body)
def test_env_pw_authentication_fail(self):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
mock_credential.username.return_value = TestHttpProbe.test_uname
mock_credential.password.return_value = TestHttpProbe.test_pw
mock_credential.fetch.side_effects = None
mock_credential_module.build.return_value = mock_credential
test_auth_url = 'https://test-auth.com'
auth_config = {
'type': 'oauth2_token',
'url': test_auth_url,
'credentials': {},
'client': {
'id': 'test-client-id',
'secret': 'test-client-secret'
}
}
mock_response_status_code = 404
mock_response_text = 'test-error-message'
with self.assertRaises(FailedAction) as error:
with mock.patch('requests.post') as mock_post:
mock_post.return_value.status_code = \
mock_response_status_code
mock_post.return_value.text = mock_response_text
http_probe(url=self._url, authentication=auth_config)
self.assertEqual(
'Authentication for http request failed with status code {}. '
'Details: {}'.format(mock_response_status_code,
mock_response_text),
error.exception.message)
def test_authentication_unknown_credential(self):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock()
mock_credential_module.build.return_value = mock_credential
auth_config = {
'type': 'oauth2_token',
'credentials': {}
}
with self.assertRaises(InvalidCredentialType) as error:
http_probe(url=self._url, authentication=auth_config)
self.assertEqual('Invalid credential type: %s' %
mock_credential.__class__.__name__,
error.exception.message)
class TestHttpRequest(TestCase):
def setUp(self):
self._url = "http://test.com/-/status/health"
self._headers = {"Content-type": "text/html"}
def test_exception_when_invalid_method_is_provided(self):
method = "INVALID_METHOD"
with self.assertRaises(InvalidHttpRequestMethod) as error:
http_request(url=self._url, method=method)
self.assertEqual("Invalid method: {}. Please specify a valid HTTP "
"request method".format(method),
error.exception.message)
@requests_mock.mock()
def test_not_raise_exception_for_4xx_or_5xx_(self, mock_request):
text = 'Not Found'
status_code = 404
mock_request.get(url=self._url, text=text, status_code=status_code)
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response, headers=self._headers)
result = http_request(url=self._url, headers=self._headers)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_non_json_response(self, mock_request):
mock_request.get(self._url, text='non-json response')
result = http_request(url=self._url)
self.assertNotIn('response', result)
@requests_mock.mock()
def test_get_response(self, mock_request):
data = {'status': 'UP'}
response = json.dumps(data)
mock_request.get(url=self._url, text=response, headers=self._headers)
result = http_request(url=self._url, headers=self._headers)
self.assertEqual(response, result['response_text'])
self.assertEqual(data, result['response'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_get_empty_response_headers(self, mock_request):
data = {'status': 'UP'}
response = json.dumps(data)
mock_request.get(url=self._url, text=response)
result = http_request(url=self._url)
self.assertEqual(response, result['response_text'])
self.assertEqual(data, result['response'])
self.assertEqual(200, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_post_empty_response_headers(self, mock_request):
method = "POST"
response = json.dumps({'status': 'UP'})
mock_request.post(url=self._url, text=response)
result = http_request(url=self._url, method=method)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_post_request_body_without_request_header(self, mock_request):
method = "POST"
text = json.dumps({'key': 'data'})
mock_request.post(url=self._url, text=text, headers=self._headers)
result = http_request(url=self._url, method=method, request_body=text)
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_post_request_with_request_headers(self, mock_request):
method = "POST"
response = json.dumps({'status': 'UP'})
mock_request.post(url=self._url, text=response, headers=self._headers)
result = http_request(url=self._url, method=method,
headers=self._headers)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_put_with_request_headers(self, mock_request):
method = "PUT"
response = json.dumps({'status': 'UP'})
mock_request.put(url=self._url, text=response)
result = http_request(url=self._url, method=method,
headers=self._headers)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_put_request_body_with_response_headers(self, mock_request):
method = "PUT"
text = json.dumps({'key': 'value'})
mock_request.put(url=self._url, text=text, headers=self._headers)
result = http_request(url=self._url, method=method, request_body=text)
self.assertEqual(text, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_patch_without_headers(self, mock_request):
method = "PATCH"
response = json.dumps({'status': 'UP'})
mock_request.patch(url=self._url, text=response)
result = http_request(url=self._url, method=method)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_patch_request_body_with_headers(self, mock_request):
method = "PATCH"
text = json.dumps({'key': 'value'})
mock_request.patch(url=self._url, text=text, headers=self._headers)
result = http_request(url=self._url, method=method, request_body=text)
self.assertEqual(text, result['response_text'])
self.assertEqual(200, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_delete_with_empty_response_headers(self, mock_request):
method = "DELETE"
response = json.dumps({'status': 'UP'})
mock_request.delete(url=self._url, text=response)
mock_request.delete(url=self._url, text=response, status_code=204)
result = http_request(url=self._url, method=method)
self.assertEqual(response, result['response_text'])
self.assertEqual(204, result['status_code'])
self.assertEqual({}, result['response_headers'])
@requests_mock.mock()
def test_delete_request_body_with_headers(self, mock_request):
method = "DELETE"
response = json.dumps({'status': 'UP'})
mock_request.delete(url=self._url, text=response, status_code=204,
headers=self._headers)
result = http_request(url=self._url, method=method)
self.assertEqual(response, result['response_text'])
self.assertEqual(204, result['status_code'])
self.assertEqual(self._headers, result['response_headers'])
@requests_mock.mock()
def test_authentication_k8s_with_header(self, mock_request):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module, \
mock.patch('builtins.open', mock_open(read_data='test-token')):
mock_k8s_creds = KubernetesServiceAccountTokenCredential()
mock_credential_module.build.return_value = mock_k8s_creds
auth_config = {
'type': 'oauth2_token',
'credentials': {}
}
expected_headers = {'Authorization': 'test-token', **self._headers}
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response,
headers=expected_headers)
result = http_request(url=self._url, headers=self._headers,
authentication=auth_config)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
@requests_mock.mock()
def test_env_pw_auth_without_header(self, mock_request):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
mock_credential.username.return_value = TestHttpProbe.test_uname
mock_credential.password.return_value = TestHttpProbe.test_pw
mock_credential.fetch.side_effects = None
mock_credential_module.build.return_value = mock_credential
test_auth_url = 'https://test-auth.com'
auth_config = {
'type': 'oauth2_token',
'url': test_auth_url,
'credentials': {},
'client': {
'id': 'test-client-id',
'secret': 'test-client-secret'
}
}
expected_base64 = base64.b64encode(
(auth_config['client']['id'] + ':' +
auth_config['client']['secret']).encode()).decode('utf-8')
auth_expected_headers = {
'Authorization': 'Basic %s' % expected_base64}
auth_mock_response = json.dumps({'access_token': 'test-token'})
mock_request.post(url=test_auth_url, text=auth_mock_response,
headers=auth_expected_headers)
request_expected_headers = {'Authorization': 'test-token'}
response = json.dumps({'status': 'UP'})
mock_request.get(url=self._url, text=response,
headers=request_expected_headers)
result = http_request(url=self._url, authentication=auth_config)
self.assertEqual(response, result['response_text'])
self.assertEqual(200, result['status_code'])
def test_env_pw_auth_failure(self):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock(spec=EnvironmentUserNamePasswordCredential)
mock_credential.username.return_value = TestHttpProbe.test_uname
mock_credential.password.return_value = TestHttpProbe.test_pw
mock_credential.fetch.side_effects = None
mock_credential_module.build.return_value = mock_credential
test_auth_url = 'https://test-auth.com'
auth_config = {
'type': 'oauth2_token',
'url': test_auth_url,
'credentials': {},
'client': {
'id': 'test-client-id',
'secret': 'test-client-secret'
}
}
mock_response_status_code = 404
mock_response_text = 'test-error-message'
with self.assertRaises(FailedAction) as error:
with mock.patch('requests.post') as mock_post:
mock_post.return_value.status_code = \
mock_response_status_code
mock_post.return_value.text = mock_response_text
http_request(url=self._url, authentication=auth_config)
self.assertEqual(
'Authentication for http request failed with status code {}. '
'Details: {}'.format(mock_response_status_code,
mock_response_text),
error.exception.message)
def test_authentication_should_fail_with_unknown_credential(self):
with mock.patch('kallisticore.modules.common.Credential') \
as mock_credential_module:
mock_credential = Mock()
mock_credential_module.build.return_value = mock_credential
auth_config = {
'type': 'oauth2_token',
'credentials': {}
}
with self.assertRaises(InvalidCredentialType) as error:
http_request(url=self._url, authentication=auth_config)
self.assertEqual('Invalid credential type: %s' %
mock_credential.__class__.__name__,
error.exception.message)
class TestWait(TestCase):
@mock.patch('time.sleep')
def test_wait_for_15_seconds(self, mock_sleep):
wait(time_in_seconds=15)
mock_sleep.assert_called_once_with(15)
def test_exception_for_invalid_input(self):
with self.assertRaises(FailedAction) as error:
wait(time_in_seconds=None)
self.assertEqual(
"Expected integer for argument 'time_in_seconds' (got NoneType)",
error.exception.message)
```
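The tests above effectively document the calling convention for `http_probe` and `http_request`: both take `url` plus optional `headers`, `method`, `request_body` and `authentication`, return a dict with `response_text`, `status_code` and `response_headers`, and `http_probe` raises `FailedAction` on 4xx/5xx responses while `http_request` does not. A minimal usage sketch follows; the import path is an assumption based on the `kallisticore.modules.common` target patched in the mocks, and the URLs are purely illustrative.
```python
# Hedged sketch, not part of the corpus; import path and URLs are assumptions.
from kallisticore.exceptions import FailedAction
from kallisticore.modules.common import http_probe, http_request

try:
    # http_probe raises FailedAction when the endpoint answers 4xx/5xx
    result = http_probe(url='https://example.test/-/status/health',
                        headers={'Accept': 'application/json'})
    print(result['status_code'], result['response_text'])
except FailedAction as err:
    print('probe failed:', err.message)

# http_request reports 4xx/5xx in the result instead of raising
result = http_request(url='https://example.test/api/item', method='POST',
                      request_body='{"key": "value"}')
print(result['status_code'], result['response_headers'])
```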
#### File: modules/prometheus/test_prometheus_action.py
```python
from unittest import TestCase, mock
from kallisticore.lib.action import Action, FunctionLoader
from kallisticore.models.step import Step
from kallisticore.modules.prometheus import PrometheusAction
class TestPrometheusAction(TestCase):
module_map = {'prom': 'kallisticore.modules.prometheus'}
def setUp(self):
self.arguments = {
'base_url': 'http://prometheus.test',
'query': 'test-query',
'when': '5 minutes ago'
}
self.action_spec = {"step": "Test Prometheus Action",
"do": "prom.query",
"where": self.arguments}
self.step = Step.build(self.action_spec)
def test_initialization(self):
action = PrometheusAction.build(self.step, self.module_map, {})
expected_func = FunctionLoader(self.module_map, 'prom') \
.get_function('query')
self.assertIsInstance(action, PrometheusAction)
self.assertIsInstance(action, Action)
self.assertEqual('Test Prometheus Action', action.name)
self.assertEqual(expected_func, action.func)
self.assertEqual(self.arguments['query'], action.arguments['query'])
self.assertEqual(self.arguments['when'], action.arguments['when'])
self.assertEqual({'prometheus_base_url': 'http://prometheus.test'},
action.arguments['configuration'])
self.assertEqual(None, action.credential)
def test_execute(self):
with mock.patch('chaosprometheus.probes.query') as mock_prom_query:
mock_query_return_value = 'test query return value'
mock_prom_query.return_value = mock_query_return_value
action = PrometheusAction.build(self.step, self.module_map, {})
result = action.execute()
self.assertEqual(mock_query_return_value, result)
mock_prom_query.assert_called_once_with(
query='test-query', when='5 minutes ago',
configuration={'prometheus_base_url': 'http://prometheus.test'})
```
#### File: tests/kallisticore/test_exceptions.py
```python
from unittest import TestCase
from kallisticore.exceptions import KallistiCoreException, FailedAction, \
InvalidCredentialType, CredentialNotFound, StepsExecutionError
from kallisticore.lib.credential import EnvironmentUserNamePasswordCredential
from kallisticore.models.trial import TrialStepsType
class TestKallistiCoreException(TestCase):
def test_initialization(self):
error_message = "sample error message"
exception = KallistiCoreException(message=error_message)
self.assertEqual(error_message, exception.message)
self.assertIsInstance(exception, KallistiCoreException)
class TestFailedAction(TestCase):
def test_initialization(self):
error_message = "sample error message"
exception = FailedAction(message=error_message)
self.assertEqual(error_message, exception.message)
self.assertIsInstance(exception, FailedAction)
self.assertIsInstance(exception, KallistiCoreException)
class TestInvalidCredentialType(TestCase):
def test_initialization(self):
credential_type = "INVALID"
exception = InvalidCredentialType(credential_type)
self.assertEqual("Invalid credential type: " + credential_type,
exception.message)
self.assertIsInstance(exception, InvalidCredentialType)
self.assertIsInstance(exception, KallistiCoreException)
class TestCredentialNotFound(TestCase):
def test_initialization(self):
source = EnvironmentUserNamePasswordCredential.__name__
exception = CredentialNotFound(source, username_key='user',
password_key='pass')
self.assertEqual("Credential not found. Source: " + source +
", Details: {'username_key': 'user'"
", 'password_key': '<PASSWORD>'}",
exception.message)
self.assertIsInstance(exception, CredentialNotFound)
self.assertIsInstance(exception, KallistiCoreException)
class TestStepsExecutionError(TestCase):
def test_initialization(self):
exception = StepsExecutionError(TrialStepsType.PRE)
self.assertEqual(TrialStepsType.PRE, exception.step_type)
self.assertEqual("\"pre_steps\" failed.", exception.message)
self.assertIsInstance(exception, StepsExecutionError)
self.assertIsInstance(exception, KallistiCoreException)
def test_str(self):
exception = StepsExecutionError(TrialStepsType.PRE)
exception.__cause__ = RuntimeError("some-error")
self.assertEqual("[in: pre_steps, reason: some-error]", str(exception))
def test_is_pre_steps_exception_when_step_type_is_pre_steps(self):
exception = StepsExecutionError(TrialStepsType.PRE)
exception.__cause__ = RuntimeError("some-error")
self.assertTrue(exception.is_pre_steps_exception())
def test_is_pre_steps_exception_when_step_type_is_post_steps(self):
exception = StepsExecutionError(TrialStepsType.POST)
exception.__cause__ = RuntimeError("some-error")
self.assertFalse(exception.is_pre_steps_exception())
def test_is_pre_steps_exception_when_step_type_is_steps(self):
exception = StepsExecutionError(TrialStepsType.STEPS)
exception.__cause__ = RuntimeError("some-error")
self.assertFalse(exception.is_pre_steps_exception())
```
#### File: tests/kallisticore/test_signals.py
```python
from unittest import mock
from django.test import TestCase
from kallisticore import signals
from kallisticore.models import Experiment
from kallisticore.models.step import Step
from kallisticore.models.trial import Trial
from kallisticore.signals import execute_plan_for_trial
class TestSignalExecutePlanForTrial(TestCase):
def setUp(self):
self.env = 'dev'
self.step_to_get_app_by_name = {
'step': 'Get CF App by Name',
'do': 'cf.get_app_by_name',
'where': {
'app_name': 'hello-world',
'cf_api_url': 'https://cf-api.test'
}}
self.test_k8s_step = {
'step': 'K8s Test Step',
'do': 'k8s.test_do',
'where': {
'where-key-1': 'where-value-1',
'where-key-2': 'where-key-2'
}}
self._list_of_commands = [self.step_to_get_app_by_name,
self.test_k8s_step]
self._experiment = Experiment.create(
name='test_experiment', description='detailed text description',
steps=Step.convert_to_steps(self._list_of_commands))
@mock.patch("kallisticore.signals.execute_trial")
def test_executor_not_invoked_for_existing_trial(self, mock_exec_trial):
trial = create_a_trial_object(self._experiment)
trial.save()
mock_exec_trial.assert_not_called()
def create_a_trial_object(experiment):
signals.post_save.disconnect(execute_plan_for_trial, sender=Trial)
trial = Trial.create(experiment=experiment)
signals.post_save.connect(execute_plan_for_trial, sender=Trial)
return trial
```
#### File: kallisticore/utils/test_fields.py
```python
import json
from django.test import TestCase
from kallisticore.models.step import Step
from kallisticore.utils.fields import DictField, ListField, ValidationError, \
StepsField
class TestListField(TestCase):
def setUp(self):
self._list = ['a', 'b']
self._json_input = json.dumps(self._list)
"""
Tests for method from_db_value
"""
def test_from_db_value(self):
field = ListField().from_db_value(self._json_input)
self.assertEqual(2, len(field))
self.assertEqual(self._list[0], field[0])
self.assertEqual(self._list[1], field[1])
def test_from_db_value_with_None(self):
field = ListField().from_db_value(None)
self.assertEqual(0, len(field))
def test_from_db_value_with_empty_str(self):
field = ListField().from_db_value("")
self.assertEqual(0, len(field))
def test_from_db_value_with_invalid_field(self):
with self.assertRaises(ValidationError) as error:
ListField().from_db_value("[")
self.assertEqual(error.exception.message,
"Invalid format: Expecting value")
"""
Tests for method to_python
"""
def test_to_python_for_valid_list(self):
field = ListField().to_python(self._json_input)
self.assertEqual(2, len(field))
self.assertEqual(self._list[0], field[0])
self.assertEqual(self._list[1], field[1])
def test_to_python_with_None(self):
field = ListField().to_python(None)
self.assertEqual(0, len(field))
def test_to_python_with_empty_str(self):
field = ListField().to_python("")
self.assertEqual(0, len(field))
def test_to_python_with_python_list(self):
field = ListField().to_python(self._list)
self.assertEqual(2, len(field))
self.assertEqual(self._list[0], field[0])
self.assertEqual(self._list[1], field[1])
"""
Tests for method get_prep_value
"""
def test_get_prep_value_with_list(self):
value = ListField().get_prep_value(self._list)
self.assertEqual(self._json_input, value)
def test_get_prep_value_with_json(self):
value = ListField().get_prep_value(self._json_input)
self.assertEqual(self._json_input, value)
class TestStepsField(TestCase):
def setUp(self):
self._step1 = {
"do": "cm.http_health_check",
"where": {"url": "https://app.test"}
}
self._step2 = {
"do": "cm.wait",
"where": {"time_in_seconds": 2}
}
"""
Tests for method from_db_value
"""
def test_from_db_value_with_valid_step_list(self):
steps = StepsField().from_db_value('[' + json.dumps(self._step1) + ']')
self.assertEqual(1, len(steps))
step1 = steps[0]
self.assertIsInstance(step1, Step)
self.assertEqual(Step.build(self._step1), step1)
def test_from_db_value_with_none(self):
steps = StepsField().from_db_value(None)
self.assertEqual(0, len(steps))
def test_from_db_value_with_empty_str(self):
steps = StepsField().from_db_value("")
self.assertEqual(0, len(steps))
def test_from_db_value_with_invalid_value(self):
with self.assertRaises(ValidationError) as error:
StepsField().from_db_value("[")
self.assertEqual(error.exception.message,
"Invalid format: Expecting value")
def test_from_db_value_with_invalid_step(self):
invalid_step = {"do": "cm.wait"}
invalid_step_json = json.dumps(invalid_step)
with self.assertRaises(ValidationError) as error:
StepsField().from_db_value("[" + invalid_step_json + "]")
self.assertEqual(
"Invalid Steps: Some steps provided are invalid. "
"Invalid Steps: [" + invalid_step_json + "]",
error.exception.message)
"""
Tests for method to_python
"""
def test_to_python_with_valid_step_list(self):
steps = StepsField().to_python(json.dumps([self._step1]))
self.assertEqual(1, len(steps))
step1 = steps[0]
self.assertIsInstance(step1, Step)
self.assertEqual(Step.build(self._step1), step1)
def test_to_python_with_none(self):
steps = StepsField().to_python(None)
self.assertEqual(0, len(steps))
def test_to_python_with_empty_str(self):
steps = StepsField().to_python("")
self.assertEqual(0, len(steps))
def test_to_python_with_step_list(self):
steps = StepsField().to_python([self._step1])
self.assertEqual(1, len(steps))
self.assertIsInstance(steps[0], Step)
self.assertEqual(steps[0], Step.build(self._step1))
"""
Tests for method get_prep_value
"""
def test_get_prep_value_with_step_list(self):
value = StepsField().get_prep_value([Step.build(self._step1)])
self.assertEqual(json.dumps([self._step1]), value)
def test_get_prep_value_with_json_string(self):
value = StepsField().get_prep_value(
'[' + json.dumps(self._step1) + ']')
self.assertEqual([self._step1], json.loads(value))
class TestDictField(TestCase):
def setUp(self):
self._parameters = {"instances_to_kill": 1, "app_name": "my-app",
"org": "my-org", "key1": "value1",
"key2": "value2"}
"""
Tests for method from_db_value
"""
def test_from_db_value_with_valid_param(self):
parameters = DictField().from_db_value(json.dumps(self._parameters))
self.assertEqual(5, len(parameters))
self.assertIsInstance(parameters, dict)
self.assertEqual(parameters, self._parameters)
def test_from_db_value_with_none(self):
parameters = DictField().from_db_value(None)
self.assertEqual(0, len(parameters))
def test_from_db_value_with_empty_str(self):
parameters = DictField().from_db_value("")
self.assertEqual(0, len(parameters))
def test_from_db_value_with_invalid_value(self):
with self.assertRaises(ValidationError) as error:
DictField().from_db_value("[")
self.assertEqual(error.exception.message,
"Invalid format: Expecting value")
"""
Tests for method to_python
"""
def test_to_python_valid_json(self):
parameters = DictField().to_python(json.dumps(self._parameters))
self.assertEqual(5, len(parameters))
self.assertIsInstance(parameters, dict)
self.assertEqual(parameters, self._parameters)
def test_to_python_with_none(self):
parameters = DictField().to_python(None)
self.assertEqual(0, len(parameters))
def test_to_python_with_empty_str(self):
parameters = DictField().to_python("")
self.assertEqual(0, len(parameters))
def test_to_python_with_dict(self):
parameters = DictField().to_python(self._parameters)
self.assertEqual(5, len(parameters))
self.assertIsInstance(parameters, dict)
self.assertEqual(parameters, self._parameters)
"""
Tests for method get_prep_value
"""
def test_get_prep_value_with_dict(self):
value = DictField().get_prep_value(self._parameters)
self.assertEqual(json.dumps(self._parameters), value)
def test_get_prep_value_with_json_string(self):
value = DictField().get_prep_value(json.dumps(self._parameters))
self.assertEqual(self._parameters, json.loads(value))
```
#### File: kallisticore/utils/test_sanitizer.py
```python
from kallisticore.utils.sanitizer import Sanitizer
from unittest import TestCase
from tests.kallisticore.utils.fixture.trial_result_data import \
sanitizer_real_example_test, sanitizer_real_example_test_expected, \
sanitizer_theoretical_test, sanitizer_theoretical_test_expected
class TestSanitizer(TestCase):
def test_clean_sensitive_data_string(self):
test_string = 'test-string'
self.assertEqual(test_string,
Sanitizer.clean_sensitive_data(test_string))
def test_clean_sensitive_data_theoretical(self):
self.assertEqual(
sanitizer_theoretical_test_expected,
Sanitizer.clean_sensitive_data(sanitizer_theoretical_test))
def test_clean_sensitive_data_real_example(self):
self.assertEqual(
sanitizer_real_example_test_expected,
Sanitizer.clean_sensitive_data(sanitizer_real_example_test))
```
#### File: kallisticore/views/test_experiment.py
```python
from django.urls import reverse
from kallisticore.models.experiment import Experiment
from kallisticore.serializers import ExperimentSerializer
from rest_framework import status
from tests.kallisticore.base import KallistiTestSuite
class TestExperimentListAPI(KallistiTestSuite):
def setUp(self):
super(TestExperimentListAPI, self).setUp()
self._token = '1<PASSWORD>'
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self._token)
def tearDown(self):
self.client.credentials()
super(TestExperimentListAPI, self).tearDown()
def test_list_empty_experiments(self):
url = reverse('experiment-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_list_experiments(self):
experiment1 = Experiment.create(
name='kill-my-web-cf-app-instance', description='HA experiment')
experiment2 = Experiment.create(
name='stop-my-web-cf-app', description='DR experiment')
url = reverse('experiment-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(len(response_data), 2)
self.assertIn(ExperimentSerializer(experiment1).data, response_data)
self.assertIn(ExperimentSerializer(experiment2).data, response_data)
class TestExperimentGetAPI(KallistiTestSuite):
def setUp(self):
super(TestExperimentGetAPI, self).setUp()
self._token = '1231<PASSWORD>'
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self._token)
def tearDown(self):
self.client.credentials()
super(TestExperimentGetAPI, self).tearDown()
def test_get_details_with_invalid_id(self):
url = reverse('experiment-detail', args=['non-existent'])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found.'})
def test_get_details(self):
experiment = Experiment.create(
name='kill-my-web-cf-app-instance', description='HA experiment')
url = reverse('experiment-detail', args=[experiment.id])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, ExperimentSerializer(experiment).data)
class TestExperimentCreateAPI(KallistiTestSuite):
def setUp(self):
super(TestExperimentCreateAPI, self).setUp()
self._token = '<PASSWORD>'
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self._token)
def tearDown(self):
self.client.credentials()
super(TestExperimentCreateAPI, self).tearDown()
def test_post_with_valid_details(self):
data = {'name': 'go-redirection-would-work-when-database-dies',
'description': 'This experiment would prove go redirection '
'would be resilient to DB failures',
'parameters': {},
'steps': []}
url = reverse('experiment-list')
response = self.client.post(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Experiment.objects.count(), 1)
def test_post_with_invalid_data(self):
url = reverse('experiment-list')
response = self.client.post(url, data={}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Experiment.objects.count(), 0)
class TestExperimentDeleteAPI(KallistiTestSuite):
def setUp(self):
super(TestExperimentDeleteAPI, self).setUp()
self._token = '<PASSWORD>23123123123'
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self._token)
def tearDown(self):
self.client.credentials()
super(TestExperimentDeleteAPI, self).tearDown()
def test_delete_with_invalid_id(self):
url = reverse('experiment-detail', args=['non-existent'])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found.'})
def test_delete(self):
experiment = Experiment.create(
name='kill-my-web-cf-app-instance', description='HA experiment')
url = reverse('experiment-detail', args=[experiment.id])
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(Experiment.objects.count(), 0)
class TestExperimentPatchAPI(KallistiTestSuite):
def setUp(self):
super(TestExperimentPatchAPI, self).setUp()
self._data = {'description': 'This experiment would prove go '
'redirection would be resilient to DB '
'failures'}
self._token = '<PASSWORD>'
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self._token)
def tearDown(self):
self.client.credentials()
super(TestExperimentPatchAPI, self).tearDown()
def test_patch_with_invalid_id(self):
url = reverse('experiment-detail', args=['non-existent'])
response = self.client.patch(url, data=self._data, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data, {'detail': 'Not found.'})
def test_patch(self):
experiment = Experiment.create(
name='kill-my-web-cf-app-instance', description='HA experiment')
url = reverse('experiment-detail', args=[experiment.id])
response = self.client.patch(url, data=self._data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Experiment.objects.count(), 1)
```
#### File: kallisticore/views/test_report.py
```python
from django.urls import reverse
from rest_framework import status
from kallisticore.models import Experiment, Trial
from kallisticore.serializers import ReportSerializer
from kallisticore.views.report import trial_id_query_param
from tests.kallisticore.base import KallistiTestSuite
class TestQueryParam(KallistiTestSuite):
def test_trial_id_query_param(self):
self.assertEqual(trial_id_query_param.name, 'trial-id')
self.assertEqual(trial_id_query_param.in_, 'query')
self.assertEqual(trial_id_query_param.description,
'[Optional] A UUID string identifying a trial to get '
'report for the trial')
self.assertEqual(trial_id_query_param.type, 'string')
class TestReportAPI(KallistiTestSuite):
def setUp(self):
super(TestReportAPI, self).setUp()
self._token = '<PASSWORD>'
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self._token)
def tearDown(self):
self.client.credentials()
super(TestReportAPI, self).tearDown()
def test_list_report_empty_experiment(self):
url = reverse('report')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
def test_list_report_experiments_without_trials(self):
experiment1 = Experiment.create(
name='kill-my-web-cf-app-instance', description='HA experiment')
experiment2 = Experiment.create(
name='stop-my-web-cf-app', description='DR experiment')
url = reverse('report')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(len(response_data), 2)
self.assertIn(ReportSerializer(experiment1).data, response_data)
self.assertIn(ReportSerializer(experiment2).data, response_data)
def test_list_report_experiments_with_trials(self):
experiment = Experiment.create(
name='kill-my-web-cf-app-instance', description='HA experiment')
Trial.create(experiment=experiment)
url = reverse('report')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(len(response_data), 1)
self.assertIn(ReportSerializer(experiment).data, response_data)
self.assertEqual(ReportSerializer(experiment).data['trials'][0]['id'],
response_data[0]['trials'][0]['id'])
def test_get_report_by_trial_id(self):
experiment_1 = Experiment.create(name='test-experiment-1',
description='Test experiment 1.')
# make sure report is generated only for the corresponding experiment
Experiment.create(name='test-experiment-2',
description='Test experiment 2.')
trial = Trial.create(experiment=experiment_1)
query_param = '?trial-id=%s' % trial.id
# make sure report is generated only for the specified trial
Trial.create(experiment=experiment_1)
url = reverse('report')
response = self.client.get(url + query_param, format='json')
expected_report_serializer = ReportSerializer(experiment_1, context={
'trial_id': trial.id})
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(len(response_data), 1)
self.assertIn(expected_report_serializer.data, response_data)
self.assertEqual(expected_report_serializer.data['trials'][0]['id'],
response_data[0]['trials'][0]['id'])
``` |
{
"source": "jpmorgen/IoIO",
"score": 2
} |
#### File: jpmorgen/IoIO/cormultipipe.py
```python
import inspect
import os
import re
import time
import datetime
import glob
import psutil
import argparse
from pathlib import Path
import numpy as np
import numpy.ma as ma
from scipy import signal, stats, interpolate
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
import pandas as pd
from astropy import log
from astropy import units as u
from astropy.io.fits import Header, getheader
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.table import QTable
from astropy.time import Time
from astropy.stats import mad_std, biweight_location
from astropy.wcs import FITSFixedWarning
from photutils import Background2D, MedianBackground
import ccdproc as ccdp
from bigmultipipe import num_can_process, WorkerWithKwargs, NestablePool
from bigmultipipe import multi_proc, multi_logging, prune_pout
from ccdmultipipe import CCDMultiPipe
from ccdmultipipe.utils import FilterWarningCCDData
import sx694
from corobsdata import CorData, overscan_estimate
# Processing global variables. Since I avoid use of the global
# statement and don't reassign these at global scope, they stick to
# these values and provide handy defaults for routines and object
# inits. It is also a way to be lazy about documenting all of the
# code :-o --> This is causing some problems with the way I refer to
# the same names in the code. Consider all caps for these constants
# Tests with first iteration of pipeline showed that the real gain in
# speed is from the physical processors, not the logical processors
# (threads). Threads automatically make the physical processors
# faster. Going to num_processes greater than the number of physical
# processors does go faster, but only asymptotically, probably because
# wait times are minimized. Rather than try to milk the asymptote for
# speed, just max out on physical processors to get the steepest gains
# and leave the asymptote for other jobs
MAX_NUM_PROCESSES = psutil.cpu_count(logical=False)
MAX_MEM_FRAC = 0.85
# Calculate the maximum CCDdata size based on 64bit primary & uncert +
# 8 bit mask / 8 bits per byte. It will be compared to
# psutil.virtual_memory() at runtime to optimize computational tasks
# in my do-it-yourself multiprocessing routines
MAX_CCDDATA_BITPIX = 2*64 + 8
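# For orientation (illustrative arithmetic only, nothing below depends on
# it): MAX_CCDDATA_BITPIX is 136 bits, i.e. 17 bytes per pixel, so one
# full-frame CCDData costs roughly
#   sx694.naxis1 * sx694.naxis2 * MAX_CCDDATA_BITPIX / 8   bytes
# and the number of frames that fit in memory can be estimated as
#   MAX_MEM_FRAC * psutil.virtual_memory().available // bytes_per_frame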
COR_PROCESS_EXPAND_FACTOR = 3.5
GRIDDATA_EXPAND_FACTOR = 20
# These are used to optimize parallelization until such time as
# ccdproc.combiner can be parallelized
NUM_CCDTS = int((35 - (-10)) / 5)
NUM_DARK_EXPTIMES = 8
NUM_FILTS = 9
NUM_CALIBRATION_FILES = 11
DATA_ROOT = '/data/io/IoIO'
RAW_DATA_ROOT = os.path.join(DATA_ROOT, 'raw')
REDUCED_ROOT = os.path.join(DATA_ROOT, 'reduced')
CALIBRATION_ROOT = os.path.join(REDUCED_ROOT, 'Calibration')
CALIBRATION_SCRATCH = os.path.join(CALIBRATION_ROOT, 'scratch')
# string to append to processed files to avoid overwrite of raw data
OUTNAME_APPEND = "_p"
# Lockfiles to prevent multiple upstream parallel processes from
# simultaneously autoreducing calibration data
LOCKFILE = '/tmp/calibration_reduce.lock'
# Raw (and reduced) data are stored in directories by UT date, but
# some have subdirectories that contain calibration files.
CALIBRATION_SUBDIRS = ['Calibration', 'AutoFlat']
# Put the regular expressions for the biases, darks, and flats here so
# that they can be found quickly without having to do a ccdproc.ImageFileCollection
# on a whole directory. The latter is rock solid and the most reliable,
# but slow in big directories, since ccdproc.ImageFileCollection has to read
# each file
BIAS_GLOB = ['Bias*', '*_bias.fit']
DARK_GLOB = ['Dark*', '*_dark.fit']
FLAT_GLOB = '*Flat*'
# During the creation of master biases and darks files are grouped by
# CCD temperature. This is the change in temperature seen as a
# function of time that is used to trigger the creation of a new group
DCCDT_TOLERANCE = 0.5
# During reduction of files, biases and darks need to be matched to
# each file by temperature. This is the tolerance for that matching
CCDT_TOLERANCE = 2
# When I was experimenting with bias collection on a per-night basis,
# I got lots of nights with a smattering of biases. Discard these
MIN_NUM_BIASES = 7
MIN_NUM_FLATS = 3
# Accept as match darks with this much more exposure time
DARK_EXP_MARGIN = 3
# Number of pixels to expand the ND filter over what CorData finds.
# This is the negative of the CorData edge_mask parameter, since
# that is designed to mask pixels inside the ND filter to make
# centering of object more reliable
ND_EDGE_EXPAND = 40
FLAT_CUT = 0.75
# Wed Mar 03 09:59:08 2021 EST <EMAIL>
# 2020 -- early 2021
NA_OFF_ON_RATIO = 4.74
SII_OFF_ON_RATIO = 4.87
#######################
# Utilities #
#######################
def assure_list(x):
"""Assures x is type `list`"""
if x is None:
x = []
if not isinstance(x, list):
x = [x]
return x
def reduced_dir(rawdir, create=False):
"""Create a parallel directory to raw for reduced
files. e.g. /data/io/IoIO/raw/20241111 ->
/data/io/IoIO/reduced/20241111. Tries to do so in an
OS-independent way using os.path.sep. If `rawdir` is not in the
raw directory tree, just return rawdir
Parameters
----------
create : bool
Create reduced directory (and parents) if they don't exist
Returns
-------
reddir: str
Directory name in reduced directory tree structure
"""
ps = os.path.sep
# This ends up returning rawdir if directory doesn't have /raw/
reddir = rawdir.replace(f'{ps}raw{ps}', f'{ps}reduced{ps}')
if reddir == rawdir:
# catch top-level case
reddir = rawdir.replace(f'{ps}raw', f'{ps}reduced')
if create:
os.makedirs(reddir, exist_ok=True)
return reddir
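# For orientation, mirroring the docstring example:
#   reduced_dir('/data/io/IoIO/raw/20241111', create=True)
#   -> '/data/io/IoIO/reduced/20241111' (directory made if missing);
# a path outside the raw tree is returned unchanged.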
def get_dirs_dates(directory,
filt_list=None,
start=None,
stop=None):
"""Starting a root directory "directory," returns list of tuples
(subdir, date) sorted by date. Handles two cases of directory
date formatting YYYYMMDD (ACP) and YYYY-MM-DD (MaxIm)
Parameters
----------
directory : string
Directory in which to look for subdirectories
filt_list : list of strings
Used to filter out bad directories (e.g. ["cloudy", "bad"]
will omit listing of, e.g., 2018-02-02_cloudy and
2018-02-03_bad_focus)
start : string YYYY-MM-DD
Start date (inclusive). Default = first date
stop : string YYYY-MM-DD
Stop date (inclusive). Default = last date
"""
assert os.path.isdir(directory)
fulldirs = [os.path.join(directory, d) for d in os.listdir(directory)]
# Filter out bad directories first
dirs = [os.path.basename(d) for d in fulldirs
if (not os.path.islink(d)
and os.path.isdir(d)
and (filt_list is None
or not np.any([filt in d for filt in filt_list])))]
# Prepare to pythonically loop through date formats, trying each on
date_formats = ["%Y-%m-%d", "%Y%m%d"]
ddlist = []
for thisdir in dirs:
d = thisdir
dirfail = True
for idf in date_formats:
# The date formats are two characters shorter than the
# length of the strings I am looking for (%Y is two
# shorter than YYYY, but %m is the same length as MM, etc.)
d = d[0:min(len(d),len(idf)+2)]
try:
thisdate = datetime.datetime.strptime(d, idf)
ddlist.append((thisdir, thisdate))
dirfail = False
except:
pass
if dirfail:
pass
#log.debug('Skipping non-date formatted directory: ' + thisdir)
# Thanks to https://stackoverflow.com/questions/9376384/sort-a-list-of-tuples-depending-on-two-elements
if len(ddlist) == 0:
return []
ddsorted = sorted(ddlist, key=lambda e:e[1])
if start is None:
start = ddsorted[0][1]
elif isinstance(start, str):
start = datetime.datetime.strptime(start, "%Y-%m-%d")
elif isinstance(start, Time):
start = start.datetime
if stop is None:
stop = ddsorted[-1][1]
elif isinstance(stop, str):
stop = datetime.datetime.strptime(stop, "%Y-%m-%d")
elif isinstance(stop, Time):
stop = stop.datetime
if start > stop:
log.warning('start date {} > stop date {}, returning empty list'.format(start, stop))
return []
ddsorted = [dd for dd in ddsorted
if start <= dd[1] and dd[1] <= stop]
dirs, dates = zip(*ddsorted)
dirs = [os.path.join(directory, d) for d in dirs]
return list(zip(dirs, dates))
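# Sketch of intended use (directory contents are hypothetical):
#   get_dirs_dates(RAW_DATA_ROOT, filt_list=['cloudy', 'bad'],
#                  start='2020-01-01', stop='2020-12-31')
#   -> [('/data/io/IoIO/raw/20200102', datetime.datetime(2020, 1, 2, 0, 0)), ...]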
def valid_long_exposure(r):
"""Inspects FITS header or ImageFileCollection row for condition"""
return (r['imagetyp'].lower() == 'light'
and r['xbinning'] == 1
and r['ybinning'] == 1
and r['exptime'] > 10)
def multi_row_selector(table, keyword, value_list,
row_selector=None, **kwargs):
"""Returns lists of indices into a table, one list per value of `keyword`
Parameters
----------
table : `~astropy.table.Table`
Usually a `ccdproc.ImageFileCollection.summary`, hence the use
of "keyword" instead of "tag".
keyword : str
The primary keyword used for selection (e.g. 'FILTER')
value_list : list
List of allowed values of keyword. One list of indices into
`table` will be returned for each member of value_list
row_selector : func
Function applied on a per-row basis to filter out unwanted
rows. `row_selector` must accept one argument, a
`~astropy.table.Table` row and return a `bool` value. If
`row_selector` is `None` no filtering is done
Default is `None`
"""
if row_selector is None:
row_selector = lambda r, **kwargs: True  # accept every row
retval = {}
for value in value_list:
idx = [i for i, r in enumerate(table)
if (r[keyword.lower()] == value
and row_selector(r, **kwargs))]
retval[value] = idx
return retval
#return [[i for i, r in enumerate(summary_table)
# if (r[keyword.lower()] == value
# and row_selector(r, **kwargs))]
# for value in value_list]
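# Example of the return shape (row indices are hypothetical):
#   multi_row_selector(collection.summary, 'FILTER', ['Na_on', 'Na_off'],
#                      row_selector=valid_long_exposure)
#   -> {'Na_on': [0, 3, 7], 'Na_off': [1, 4]}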
def closest_in_time(collection, value_pair,
row_selector=None,
keyword='filter',
directory=None):
"""Returns list of filename pairs. In all pairs, the second
observation is the closest in time to the first observation,
relative to all other second observations. Example use: for each
on-band image, find the off-band image recorded closest in time
Parameters
----------
collection : `~ccdproc.ImageFileCollection`
value_pair : tuple
Values of `keyword` used to construct pairs
keyword : str
FITS keyword used to select pairs
Default is ``filter``
TODO
----
Could possibly be generalized to finding groups
"""
directory = collection.location or directory
if directory is None:
raise ValueError('Collection does not have a location. Specify directory')
summary_table = collection.summary
row_dict = multi_row_selector(summary_table,
keyword, value_pair,
row_selector)
pair_list = []
for i_on in row_dict[value_pair[0]]:
t_on = Time(summary_table[i_on]['date-obs'], format='fits')
# Search all second values
t_offs = Time(summary_table[row_dict[value_pair[1]]]['date-obs'],
format='fits')
if len(t_offs) == 0:
continue
dts = [t_on - T for T in t_offs]
idx_best1 = np.argmin(np.abs(dts))
# Unwrap
i_off = row_dict[value_pair[1]][idx_best1]
pair = [os.path.join(directory,
summary_table[i]['file'])
for i in (i_on, i_off)]
pair_list.append(pair)
return pair_list
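# Sketch of intended use (directory is hypothetical): pair each Na_on
# image with the Na_off image recorded closest in time to it.
#   collection = ccdp.ImageFileCollection('/data/io/IoIO/raw/20200303')
#   on_off_pairs = closest_in_time(collection, ('Na_on', 'Na_off'),
#                                  row_selector=valid_long_exposure)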
#######################
# RedCorData object #
#######################
class RedCorData(CorData):
pass
class FwRedCorData(FilterWarningCCDData, RedCorData):
warning_filter_list = [FITSFixedWarning]
######### CorMultiPipe object
class CorMultiPipe(CCDMultiPipe):
ccddata_cls = RedCorData
def __init__(self,
calibration=None,
auto=False,
outname_append=OUTNAME_APPEND,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
process_expand_factor=COR_PROCESS_EXPAND_FACTOR,
**kwargs):
self.calibration = calibration
self.auto = auto
super().__init__(outname_append=outname_append,
naxis1=naxis1,
naxis2=naxis2,
process_expand_factor=process_expand_factor,
**kwargs)
def pre_process(self, data, **kwargs):
"""Add full-frame check permanently to pipeline."""
kwargs = self.kwargs_merge(**kwargs)
if full_frame(data, **kwargs) is None:
return None
return super().pre_process(data, **kwargs)
def data_process(self, data,
calibration=None,
auto=None,
**kwargs):
kwargs = self.kwargs_merge(**kwargs)
if calibration is None:
calibration = self.calibration
if auto is None:
auto = self.auto
if isinstance(data, CCDData):
data = cor_process(data,
calibration=calibration,
auto=auto,
**kwargs)
return data
# Allow processing of individual CCDData in the case where an
# input file is actually a list (of lists...) of input files
return [self.data_process(d,
calibration=calibration,
auto=auto,
**kwargs)
for d in data]
class FwCorMultiPipe(CorMultiPipe):
ccddata_cls = FwRedCorData
######### CorMultiPipe pre-processing routines
def full_frame(data,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
**kwargs):
"""CorMultiPipe pre-processing routine to select full-frame images.
In the case where data is a list, if any ccd is not full
frame, the entire list fails. Currently permanently installed
into CorMultiPipe.pre_process.
"""
if isinstance(data, CCDData):
s = data.shape
# Note Pythonic C index ordering
if s != (naxis2, naxis1):
return None
return data
for ccd in data:
ff = full_frame(ccd, naxis1=naxis1,
naxis2=naxis2,
**kwargs)
if ff is None:
return None
return data
def im_med_min_max(im):
"""Returns median values of representative dark and light patches
of images recorded by the IoIO coronagraph"""
s = np.asarray(im.shape)
m = s/2 # Middle of CCD
q = s/4 # 1/4 point
m = m.astype(int)
q = q.astype(int)
# Note Y, X. Use the left middle to avoid any first and last row
# issues in biases
dark_patch = im[m[0]-50:m[0]+50, 0:100]
light_patch = im[m[0]-50:m[0]+50, q[1]:q[1]+100]
mdp = np.median(dark_patch)
mlp = np.median(light_patch)
return (mdp, mlp)
def light_image(im, light_tolerance=3, **kwargs):
"""CorMultiPipe pre-processing routine to reject light-contaminated bias & dark images
"""
mdp, mlp = im_med_min_max(im)
if (mlp - mdp > light_tolerance):
log.debug('light, dark patch medians ({:.4f}, {:.4f})'.format(mdp, mlp))
return None
return im
######### CorMultiPipe post-processing routines
def mask_above_key(ccd_in, bmp_meta=None, key=None, margin=0.1, **kwargs):
"""CorMultiPipe post-processing routine to mask pixels > input key
"""
if key is None:
raise ValueError('key must be specified')
masklevel = ccd_in.meta.get(key.lower())
if masklevel is None:
return ccd_in
ccd = ccd_in.copy()
# Saturation level is subject to overscan subtraction and
# multiplication by gain, so don't do a strict equality test, but give
# ourselves a little margin.
mask = ccd.data >= masklevel - margin
n_masked = np.count_nonzero(mask)
if n_masked > 0:
log.info(f'Masking {n_masked} pixels above {key}')
if len(key) > 6:
h = 'HIERARCH '
else:
h = ''
n_masked_key = h + 'N_' + key
ccd.meta[n_masked_key] = (n_masked, f'masked pixels > {key}')
# Avoid creating a mask of all Falses & supplement any existing mask
if n_masked > 0:
if ccd.mask is None:
ccd.mask = mask
else:
ccd.mask = ccd.mask + mask
if bmp_meta is not None:
bmp_meta[n_masked_key] = n_masked
return ccd
def mask_nonlin_sat(ccd, bmp_meta=None, margin=0.1, **kwargs):
"""CorMultiPipe post-processing routine to mask pixels > NONLIN and SATLEVEL
"""
ccd = mask_above_key(ccd, bmp_meta=bmp_meta, key='SATLEVEL')
ccd = mask_above_key(ccd, bmp_meta=bmp_meta, key='NONLIN')
return ccd
def combine_masks(data, **kwargs):
"""Combine CCDData masks in a list of CCDData"""
# Avoid working with large arrays if we don't have to
newmask = None
for ccd in data:
if ccd.mask is None:
continue
if newmask is None:
newmask = ccd.mask
continue
newmask += ccd.mask
if newmask is None:
return data
# Write mask into ccds
newdata = []
for ccd in data:
ccd.mask = newmask
newdata.append(ccd)
return data
def multi_filter_proc(data, **kwargs):
#return multi_proc(nd_filter_mask, **kwargs)(data)
return multi_proc(nd_filter_mask,
element_type=CCDData,
**kwargs)(data)
def jd_meta(ccd, bmp_meta=None, **kwargs):
"""CorMultiPipe post-processing routine to return JD
"""
tm = Time(ccd.meta['DATE-OBS'], format='fits')
if bmp_meta is not None:
bmp_meta['jd'] = tm.jd
return ccd
def bias_stats(ccd, bmp_meta=None, gain=sx694.gain, **kwargs):
"""CorMultiPipe post-processing routine for bias_combine
Returns dictionary of bias statistics for pandas dataframe
"""
im = ccd.data
hdr = ccd.meta
# Calculate readnoise. This is time-consuming
diffs2 = (im[1:] - im[0:-1])**2
rdnoise = np.sqrt(biweight_location(diffs2))
# Skip uncertainty creation, since it is not used in any
# subsequent calcs
#uncertainty = np.multiply(rdnoise, np.ones(im.shape))
#ccd.uncertainty = StdDevUncertainty(uncertainty)
# Prepare to create a pandas data frame to track relevant
# quantities
tm = Time(ccd.meta['DATE-OBS'], format='fits')
ccdt = ccd.meta['CCD-TEMP']
tt = tm.tt.datetime
# We have already subtracted overscan, so add it back in where
# appropriate
median = hdr['OVERSCAN_MEDIAN']
stats = {'time': tt,
'ccdt': ccdt,
'median': median,
'mean': np.mean(im) + median,
'std': np.std(im)*gain,
'rdnoise': rdnoise*gain,
'min': np.min(im) + median,
'max': np.max(im) + median}
if bmp_meta is not None:
bmp_meta['bias_stats'] = stats
return ccd
def nd_filter_mask(ccd_in, nd_edge_expand=ND_EDGE_EXPAND, **kwargs):
"""CorMultiPipe post-processing routine to mask ND filter
"""
ccd = ccd_in.copy()
mask = np.zeros(ccd.shape, bool)
# Return a copy of ccd with the edge_mask property adjusted. Do
# it this way to keep ccd's ND filt parameters intact
emccd = RedCorData(ccd, edge_mask=-nd_edge_expand)
mask[emccd.ND_coords] = True
if ccd.mask is None:
ccd.mask = mask
else:
ccd.mask = ccd.mask + mask
return ccd
def detflux(ccd_in, exptime_unit=None, **kwargs):
ccd = ccd_in.copy()
# The exptime_unit stuff may become obsolete with Card Quantities
if exptime_unit is None:
exptime_unit = u.s
exptime = ccd.meta['EXPTIME'] * exptime_unit
exptime_uncertainty = ccd.meta.get('EXPTIME-UNCERTAINTY')
if exptime_uncertainty is None:
ccd = ccd.divide(exptime, handle_meta='first_found')
else:
exptime_array = np.full_like(ccd, exptime.value)
exptime_uncertainty_array = \
np.full_like(ccd, exptime_uncertainty)
exptime_uncertainty_std = \
StdDevUncertainty(exptime_uncertainty_array,
unit=exptime_unit,
copy=False)
exp_ccd = CCDData(exptime_array,
uncertainty=exptime_uncertainty_std,
unit=exptime_unit)
ccd = ccd.divide(exp_ccd, handle_meta='first_found')
return ccd
######### cor_process routines
def get_filt_name(f, date_obs):
"""Used in standardize_filt_name. Returns standarized filter name
for all cases in IoIO dataset.
Parameters
----------
f : str
filter name
date_obs : str
FITS format DATE-OBS keyword representing the date on which
filter f was recorded into the FITS header
"""
# Dates and documentation from IoIO.notebk
if date_obs > '2020-03-01':
# Fri Feb 28 11:43:39 2020 EST jpmorgen@byted
# Filters in latest form. Subsequent renames should hopefully
# follow similar conventions (e.g. I for Bessel I-band
# filter, <primary>_on and <primary>_off)
# 1 R
# 2 1.25" SII_on
# 3 1.25" SII_off
# 4 Na_off
# 5 1.25" V
# 6 1.25" U
# 7 Na_on
# 8 1.25" B
# 9 1.25" R
return f
if date_obs > '2019-03-31':
# On 2019-03-31 the 9-position SX "Maxi" filter wheel was
# installed. There was considerable confusion during the
# installation due to incorrectly machined adapters and
# clearance issues. The adapters and clearance issues were
# straightened out, but an unfortunate byproduct of the
# debugging process was to end up swapping the labeled
# positions of the U and V and RC and H2O+ filter pairs. This
# was fixed on 2020-03-01
#
#
# Sat Apr 13 21:38:43 2019 EDT <EMAIL>
# Documentation of where I thought they were (numbers starting
# from 1 inserted)
# Filter #0 1 (R) offset: 0
# Filter #1 2(SII_on) offset: 108.6
# Filter #2 3(SII_off) offset: 86.2
# Filter #3 4 (Na_off) offset: -220.6
# Filter #4 5 (H2O+) offset: -327
# Filter #5 6 (RC) offset: 1323.6
# Filter #6 7 (Na_on) offset: -242.4
# Filter #7 8 (V) offset: 265.8
# Filter #8 9 (U) offset: 286.2
#
# Tue Feb 25 21:19:11 2020 EST <EMAIL>
# Documentation of where filters really were from
# 2019-03-31 -- 2020-02-25
# 1 R
# 2 SII_on
# 3 SII_off
# 4 Na_off
# 5 V
# 6 UV
# 7 Na_on
# 8 H2O+
# 9 RC
if f == 'H2O+':
return 'V'
if f == 'RC':
return 'U'
if f == 'V':
return 'H2O+'
if f == 'U':
return 'RC'
if f == 'UV':
# Just in case some slipped in with this transient
return 'U'
# everything else should be OK
return f
# On 20190218, just before the large filter wheel installation, I
# changed filter naming from verbose to the current convention.
# The verbose names were Na_continuum_50A_FWHM, Na_5892A_10A_FWHM,
# Na_5890A_10A_FWHM, [SII]_continuum_40A_FWHM, and
# [SII]_6731A_10A_FWHM. The R filter was I think always R, but
# may have been 'R-band.' Also had an "open" slot before I got
# the R filter. The following code should grab both the old a
# current naming cases
if 'R' in f:
return f
if 'open' in f:
return f
if 'cont' in f or 'off' in f:
on_off = 'off'
else:
# Hopefully the above catches the on/off cases and other filters
on_off = 'on'
if 'SII' in f:
line = 'SII'
elif 'Na' in f:
line = 'Na'
else:
# We only had 5 slots, so this has covered all the bases
raise ValueError(f'unknown filter {f}')
return f'{line}_{on_off}'
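# Worked examples (derived from the cases above; dates are illustrative):
#   get_filt_name('SII_on', '2020-06-01')            # -> 'SII_on' (already standard)
#   get_filt_name('H2O+', '2019-06-01')              # -> 'V' (swapped-slot era)
#   get_filt_name('Na_5890A_10A_FWHM', '2018-06-01') # -> 'Na_on' (old verbose name)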
def standardize_filt_name(hdr_in):
"""Standardize FILTER keyword across all IoIO data
Parameters
----------
hdr_in : `~astropy.io.fits.Header`
input FITS header
Returns
-------
`~astropy.io.fits.Header` with FILTER card updated to standard
form and OFILTER card (original FILTER) added, if appropriate
"""
if hdr_in.get('ofilter') is not None:
# We have been here before, so exit quietly
return hdr_in
old_filt_name = hdr_in.get('FILTER')
if old_filt_name is None:
# Probably a BIAS or DARK
return hdr_in
new_filt_name = get_filt_name(old_filt_name, hdr_in['DATE-OBS'])
if old_filt_name == new_filt_name:
return hdr_in
# Only copy if we are going to change the hdr
hdr = hdr_in.copy()
hdr['FILTER'] = new_filt_name
hdr.insert('FILTER',
('OFILTER', old_filt_name, 'Original filter name'),
after=True)
return hdr
def kasten_young_airmass(hdr_in):
"""Record airmass considering curvature of Earth
Uses the formula of Kasten, F. and Young, A. T., “Revised optical air
mass tables and approximation formula”, Applied Optics, vol. 28,
pp. 4735–4738, 1989, found at
https://www.pveducation.org/pvcdrom/properties-of-sunlight/air-mass
"""
if hdr_in.get('oairmass') is not None:
# We have been here before, so exit quietly
return hdr_in
if hdr_in.get('objctalt') is None:
# We have no alt to work with
# --> double-check this
return hdr_in
hdr = hdr_in.copy()
alt = float(hdr['OBJCTALT'])
zd = 90 - alt
airmass = hdr['AIRMASS']
hdr.insert('AIRMASS',
('OAIRMASS', airmass, 'Original airmass'),
after=True)
denom = np.cos(np.radians(zd)) + 0.50572 * (96.07995 - zd)**(-1.6364)
hdr['AIRMASS'] = (1/denom, 'Curvature-corrected (Kasten and Young 1989)')
return hdr
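# Worked example (hedged, numbers approximate): for OBJCTALT = 10 deg
# (zenith distance zd = 80 deg),
#   denom = cos(80 deg) + 0.50572*(96.07995 - 80)**(-1.6364)
#         ~ 0.1736 + 0.0054 ~ 0.1790
# so the recorded AIRMASS becomes ~5.59, compared to the plane-parallel
# sec(zd) ~ 5.76; the original value is preserved in OAIRMASS.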
def subtract_overscan(ccd, oscan=None, *args, **kwargs):
"""Subtract overscan, estimating it, if necesesary, from image.
Also subtracts overscan from SATLEVEL keyword
Note: ccdproc's native subtract_overscan function can't be used
because it assumes the overscan region is specified by a simple
rectangle.
"""
if ccd.meta.get('overscan_value') is not None:
# We have been here before, so exit quietly
return ccd
nccd = ccd.copy()
if oscan is None:
oscan = overscan_estimate(ccd, meta=nccd.meta,
*args, **kwargs)
nccd = nccd.subtract(oscan*u.adu, handle_meta='first_found')
nccd.meta['HIERARCH OVERSCAN_VALUE'] = (oscan, 'overscan value subtracted (adu)')
nccd.meta['HIERARCH SUBTRACT_OVERSCAN'] \
= (True, 'Overscan has been subtracted')
## Keep track of our precise saturation level
#satlevel = nccd.meta.get('satlevel')
#if satlevel is not None:
# satlevel -= oscan
# nccd.meta['SATLEVEL'] = satlevel # still in adu
return nccd
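# Usage sketch (hedged): with no oscan supplied, the pedestal is estimated
# from the image itself via overscan_estimate(); a known pedestal can also
# be passed directly (the 1724 below is an arbitrary illustrative value):
#   nccd = subtract_overscan(ccd)             # estimate and subtract
#   nccd = subtract_overscan(ccd, oscan=1724) # subtract a known value (adu)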
def cor_process(ccd,
calibration=None,
auto=False,
imagetyp=None,
ccd_meta=True,
fix_filt_name=True,
exp_correct=True,
date_beg_avg_add=True,
airmass_correct=True,
oscan=None,
trim=None,
error=False,
master_bias=None,
dark_frame=None,
master_flat=None,
bad_pixel_mask=None,
gain=None,
gain_key=None,
readnoise=None,
readnoise_key=None,
oscan_median=True,
oscan_model=None,
min_value=None,
min_value_key=None,
flat_norm_value=1,
dark_exposure=None,
data_exposure=None,
exposure_key=None,
exposure_unit=None,
dark_scale=True,
gain_corrected=True,
*args, **kwargs):
"""Perform basic CCD processing/reduction of IoIO ccd data
The following steps can be included:
* add CCD metadata (:func:`sx694.metadata`)
* correct CCD exposure time (:func:`sx694.exp_correct`)
* overscan correction (:func:`subtract_overscan`)
* trimming of the image (:func:`trim_image`)
* create deviation frame (:func:`create_deviation`)
* gain correction (:func:`gain_correct`)
* add a mask to the data
* subtraction of master bias (:func:`subtract_bias`)
* subtraction of a dark frame (:func:`subtract_dark`)
* correction of flat field (:func:`flat_correct`)
The task returns a processed `~astropy.nddata.CCDData` object.
Parameters
----------
ccd : `~astropy.nddata.CCDData`
Image to be reduced.
calibration : `~Calibration`, bool, or None, optional
Calibration object to be used to find best bias, dark, and
flatfield files. If True, a Calibration object is
instantiated locally with no arguments (dangerous if
calibration reductions have not been completed!)
Default is ``None``.
auto : bool
If True, do reduction automatically based on IMAGETYP
keyword. See imagetyp documentation.
Default is ``False``
imagetyp : bool, str, or None
If True, do reduction based on IMAGETYP keyword. If string,
use that as IMAGETYP. Requires calibration object
bias -> oscan=True, error=True
dark -> oscan=True, gain=True, error=True, master_bias=True
flat -> oscan=True, gain=True, error=True, master_bias=True,
dark_frame=True
light-> oscan=True, gain=True, error=True, master_bias=True,
dark_frame=True, master_flat=True, min_value=True
Default is ``None``
ccd_meta : bool
Add CCD metadata
Default is ``True``
fix_filt_name : bool
Put all filters into the naming convention adopted in late Feb 2020,
after the 9-position SX Maxi filter wheel was installed.
Default is ``True``
exp_correct : bool
Correct for exposure time problems
Default is ``True``
date_beg_avg_add : bool
Add DATE-BEG and DATE-AVG FITS keywords, which reflect
best-estimate shutter time and observation midpoint. DATE-AVG
should be used for all ephemeris calculations.
Default is ``True``
airmass_correct : bool
Correct for curvature of earth airmass for very low elevation
observations
Default is ``True``
oscan : number, bool, or None, optional
Single pedestal value to subtract from image. If True, oscan
is estimated using :func:`sx694.overscan_estimate` and subtracted
Default is ``None``.
error : bool, optional
If True, create an uncertainty array for ccd.
Default is ``False``.
master_bias : bool, str, `~astropy.nddata.CCDData` or None, optional
Master bias frame to be subtracted from ccd image. The unit of the
master bias frame should match the unit of the image **after
gain correction** if ``gain_corrected`` is True. If True,
master_bias is determined using :func:`Calibration.best_bias`.
NOTE: master_bias RDNOISE card, if present, is propagated
to output ccddata metadata. This is helpful in systems where
readnoise is measured on a per-masterbias basis and harmless
when a manufacturer's value is used.
Default is ``None``.
dark_frame : bool, str, `~astropy.nddata.CCDData` or None, optional
A dark frame to be subtracted from the ccd. The unit of the
master dark frame should match the unit of the image **after
gain correction** if ``gain_corrected`` is True. If True,
dark_frame is determined using :func:`Calibration.best_dark`.
Default is ``None``.
master_flat : `~astropy.nddata.CCDData` or None, optional
A master flat frame to be divided into ccd. The unit of the
master flat frame should match the unit of the image **after
gain correction** if ``gain_corrected`` is True. If True,
master_flat is determined using :func:`Calibration.best_flat`.
Default is ``None``.
bad_pixel_mask : `numpy.ndarray` or None, optional
A bad pixel mask for the data. The bad pixel mask should be given
such that bad pixels have a value of 1 and good pixels a value of 0.
Default is ``None``.
gain : `~astropy.units.Quantity`, bool or None, optional
Gain value to multiply the image by to convert to electrons.
If True, read metadata using gain_key
Default is ``None``.
gain_key : `~ccdproc.Keyword`
Name of key in metadata that contains gain value.
Default is "GAIN" with units `~astropy.units.electron`/`~astropy.units.adu`
readnoise : `~astropy.units.Quantity`, bool or None, optional
Read noise for the observations. The read noise should be in
electrons. If True, read from the READNOISE keyword and
associated with readnoise_unit
Default is ``None``.
readnoise_key : `~ccdproc.Keyword`
Name of key in metadata that contains readnoise value.
Default is "RDNOISE" with units `astropy.units.electron`
min_value : float, bool, or None, optional
Minimum value for flat field. To avoid division by small
number problems, all values in the flat below min_value will
be replaced by this value. If True, value read from FLAT_CUT
keyword of flat. If None, no replacement will be done.
Default is ``None``.
flat_norm_value : float
Normalize flat by this value
Default is 1 (no normalization -- flat is already normalized).
dark_exposure : `~astropy.units.Quantity` or None, optional
Exposure time of the dark image; if specified, ``data_exposure``
must also be provided.
Default is ``None``.
data_exposure : `~astropy.units.Quantity` or None, optional
Exposure time of the science image; if specified, ``dark_exposure``
must also be provided.
Default is ``None``.
exposure_key : `~ccdp.Keyword`, str or None, optional
Name of key in image metadata that contains exposure time.
Default is ``None``.
exposure_unit : `~astropy.units.Unit` or None, optional
Unit of the exposure time if the value in the meta data does not
include a unit.
Default is ``None``.
dark_scale : bool, optional
If True, scale the dark frame by the exposure times.
Default is ``True``.
gain_corrected : bool, optional
If True, the ``master_bias``, ``master_flat``, and ``dark_frame``
have already been gain corrected.
Default is ``True``.
Returns
-------
ccd : `~astropy.nddata.CCDData`
Processed image
Examples --> fix these
--------
1. To overscan, trim and gain correct a data set::
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.nddata import CCDData
>>> from ccdproc import ccd_process
>>> ccd = CCDData(np.ones([100, 100]), unit=u.adu)
>>> nccd = ccd_process(ccd, oscan='[1:10,1:100]',
... trim='[10:100, 1:100]', error=False,
... gain=2.0*u.electron/u.adu)
"""
if gain_key is None:
gain_key = ccdp.Keyword('GAIN', u.electron/u.adu)
if readnoise_key is None:
readnoise_key = ccdp.Keyword('RDNOISE', u.electron)
if min_value_key is None:
min_value_key = ccdp.Keyword('FLAT_CUT', u.dimensionless_unscaled)
if exposure_key is None:
exposure_key = ccdp.Keyword('EXPTIME', u.s)
# make a copy of the object
nccd = ccd.copy()
# Handle our calibration object
if calibration is True:
calibration = Calibration()
# Enable autocalibration through imagetyp keyword
if auto:
imagetyp = nccd.meta.get('imagetyp')
if imagetyp is None:
raise ValueError("CCD metadata contains no IMAGETYP keyword, can't proceed with automatic reduction")
# Enable imagetyp to select reduction level
if imagetyp is None:
pass
elif imagetyp.lower() == 'bias':
oscan=True; error=True
elif imagetyp.lower() == 'dark':
oscan=True; gain=True; error=True; master_bias=True
elif imagetyp.lower() == 'flat':
oscan=True; gain=True; error=True; master_bias=True; dark_frame=True
elif imagetyp.lower() == 'light':
oscan=True; gain=True; error=True; master_bias=True; dark_frame=True; master_flat=True; min_value=True
else:
raise ValueError(f'Unknown IMAGETYP keyword {imagetyp}')
# Convert "yes use this calibration" to calibration _filenames_
try:
if isinstance(calibration, Calibration):
if master_bias is True:
master_bias = calibration.best_bias(nccd)
if dark_frame is True:
dark_frame = calibration.best_dark(nccd)
if master_flat is True:
master_flat = calibration.best_flat(nccd.meta)
if master_bias is True:
raise ValueError('master_bias=True but no Calibration object supplied')
if dark_frame is True:
raise ValueError('dark_frame=True but no Calibration object supplied')
if master_flat is True:
raise ValueError('master_flat=True but no Calibration object supplied')
except Exception as e:
log.error(f'No calibration available: calibration system problem {e}')
raise
if ccd_meta:
# Put in our SX694 camera metadata
nccd.meta = sx694.metadata(nccd.meta, *args, **kwargs)
if fix_filt_name:
# Fix my indecision about filter names!
nccd.meta = standardize_filt_name(nccd.meta)
if exp_correct:
# Correct exposure time for driver bug
nccd.meta = sx694.exp_correct(nccd.meta, *args, **kwargs)
if date_beg_avg_add:
# Add DATE-BEG and DATE-AVG FITS keywords
nccd.meta = sx694.date_beg_avg(nccd.meta, *args, **kwargs)
if airmass_correct:
# I think this is better at large airmass than what ACP uses,
# plus it standardizes everything for times I didn't use ACP
nccd.meta = kasten_young_airmass(nccd.meta)
# Apply overscan correction unique to the IoIO SX694 CCD. This
# uses the string version of master_bias, if available for
# metadata
if oscan is True:
nccd = subtract_overscan(nccd, master_bias=master_bias,
*args, **kwargs)
elif oscan is None or oscan is False:
pass
else:
# Hope oscan is a number...
nccd = subtract_overscan(nccd, oscan=oscan,
*args, **kwargs)
# The rest of the code uses stock ccdproc routines for the most
# part, so convert calibration filenames to CCDData objects,
# capturing the names for metadata purposes
if isinstance(master_bias, str):
subtract_bias_keyword = \
{'HIERARCH SUBTRACT_BIAS': 'subbias',
'SUBBIAS': 'ccdproc.subtract_bias ccd=<CCDData>, master=BIASFILE',
'BIASFILE': master_bias}
master_bias = RedCorData.read(master_bias)
else:
subtract_bias_keyword = None
if isinstance(dark_frame, str):
subtract_dark_keyword = \
{'HIERARCH SUBTRACT_DARK': 'subdark',
'SUBDARK': 'ccdproc.subtract_dark ccd=<CCDData>, master=DARKFILE',
'DARKFILE': dark_frame}
dark_frame = RedCorData.read(dark_frame)
else:
subtract_dark_keyword = None
if isinstance(master_flat, str):
flat_correct_keyword = \
{'HIERARCH FLAT_CORRECT': 'flatcor',
'FLATCOR': 'ccdproc.flat_correct ccd=<CCDData>, master=FLATFILE',
'FLATFILE': master_flat}
master_flat = RedCorData.read(master_flat)
else:
flat_correct_keyword = None
# apply the trim correction
if isinstance(trim, str):
nccd = ccdp.trim_image(nccd, fits_section=trim)
elif trim is None:
pass
else:
raise TypeError('trim is not None or a string.')
if isinstance(master_bias, CCDData):
if master_bias.unit == u.electron:
# Apply some knowledge of our reduction scheme to ease the
# number of parameters to supply
gain_corrected = True
# Copy over readnoise measured for this master_bias, if present
rdnoise = master_bias.meta.get('rdnoise')
if rdnoise is not None:
nccd.meta['RDNOISE'] = rdnoise
nccd.meta.comments['RDNOISE'] = master_bias.meta.comments['RDNOISE']
if gain is True:
gain = gain_key.value_from(nccd.meta)
if error and readnoise is None:
# We want to make an error frame but the user has not
# specified readnoise. See if we can read from metadata
readnoise = readnoise_key.value_from(nccd.meta)
# Create the error frame. Do this differently than ccdproc for
# two reasons: (1) bias error should read the readnoise (2) I
# can't trim my overscan, so there are lots of pixels at the
# overscan level. After overscan and bias subtraction, many of
them that are probably normal statistical outliers are negative
# enough to overwhelm the readnoise in the deviation calculation.
# But I don't want the error estimate on them to be NaN, since the
# error is really the readnoise.
if error and imagetyp is not None and imagetyp.lower() == 'bias':
if gain is None:
# We don't want to gain-correct, so we need to prepare to
# convert readnoise (which is in electron) to adu
gain_for_bias = gain_key.value_from(nccd.meta)
else:
# Bias will be gain-corrected to read in electrons
gain_for_bias = 1*u.electron
readnoise_array = np.full_like(nccd,
readnoise.value/gain_for_bias.value)
nccd.uncertainty = StdDevUncertainty(readnoise_array,
unit=nccd.unit,
copy=False)
else:
if error and gain is not None and readnoise is not None:
nccd = ccdp.create_deviation(nccd, gain=gain,
readnoise=readnoise,
disregard_nan=True)
elif error and (gain is None or readnoise is None):
raise ValueError(
'gain and readnoise must be specified to create error frame.')
# apply the bad pixel mask
if isinstance(bad_pixel_mask, np.ndarray):
nccd.mask = bad_pixel_mask
elif bad_pixel_mask is None:
pass
else:
raise TypeError('bad_pixel_mask is not None or numpy.ndarray.')
# apply the gain correction
if not (gain is None or isinstance(gain, u.Quantity)):
raise TypeError('gain is not None or astropy.units.Quantity.')
# Gain-correct now if bias, etc. are gain corrected (otherwise at end)
if gain is not None and gain_corrected:
nccd = ccdp.gain_correct(nccd, gain)
# Subtract master bias, adding metadata that refers to bias
# filename, if supplied
if isinstance(master_bias, CCDData):
nccd = ccdp.subtract_bias(nccd, master_bias,
add_keyword=subtract_bias_keyword)
elif master_bias is None:
pass
else:
raise TypeError(
'master_bias is not None, fname or a CCDData object.')
# Correct OVERSCAN_MASTER_BIAS keyword, if possible
hdr = nccd.meta
osbias = hdr.get('osbias')
biasfile = hdr.get('biasfile')
if osbias is None or biasfile is None:
pass
elif osbias != biasfile:
log.warning('OSBIAS and BIASFILE are not the same')
else:
del hdr['OSBIAS']
hdr['OVERSCAN_MASTER_BIAS'] = 'BIASFILE'
# Subtract the dark frame. Generally this will just use the
# default exposure_key we create in our parameters to ccd_process
if isinstance(dark_frame, CCDData):
nccd = ccdp.subtract_dark(nccd, dark_frame,
dark_exposure=dark_exposure,
data_exposure=data_exposure,
exposure_time=exposure_key,
exposure_unit=exposure_unit,
scale=dark_scale,
add_keyword=subtract_dark_keyword)
elif dark_frame is None:
pass
else:
raise TypeError(
'dark_frame is not None or a CCDData object.')
if master_flat is None:
pass
else:
if min_value is True:
min_value = min_value_key.value_from(master_flat.meta)
flat_correct_keyword['FLATCOR'] += f', min_value={min_value}'
flat_correct_keyword['FLATCOR'] += f', norm_value={flat_norm_value}'
nccd = ccdp.flat_correct(nccd, master_flat,
min_value=min_value,
norm_value=flat_norm_value,
add_keyword=flat_correct_keyword)
for i in range(2):
for j in range(2):
ndpar = master_flat.meta.get(f'ndpar{i}{j}')
if ndpar is None:
break
ndpar_comment = master_flat.meta.comments[f'NDPAR{i}{j}']
ndpar_comment = 'FLAT ' + ndpar_comment
nccd.meta[f'FNDPAR{i}{j}'] = (ndpar, ndpar_comment)
# apply the gain correction only at the end if gain_corrected is False
if gain is not None and not gain_corrected:
nccd = ccdp.gain_correct(nccd, gain)
return nccd
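# Usage sketch (hedged; the path is hypothetical and assumes calibration
# reductions are available): reduce a light frame automatically from its
# IMAGETYP keyword, letting the Calibration object pick the best bias,
# dark, and flat.
#   c = Calibration(reduce=True)
#   ccd = RedCorData.read('/path/to/raw_light.fits')
#   nccd = cor_process(ccd, calibration=c, auto=True)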
####### bias, dark, and flat generation routines
def add_history(header, text='', caller=1):
"""Add a HISTORY card to a FITS header with the caller's name inserted
Parameters
----------
header : astropy.io.fits.Header object
Header to write HISTORY into. No default.
text : str
String to write. Default '' indicates FITS-formatted current
time will be used
caller : int or str
If int, number of levels to go up the call stack to get caller
name. If str, string to use for caller name
Raises
------
TypeError if caller is not an int or str
"""
# if not isinstance(header, fits.Header):
# raise ValueError('Supply a valid FITS header object')
# If not supplied, get our caller name from the stack
# http://stackoverflow.com/questions/900392/getting-the-caller-function-name-inside-another-function-in-python
# https://docs.python.org/3.6/library/inspect.html
if type(caller) == int:
try:
caller = inspect.stack()[caller][3]
except IndexError:
caller = 'unknown'
elif type(caller) != str:
raise TypeError('Type of caller must be int or str')
# If no text is supplied, put in the date in FITS format
if text == '':
now = Time.now()
now.format = 'fits'
text = now.value
towrite = '(' + caller + ')' + ' ' + text
# astropy.io.fits automatically wraps long entries
#if len('HISTORY ') + len(towrite) > 80:
# log.warning('Truncating long HISTORY card: ' + towrite)
header['HISTORY'] = towrite
return
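# Example sketch (hedged): called from inside a function named my_step,
#   add_history(hdr, 'Applied widget correction')
# writes "HISTORY (my_step) Applied widget correction"; with no text it
# records the current FITS-format time instead.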
def fdict_list_collector(fdict_list_creator,
directory=None,
collection=None,
subdirs=None,
glob_include=None,
imagetyp=None,
**kwargs):
if subdirs is None:
subdirs = []
glob_include = assure_list(glob_include)
if not isinstance(glob_include, list):
glob_include = [glob_include]
fdict_list = []
if collection is None:
# Prepare to call ourselves recursively to build up a list of
# fnames in the provided directory and optional subdirectories
if not os.path.isdir(directory):
# This is the end of our recursive line
return fdict_list
for sd in subdirs:
subdir = os.path.join(directory, sd)
sub_fdict_list = fdict_list_collector \
(fdict_list_creator,
subdir,
imagetyp=imagetyp,
glob_include=glob_include,
**kwargs)
for sl in sub_fdict_list:
fdict_list.append(sl)
# After processing our subdirs, process 'directory.'
# Make sure the loop below runs at least once
if len(glob_include) == 0:
glob_include = [None]
for gi in glob_include:
# Speed things up considerably by allowing globbing. As
# per comment above, if None passed to glob_include, this
# runs once with None passed to ccdp.ImageFileCollection's
# glob_include
# Avoid annoying warning about empty collection
flist = glob.glob(os.path.join(directory, gi))
# Tricky! Catch the case where AutoFlat is a subdir AND
# matches glob_include
flist = [f for f in flist if os.path.basename(f) not in subdirs]
if len(flist) == 0:
continue
collection = ccdp.ImageFileCollection(directory,
filenames=flist)
# Call ourselves recursively, but using the code below,
# since collection is now defined
gi_fdict_list = fdict_list_collector \
(fdict_list_creator,
collection=collection,
imagetyp=imagetyp,
**kwargs)
for sub_fdict in gi_fdict_list:
fdict_list.append(sub_fdict)
# Here is the end of our recursive line if directory and
# optional subdirs were specified
return fdict_list
if collection.summary is None:
# We were probably called on a glob_include that yielded no results
return fdict_list
# If we made it here, we have a collection, possibly from calling
# ourselves recursively. Hand off to our fdict_list_creator to do
# all the work
return fdict_list_creator(collection, imagetyp=imagetyp, **kwargs)
def bias_dark_fdict_creator(collection,
imagetyp=None,
dccdt_tolerance=DCCDT_TOLERANCE,
debug=False):
# Create a new collection narrowed to our imagetyp.
directory = collection.location
# We require imagetyp designation and are not polite in its absence
collection = collection.filter(imagetyp=imagetyp)
# --> Oops, this recycles binned biases I took for a while to just
# waste some time. For now, let them be rejected later on
#
# Reject binned and non-full frame images, such as I took early
# on. Note, this currently doesn't leave the directory with a
# "bad" marker. To do that, just uncomment this code and the non
# full-frame shapes will be caught later. If we really wanted to
# process other modes properly, we would add ccd.shape and binning
# info to the filenames.
#try:
# collection = collection.filter(naxis1=sx694.naxis1,
# naxis2=sx694.naxis2)
#except Exception as e:
# log.error(f'Problem collecting full-frame files of imagetyp {imagetyp} in {directory}: {e}')
# return []
# Guide camera biases would add another layer of complexity with
# no CCD-TEMP
if 'ccd-temp' not in collection.keywords:
log.error(f'CCD-TEMP not found in any {imagetyp} files in {directory}')
return []
# Create a summary table narrowed to our imagetyp
narrow_to_imagetyp = collection.summary
ts = narrow_to_imagetyp['ccd-temp']
# ccd-temp is recorded as a string. Convert it to a number so
# we can sort +/- values properly
ts = np.asarray(ts)
# If some Lodestar guide camera biases snuck in, filter them
# here
tidx = np.flatnonzero(ts != None)
narrow_to_imagetyp = narrow_to_imagetyp[tidx]
ts = ts[tidx]
# Get the sort indices so we can extract fnames in proper order
tsort_idx = np.argsort(ts)
# For ease of use, re-order everything in terms of tsort
ts = ts[tsort_idx]
narrow_to_imagetyp = narrow_to_imagetyp[tsort_idx]
# Spot jumps in t and translate them into slices into ts
dts = ts[1:] - ts[0:-1]
jump = np.flatnonzero(dts > dccdt_tolerance)
# Note, when jump is an empty array, this just returns [0]
tslices = np.append(0, jump+1)
# Whew! This was a tricky one!
# https://stackoverflow.com/questions/509211/understanding-slice-notation
# showed that I needed None and explicit call to slice(), below,
# to be able to generate an array element in tslices that referred
# to the last array element in ts. :-1 is the next to the last
# element because of how slices work. Appending just None to an
# array avoids a deprecation complaint from numpy if you try to do
# np.append(0, [jump+1, None])
tslices = np.append(tslices, None)
if debug:
print(ts)
print(dts)
print(tslices)
fdict_list = []
for it in range(len(tslices)-1):
these_ts = ts[slice(tslices[it], tslices[it+1])]
mean_ccdt = np.mean(these_ts)
# Create a new summary Table that includes just these Ts
narrow_to_t = narrow_to_imagetyp[tslices[it]:tslices[it+1]]
exps = narrow_to_t['exptime']
# These are sorted by increasing exposure time
ues = np.unique(exps)
for ue in ues:
exp_idx = np.flatnonzero(exps == ue)
files = narrow_to_t['file'][exp_idx]
full_files = [os.path.join(directory, f) for f in files]
fdict_list.append({'directory': directory,
'CCDT': mean_ccdt,
'EXPTIME': ue,
'fnames': full_files})
return fdict_list
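# Illustration (hedged, values invented): with ccd-temps
# ts = [-20.1, -20.0, -10.2, -10.1] and dccdt_tolerance = 0.5,
# dts = [0.1, 9.8, 0.1] gives jump = [1], so tslices = [0, 2, None] and the
# files split into one group near -20 C and one near -10 C, each further
# split by unique EXPTIME into dicts of the form
# {'directory': ..., 'CCDT': ..., 'EXPTIME': ..., 'fnames': [...]}.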
def discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate):
if not keep_intermediate:
for f in out_fnames:
try:
os.remove(f)
except Exception as e:
# We do not expect this, since we created these with
# our local process
log.error(f'Unexpected! Remove {f} failed: ' + str(e))
# These we expect to fail until all of our other parallel
# processes have finished
try:
os.rmdir(sdir)
except Exception as e:
pass
try:
os.rmdir(calibration_scratch)
except Exception as e:
pass
def bias_combine_one_fdict(fdict,
outdir=CALIBRATION_ROOT,
calibration_scratch=CALIBRATION_SCRATCH,
keep_intermediate=False,
show=False,
min_num_biases=MIN_NUM_BIASES,
camera_description=sx694.camera_description,
gain=sx694.gain,
satlevel=sx694.satlevel,
readnoise=sx694.example_readnoise,
readnoise_tolerance=sx694.readnoise_tolerance,
gain_correct=False,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
**kwargs):
"""Worker that allows the parallelization of calibrations taken at one
temperature, exposure time, filter, etc.
gain_correct : Boolean
Affects the unit of stored images. True: units of electron.
False: unit of adu. Default: False
"""
fnames = fdict['fnames']
num_files = len(fnames)
mean_ccdt = fdict['CCDT']
directory = fdict['directory']
tmp = RedCorData.read(fnames[0])
tm = tmp.meta['DATE-OBS']
this_dateb1, _ = tm.split('T')
outbase = os.path.join(outdir, this_dateb1)
bad_fname = outbase + '_ccdT_XXX' + '_bias_combined_bad.fits'
if num_files < min_num_biases:
log.warning(f"Not enough good biases found at CCDT = {mean_ccdt} C in {directory}")
Path(bad_fname).touch()
return False
# Make a scratch directory that is the date of the first file.
# Not as fancy as the biases, but, hey, it is a scratch directory
sdir = os.path.join(calibration_scratch, this_dateb1)
#mem = psutil.virtual_memory()
#num_files_can_fit = \
# int(min(num_files,
# mem.available*mem_frac/ccddata_size))
#num_can_process = min(num_processes, num_files_can_fit)
#print('bias_combine_one_fdict: num_processes = {}, mem_frac = {}, num_files= {}, num_files_can_fit = {}, num_can_process = {}'.format(num_processes, mem_frac, num_files, num_files_can_fit, num_can_process))
# Use CorMultiPipe to subtract the median from each bias and
# create a dict of stats for a pandas dataframe
cmp = CorMultiPipe(num_processes=num_processes,
mem_frac=mem_frac,
naxis1=naxis1,
naxis2=naxis2,
bitpix=bitpix,
outdir=sdir,
create_outdir=True,
overwrite=True,
pre_process_list=[light_image],
post_process_list=[bias_stats, jd_meta])
#combined_base = outbase + '_bias_combined'
pout = cmp.pipeline(fnames, **kwargs)
pout, fnames = prune_pout(pout, fnames)
if len(pout) == 0:
log.warning(f"Not enough good biases {len(pout)} found at CCDT = {mean_ccdt} C in {directory}")
Path(bad_fname).touch()
return False
out_fnames, pipe_meta = zip(*pout)
if len(out_fnames) < min_num_biases:
log.warning(f"Not enough good biases {len(pout)} found at CCDT = {mean_ccdt} C in {directory}")
discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate)
Path(bad_fname).touch()
return False
stats = [m['bias_stats'] for m in pipe_meta]
jds = [m['jd'] for m in pipe_meta]
df = pd.DataFrame(stats)
tm = Time(np.mean(jds), format='jd')
this_date = tm.fits
this_dateb = this_date.split('T')[0]
if this_dateb != this_dateb1:
log.warning(f"first bias is on {this_dateb1} but average is {this_dateb}")
this_ccdt = '{:.1f}'.format(mean_ccdt)
f = plt.figure(figsize=[8.5, 11])
# In the absence of a formal overscan region, this is the best
# I can do
medians = df['median']
overscan = np.mean(medians)
ax = plt.subplot(6, 1, 1)
plt.title('CCDT = {} C on {}'.format(this_ccdt, this_dateb))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', bottom=True, top=True, left=True, right=True)
plt.plot(df['time'], df['ccdt'], 'k.')
plt.ylabel('CCDT (C)')
ax = plt.subplot(6, 1, 2)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', bottom=True, top=True, left=True, right=True)
plt.plot(df['time'], df['max'], 'k.')
plt.ylabel('max (adu)')
ax = plt.subplot(6, 1, 3)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', bottom=True, top=True, left=True, right=False)
plt.plot(df['time'], df['median'], 'k.')
plt.plot(df['time'], df['mean'], 'r.')
plt.ylabel('median & mean (adu)')
plt.legend(['median', 'mean'])
secax = ax.secondary_yaxis \
('right',
functions=(lambda adu: (adu - overscan)*gain,
lambda e: e/gain + overscan))
secax.set_ylabel('Electrons')
ax=plt.subplot(6, 1, 4)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', bottom=True, top=True, left=True, right=True)
plt.plot(df['time'], df['min'], 'k.')
plt.ylabel('min (adu)')
ax=plt.subplot(6, 1, 5)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', bottom=True, top=True, left=True, right=True)
plt.plot(df['time'], df['std'], 'k.')
plt.ylabel('std (electron)')
ax=plt.subplot(6, 1, 6)
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', bottom=True, top=True, left=True, right=True)
plt.plot(df['time'], df['rdnoise'], 'k.')
plt.ylabel('rdnoise (electron)')
plt.gcf().autofmt_xdate()
# At the 0.5 deg level, there seems to be no correlation between T and bias level
#plt.plot(df['ccdt'], df['mean'], 'k.')
#plt.xlabel('ccdt')
#plt.ylabel('mean')
#plt.show()
# Make sure outdir exists
os.makedirs(outdir, exist_ok=True)
outbase = os.path.join(outdir, this_dateb + '_ccdT_' + this_ccdt)
out_fname = outbase + '_bias_combined.fits'
plt.savefig((outbase + '_bias_vs_time.png'), transparent=True)
if show:
plt.show()
plt.close()
# Do a sanity check of readnoise
av_rdnoise = np.mean(df['rdnoise'])
if (np.abs(av_rdnoise/sx694.example_readnoise - 1)
> readnoise_tolerance):
log.warning('High readnoise {}, skipping {}'.format(av_rdnoise, out_fname))
Path(outbase + '_bad.fits').touch()
return False
# Use ccdp.combine since it enables memory management by breaking
# up images to smaller chunks (better than throwing images away).
# --> eventually it would be great to parallelize this primitive,
# since it is very slow. In the mean time I have parallelized all
# the higher steps!
mem = psutil.virtual_memory()
im = \
ccdp.combine(list(out_fnames),
method='average',
sigma_clip=True,
sigma_clip_low_thresh=5,
sigma_clip_high_thresh=5,
sigma_clip_func=np.ma.median,
sigma_clip_dev_func=mad_std,
mem_limit=mem.available*mem_frac)
im.meta = sx694.metadata(im.meta)
if gain_correct:
im = ccdp.gain_correct(im, gain*u.electron/u.adu)
im_gain = 1
else:
im_gain = gain
im = mask_above_key(im, key='SATLEVEL')
im = mask_above_key(im, key='NONLIN')
# Collect image metadata. For some reason, masked pixels
# aren't ignored by std, etc. even though they output masked
# arrays (which is annoying in its own right -- see example
# commented mean). So just create a new array, if needed, and
# only put into it the good pixels
if im.mask is None:
# This is not a copy! But don't worry, we don't change tim,
# just collect info from it
tim = im
else:
# This is a new array with fewer elements. We will collect
# stats and write the original im, below
tim = im.data[im.mask == 0]
std = np.std(tim)*im_gain
med = np.median(tim)*im_gain
#mean = np.asscalar(np.mean(tim).data )
mean = np.mean(tim)*im_gain
tmin = np.min(tim)*im_gain
tmax = np.max(tim)*im_gain
print('std, mean, med, tmin, tmax (electron)')
print(std, mean, med, tmin, tmax)
im.meta['DATE-OBS'] = (this_date, 'Average of DATE-OBS from set of biases')
im.meta['CCD-TEMP'] = (mean_ccdt, 'Average CCD temperature for combined biases')
im.meta['RDNOISE'] = (av_rdnoise, 'Measured readnoise (electron)')
im.meta['STD'] = (std, 'Standard deviation of image (electron)')
im.meta['MEDIAN'] = (med, 'Median of image (electron)')
im.meta['MEAN'] = (mean, 'Mean of image (electron)')
im.meta['MIN'] = (tmin, 'Min of image (electron)')
im.meta['MAX'] = (tmax, 'Max of image (electron)')
im.meta['HIERARCH OVERSCAN_VALUE'] = (overscan, 'Average of raw bias medians (adu)')
im.meta['HIERARCH SUBTRACT_OVERSCAN'] = (True, 'Overscan has been subtracted')
im.meta['NCOMBINE'] = (len(out_fnames), 'Number of biases combined')
# Record each filename
for i, f in enumerate(fnames):
im.meta['FILE{0:02}'.format(i)] = f
add_history(im.meta,
'Combining NCOMBINE biases indicated in FILENN')
add_history(im.meta,
'SATLEVEL and NONLIN apply to pre-overscan subtraction')
# Leave these large for fast calculations downstream and make
# final results that primarily sit on disk in bulk small
#im.data = im.data.astype('float32')
#im.uncertainty.array = im.uncertainty.array.astype('float32')
im.write(out_fname, overwrite=True)
# Always display image in electrons
impl = plt.imshow(im.multiply(im_gain), origin='lower',
cmap=plt.cm.gray,
filternorm=0, interpolation='none',
vmin=med-std, vmax=med+std)
plt.title('CCDT = {} C on {} (electrons)'.format(this_ccdt, this_dateb))
plt.savefig((outbase + '_bias_combined.png'), transparent=True)
if show:
plt.show()
plt.close()
discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate)
def bias_combine(directory=None,
collection=None,
subdirs=CALIBRATION_SUBDIRS,
glob_include=BIAS_GLOB,
dccdt_tolerance=DCCDT_TOLERANCE,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
num_calibration_files=NUM_CALIBRATION_FILES,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
process_expand_factor=COR_PROCESS_EXPAND_FACTOR,
**kwargs):
"""Combine biases in a directory
Parameters
----------
directory : string
Directory in which to find biases. Default: ``None``
collection : ccdp.Collection
Collection of directory in which to find calibration data.
Default: ``None``
subdirs : list
List of subdirectories in which to search for calibration
data. Default: :value:`CALIBRATION_SUBDIRS`
glob_include : list
List of `glob` expressions for calibration filenames
dccdt_tolerance : float
During the creation of master bias and dark files, frames are
grouped by CCD temperature (ccdt). This is the change in
temperature, seen as a function of time, that is used to trigger
the creation of a new group
num_processes : int
Number of processes available to this task for
multiprocessing. Default: :value:`MAX_NUM_PROCESSES`
mem_frac : float
Fraction of memory available to this task. Default:
:value:`MAX_MEM_FRAC`
**kwargs passed to bias_combine_one_fdict
"""
fdict_list = \
fdict_list_collector(bias_dark_fdict_creator,
directory=directory,
collection=collection,
subdirs=subdirs,
imagetyp='BIAS',
glob_include=glob_include,
dccdt_tolerance=dccdt_tolerance)
if collection is not None:
# Make sure 'directory' is a valid variable
directory = collection.location
nfdicts = len(fdict_list)
if nfdicts == 0:
log.debug('No usable biases found in: ' + directory)
return False
one_fdict_size = (num_calibration_files
* naxis1 * naxis2
* bitpix/8
* process_expand_factor)
our_num_processes = num_can_process(nfdicts,
num_processes=num_processes,
mem_frac=mem_frac,
process_size=one_fdict_size)
num_subprocesses = int(num_processes / our_num_processes)
subprocess_mem_frac = mem_frac / our_num_processes
log.debug(f'bias_combine: {directory} nfdicts = {nfdicts}, num_processes = {num_processes}, mem_frac = {mem_frac}, our_num_processes = {our_num_processes}, num_subprocesses = {num_subprocesses}, subprocess_mem_frac = {subprocess_mem_frac}')
wwk = WorkerWithKwargs(bias_combine_one_fdict,
num_processes=num_subprocesses,
mem_frac=subprocess_mem_frac,
**kwargs)
if nfdicts == 1:
for fdict in fdict_list:
wwk.worker(fdict)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, fdict_list)
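# Usage sketch (hedged; directory and outdir are hypothetical):
#   bias_combine('/data/IoIO/raw/20200422',
#                outdir='/data/IoIO/Calibration',
#                gain_correct=True)
# Biases are grouped by CCD temperature, combined per group, and written
# as <date>_ccdT_<T>_bias_combined.fits together with diagnostic PNGs.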
def dark_combine_one_fdict(fdict,
outdir=CALIBRATION_ROOT,
calibration_scratch=CALIBRATION_SCRATCH,
keep_intermediate=False,
show=False,
mask_threshold=sx694.dark_mask_threshold,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
**kwargs):
"""Worker that allows the parallelization of calibrations taken at one
temperature, exposure time, filter, etc.
"""
fnames = fdict['fnames']
mean_ccdt = fdict['CCDT']
exptime = fdict['EXPTIME']
directory = fdict['directory']
tmp = RedCorData.read(fnames[0])
tm = tmp.meta['DATE-OBS']
this_dateb1, _ = tm.split('T')
badbase = '{}_ccdT_{:.1f}_exptime_{}s'.format(
this_dateb1, mean_ccdt, exptime)
badbase = os.path.join(outdir, badbase)
bad_fname = badbase + '_dark_combined_bad.fits'
# Make a scratch directory that is the date of the first file.
# Not as fancy as the biases, but, hey, it is a scratch directory
sdir = os.path.join(calibration_scratch, this_dateb1)
cmp = CorMultiPipe(num_processes=num_processes,
mem_frac=mem_frac,
naxis1=naxis1,
naxis2=naxis2,
bitpix=bitpix,
outdir=sdir,
create_outdir=True,
overwrite=True,
pre_process_list=[light_image],
post_process_list=[jd_meta])
pout = cmp.pipeline(fnames, **kwargs)
pout, fnames = prune_pout(pout, fnames)
if len(pout) == 0:
log.warning(f"No good darks found at CCDT = {mean_ccdt} C in {directory}")
Path(bad_fname).touch()
return False
out_fnames, pipe_meta = zip(*pout)
jds = [m['jd'] for m in pipe_meta]
tm = Time(np.mean(jds), format='jd')
this_date = tm.fits
this_dateb = this_date.split('T')[0]
if this_dateb != this_dateb1:
log.warning(f"first dark is on {this_dateb1} but average is {this_dateb}")
this_ccdt = '{:.1f}'.format(mean_ccdt)
outbase = '{}_ccdT_{}_exptime_{}s'.format(
this_dateb, this_ccdt, exptime)
mem = psutil.virtual_memory()
out_fnames = list(out_fnames)
if len(out_fnames) == 1:
print('single out_fname = ', out_fnames)
im = RedCorData.read(out_fnames[0])
else:
im = \
ccdp.combine(out_fnames,
method='average',
sigma_clip=True,
sigma_clip_low_thresh=5,
sigma_clip_high_thresh=5,
sigma_clip_func=np.ma.median,
sigma_clip_dev_func=mad_std,
mem_limit=mem.available*mem_frac)
im = mask_above_key(im, key='SATLEVEL')
im = mask_above_key(im, key='NONLIN')
# Create a mask that blanks out all our pixels that are just
# readnoise. Multiply this in as zeros, not a formal mask,
# otherwise subsequent operations with the dark will mask out
# all but the dark current-affected pixels!
measured_readnoise = im.meta['RDNOISE']
is_dark_mask = im.data > measured_readnoise * mask_threshold
n_dark_pix = np.count_nonzero(is_dark_mask)
im.meta['NDARKPIX'] \
= (n_dark_pix, 'number of pixels with dark current')
if n_dark_pix > 0:
im.data = im.data * is_dark_mask
if im.uncertainty is not None:
im.uncertainty.array = im.uncertainty.array*is_dark_mask
# Collect image metadata. For some reason, masked pixels
# aren't ignored by std, etc. even though they output masked
# arrays (which is annoying in its own right -- see example
# commented mean). So just create a new array, if needed, and
# only put into it the good pixels
bad_mask = is_dark_mask == 0
if im.mask is not None:
bad_mask = bad_mask | im.mask
# Flip bad mask around so we get only the dark pixels in the
# linear range
tim = im.data[bad_mask == 0]
std = np.std(tim)
#std = np.asscalar(std.data )
med = np.median(tim)
#med = np.asscalar(med.data )
mean = np.mean(tim)
#mean = np.asscalar(mean.data )
tmin = np.min(tim)
tmax = np.max(tim)
rdnoise = np.sqrt(np.median((tim[1:] - tim[0:-1])**2))
print('combined dark statistics for ' + outbase)
print('std, rdnoise, mean, med, min, max, n_dark_pix')
print(std, rdnoise, mean, med, tmin, tmax, n_dark_pix)
im.meta['STD'] = (std, 'Standard deviation of image (electron)')
im.meta['MEDIAN'] = (med, 'Median of image (electron)')
im.meta['MEAN'] = (mean, 'Mean of image (electron)')
im.meta['MIN'] = (tmin, 'Min of image (electron)')
im.meta['MAX'] = (tmax, 'Max of image (electron)')
im.meta['NCOMBINE'] = (len(out_fnames), 'Number of darks combined')
add_history(im.meta,
'Combining NCOMBINE darks indicated in FILENN')
im.meta['HIERARCH MASK_THRESHOLD'] \
= (mask_threshold, '*RDNOISE (electron)')
add_history(im.meta,
'Setting pixels below MASK_THRESHOLD to zero; prevents subtraction noise')
# Record each filename
for i, f in enumerate(fnames):
im.meta['FILE{0:02}'.format(i)] = f
# Prepare to write
if not os.path.exists(outdir):
os.mkdir(outdir)
outbase = os.path.join(outdir, outbase)
out_fname = outbase + '_dark_combined.fits'
# Leave these large for fast calculations downstream and make
# final results that primarily sit on disk in bulk small
#im.data = im.data.astype('float32')
#im.uncertainty.array = im.uncertainty.array.astype('float32')
im.write(out_fname, overwrite=True)
if show:
impl = plt.imshow(im, origin='lower', cmap=plt.cm.gray,
filternorm=0, interpolation='none',
vmin=med-std, vmax=med+std)
plt.show()
plt.close()
discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate)
def dark_combine(directory=None,
collection=None,
subdirs=CALIBRATION_SUBDIRS,
glob_include=DARK_GLOB,
dccdt_tolerance=DCCDT_TOLERANCE,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
num_calibration_files=NUM_CALIBRATION_FILES,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
process_expand_factor=COR_PROCESS_EXPAND_FACTOR,
**kwargs):
fdict_list = \
fdict_list_collector(bias_dark_fdict_creator,
directory=directory,
collection=collection,
subdirs=subdirs,
imagetyp='DARK',
glob_include=glob_include,
dccdt_tolerance=dccdt_tolerance)
if collection is not None:
# Make sure 'directory' is a valid variable
directory = collection.location
nfdicts = len(fdict_list)
if len(fdict_list) == 0:
log.debug('No usable darks found in: ' + directory)
return False
one_fdict_size = (num_calibration_files
* naxis1 * naxis2
* bitpix/8
* process_expand_factor)
our_num_processes = num_can_process(nfdicts,
num_processes=num_processes,
mem_frac=mem_frac,
process_size=one_fdict_size)
num_subprocesses = int(num_processes / our_num_processes)
subprocess_mem_frac = mem_frac / our_num_processes
log.debug('dark_combine: {} num_processes = {}, mem_frac = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(directory, num_processes, mem_frac, our_num_processes, num_subprocesses, subprocess_mem_frac))
wwk = WorkerWithKwargs(dark_combine_one_fdict,
num_processes=num_subprocesses,
mem_frac=subprocess_mem_frac,
**kwargs)
if nfdicts == 1:
for fdict in fdict_list:
wwk.worker(fdict)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, fdict_list)
def flat_fdict_creator(collection,
imagetyp=None):
# Create a new collection narrowed to our imagetyp
directory = collection.location
collection = collection.filter(imagetyp=imagetyp)
# --> Oops, this recycles wrong-sized flats which are better
# rejected later
#try:
# collection = collection.filter(naxis1=sx694.naxis1,
# naxis2=sx694.naxis2)
#except Exception as e:
# log.error(f'Problem collecting full-frame files of imagetyp {imagetyp} in {directory}: {e}')
# return []
if 'filter' not in collection.keywords:
log.error(f'filter not found in any {imagetyp} files in {directory}')
return []
# Keep in mind filters will have our old names
ofilters = collection.values('filter', unique=True)
fdict_list = []
for ofilt in ofilters:
# The regex_match=True is necessary for the H2O+ filter for some
# reason. re.escape is used for the [] stuff in some of the
# older filters, though I am not positive if it is necessary.
fcollection = collection.filter(filter=re.escape(ofilt),
regex_match=True)
fnames = fcollection.files_filtered(include_path=True)
date_obss = fcollection.values('date-obs')
# This is where we associate the old filter names with the new
# filter designations
filt = get_filt_name(ofilt, date_obss[0])
fdict_list.append({'directory': directory,
'filter': filt,
'ofilter': ofilt,
'fnames': fnames})
return fdict_list
def flat_process(ccd, bmp_meta=None,
init_threshold=100, # units of readnoise
nd_edge_expand=ND_EDGE_EXPAND,
in_name=None,
**kwargs):
if ccd.meta.get('flatdiv') is not None:
raise ValueError('Trying to reprocess a processed flat')
# Use basic patch medians to spot pathological cases
mdp, mlp = im_med_min_max(ccd)
if mlp < 1000:
log.warning(f'flat median of {mlp} {ccd.unit} too low {in_name}')
return None
if mlp > ccd.meta['NONLIN']:
log.warning(f'flat median of {mlp} {ccd.unit} too high {in_name}')
return None
# Use photutils.Background2D to smooth each flat and get a
# good maximum value. Mask edges and ND filter so as to
# increase quality of background map
mask = np.zeros(ccd.shape, bool)
# Return a copy of ccd with the edge_mask property adjusted. Do
# it this way to keep ccd's ND filt parameters intact
emccd = RedCorData(ccd, edge_mask=-nd_edge_expand)
try:
mask[emccd.ND_coords] = True
except Exception as e:
# We should have caught all nasty cases above
log.error(f'ND_coords gave error: {e} for {in_name}')
return None
del emccd
rdnoise = ccd.meta['RDNOISE']
mask[ccd.data < rdnoise * init_threshold] = True
ccd.mask = mask
bkg_estimator = MedianBackground()
b = Background2D(ccd, 20, mask=mask, filter_size=5,
bkg_estimator=bkg_estimator)
max_flat = np.max(b.background)
if max_flat > ccd.meta['NONLIN']*ccd.unit:
log.debug(f'flat max value of {max_flat.value} {max_flat.unit} too bright: {in_name}')
return None
ccd.mask = None
ccd = ccd.divide(max_flat, handle_meta='first_found')
# --> This will get better if Card units are implemented
ccd.meta['FLATDIV'] = (max_flat.value, f'Normalization value (smoothed max) ({max_flat.unit})')
# Get ready to capture the mean DATE-OBS
tm = Time(ccd.meta['DATE-OBS'], format='fits')
if bmp_meta is not None:
bmp_meta['jd'] = tm.jd
return ccd
def flat_combine_one_fdict(fdict,
outdir=CALIBRATION_ROOT,
calibration_scratch=CALIBRATION_SCRATCH,
keep_intermediate=False,
min_num_flats=MIN_NUM_FLATS,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
show=False,
nd_edge_expand=ND_EDGE_EXPAND,
flat_cut=FLAT_CUT,
**kwargs):
fnames = fdict['fnames']
num_files = len(fnames)
this_filter = fdict['filter']
directory = fdict['directory']
tmp = RedCorData.read(fnames[0])
tm = tmp.meta['DATE-OBS']
this_dateb1, _ = tm.split('T')
outbase = os.path.join(outdir, this_dateb1)
bad_fname = outbase + '_' + this_filter + '_flat_bad.fits'
if len(fnames) < min_num_flats:
log.warning(f"Not enough good flats found for filter {this_filter} in {directory}")
Path(bad_fname).touch()
return False
# Make a scratch directory that is the date of the first file.
# Not as fancy as the biases, but, hey, it is a scratch directory
sdir = os.path.join(calibration_scratch, this_dateb1)
cmp = CorMultiPipe(num_processes=num_processes,
mem_frac=mem_frac,
naxis1=naxis1,
naxis2=naxis2,
bitpix=bitpix,
outdir=sdir,
create_outdir=True,
overwrite=True,
post_process_list=[flat_process, jd_meta],
**kwargs)
pout = cmp.pipeline(fnames, **kwargs)
pout, fnames = prune_pout(pout, fnames)
if len(pout) == 0:
log.warning(f"Not enough good flats found for filter {this_filter} in {directory}")
Path(bad_fname).touch()
return False
out_fnames, pipe_meta = zip(*pout)
if len(out_fnames) < min_num_flats:
log.warning(f"Not enough good flats found for filter {this_filter} in {directory}")
discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate)
Path(bad_fname).touch()
return False
jds = [m['jd'] for m in pipe_meta]
# Combine our flats
mem = psutil.virtual_memory()
#print(f'flat_combine_one_filt: mem_frac {mem_frac}; num_processes {num_processes}')
#print(f'flat_combine_one_filt: mem_limit {mem.available*mem_frac/2**20}')
im = \
ccdp.combine(list(out_fnames),
method='average',
sigma_clip=True,
sigma_clip_low_thresh=5,
sigma_clip_high_thresh=5,
sigma_clip_func=np.ma.median,
sigma_clip_dev_func=mad_std,
mem_limit=mem.available*mem_frac)
im.meta['NCOMBINE'] = (len(out_fnames), 'Number of flats combined')
# Record each filename
for i, f in enumerate(fnames):
im.meta['FILE{0:02}'.format(i)] = f
add_history(im.meta,
'Combining NCOMBINE flats indicated in FILENN')
# Interpolate over our ND filter
#print(f'flat_combine_one_filt pre CorObsData: mem available: {mem.available/2**20}')
emccd = RedCorData(im, edge_mask=-nd_edge_expand)
good_mask = np.ones(im.shape, bool)
good_mask[emccd.ND_coords] = False
points = np.nonzero(good_mask)
values = im[points]
xi = emccd.ND_coords
log.debug(f'flat_combine_one_filt post CorObsData: mem available: {mem.available/2**20}')
# Linear behaved much better
nd_replacement = interpolate.griddata(points,
values,
xi,
method='linear')
#method='cubic')
log.debug(f'flat_combine_one_filt post interpolate.griddata mem available: {mem.available/2**20}')
im.data[xi] = nd_replacement
# Do one last smoothing and renormalization
bkg_estimator = MedianBackground()
b = Background2D(im, 20, mask=(im.data<flat_cut), filter_size=5,
bkg_estimator=bkg_estimator)
max_flat = np.max(b.background)
log.debug(f'flat_combine_one_filt post Background2D mem available: {mem.available/2**20}')
im = im.divide(max_flat, handle_meta='first_found')
im.mask = im.data < flat_cut
im.meta['FLAT_CUT'] = (flat_cut, 'Value below which flat is masked')
# Prepare to write
tm = Time(np.mean(jds), format='jd')
this_date = tm.fits
this_dateb = this_date.split('T')[0]
if this_dateb != this_dateb1:
log.warning(f"first flat is on {this_dateb1} but average is {this_dateb}")
outbase = '{}_{}'.format(this_dateb, this_filter)
if not os.path.exists(outdir):
os.mkdir(outdir)
outbase = os.path.join(outdir, outbase)
out_fname = outbase + '_flat.fits'
im.write(out_fname, overwrite=True)
if show:
impl = plt.imshow(im, origin='upper', cmap=plt.cm.gray)
plt.show()
plt.close()
discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate)
def flat_combine(directory=None,
collection=None,
subdirs=CALIBRATION_SUBDIRS,
glob_include=FLAT_GLOB,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
num_calibration_files=NUM_CALIBRATION_FILES,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=64, # uncertainty and mask not used in griddata
griddata_expand_factor=GRIDDATA_EXPAND_FACTOR,
**kwargs):
print(f'flat_combine directory: {directory}')
fdict_list = \
fdict_list_collector(flat_fdict_creator,
directory=directory,
collection=collection,
subdirs=subdirs,
imagetyp='FLAT',
glob_include=glob_include)
if collection is not None:
# Make sure 'directory' is a valid variable
directory = collection.location
nfdicts = len(fdict_list)
if nfdicts == 0:
log.debug('No usable flats found in: ' + directory)
return False
one_filt_size = (num_calibration_files
* naxis1 * naxis2
* bitpix/8
* griddata_expand_factor)
our_num_processes = num_can_process(nfdicts,
num_processes=num_processes,
mem_frac=mem_frac,
process_size=one_filt_size,
error_if_zero=False)
our_num_processes = max(1, our_num_processes)
# Combining files is the slow part, so we want the maximum of
# processes doing that in parallel
log.debug(f'flat_combine: {directory}, nfdicts = {nfdicts}, our_num_processes = {our_num_processes}')
# Number of sub-processes in each process we will spawn
num_subprocesses = int(num_processes / our_num_processes)
# Similarly, the memory fraction for each process we will spawn
subprocess_mem_frac = mem_frac / our_num_processes
log.debug('flat_combine: {} num_processes = {}, mem_frac = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(directory, num_processes, mem_frac, our_num_processes, num_subprocesses, subprocess_mem_frac))
wwk = WorkerWithKwargs(flat_combine_one_fdict,
num_processes=num_subprocesses,
mem_frac=subprocess_mem_frac,
**kwargs)
if nfdicts == 1:
for fdict in fdict_list:
wwk.worker(fdict)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, fdict_list)
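# Usage sketch (hedged; path hypothetical): dark_combine and flat_combine
# follow the same calling pattern as bias_combine, e.g.
#   flat_combine('/data/IoIO/raw/20200422', outdir='/data/IoIO/Calibration')
# Flats are grouped per (standardized) filter and written as
# <date>_<filter>_flat.fits.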
######### Calibration object
def dir_has_calibration(directory, glob_include, subdirs=None):
"""Returns True if directory has calibration files matching pattern(s)
in glob_include. Optionally checks subdirs"""
if not os.path.isdir(directory):
# This is the end of our recursive line
return False
if subdirs is None:
subdirs = []
for sd in subdirs:
subdir = os.path.join(directory, sd)
if dir_has_calibration(subdir, glob_include):
return True
# If we made it here, our subdirs had no calibration files or we
# have been called recursively and are in one
for gi in glob_include:
flist = glob.glob(os.path.join(directory, gi))
if len(flist) > 0:
return True
return False
class Lockfile():
def __init__(self,
fname=None,
check_every=10):
assert fname is not None
self._fname = fname
self.check_every = check_every
@property
def is_set(self):
return os.path.isfile(self._fname)
# --> could add a timeout and a user-specified optional message
def wait(self):
if not self.is_set:
return
while self.is_set:
with open(self._fname, "r") as f:
log.error(f'lockfile {self._fname} detected for {f.read()}')
time.sleep(self.check_every)
log.error(f'(error cleared) lockfile {self._fname} removed')
def create(self):
self.wait()
with open(self._fname, "w") as f:
f.write('PID: ' + str(os.getpid()))
def clear(self):
os.remove(self._fname)
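# Usage sketch (hedged; lockfile path is hypothetical):
#   lock = Lockfile('/tmp/IoIO_reduce.lock')
#   lock.create()      # waits for any existing lock, then records our PID
#   try:
#       ...            # reduction work
#   finally:
#       lock.clear()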
class Calibration():
"""Class for conducting CCD calibrations"""
def __init__(self,
reduce=False,
raw_data_root=RAW_DATA_ROOT,
calibration_root=CALIBRATION_ROOT,
subdirs=CALIBRATION_SUBDIRS,
keep_intermediate=False,
ccdt_tolerance=CCDT_TOLERANCE,
dark_exp_margin=DARK_EXP_MARGIN,
start_date=None,
stop_date=None,
gain_correct=True, # This is gain correcting the bias and dark
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
num_ccdts=NUM_CCDTS,
num_dark_exptimes=NUM_DARK_EXPTIMES,
num_filts=NUM_FILTS,
num_calibration_files=NUM_CALIBRATION_FILES,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
process_expand_factor=COR_PROCESS_EXPAND_FACTOR,
griddata_expand_factor=GRIDDATA_EXPAND_FACTOR,
bias_glob=BIAS_GLOB,
dark_glob=DARK_GLOB,
flat_glob=FLAT_GLOB,
flat_cut=FLAT_CUT,
nd_edge_expand=ND_EDGE_EXPAND,
lockfile=LOCKFILE):
self._raw_data_root = raw_data_root
self._calibration_root = calibration_root
self._subdirs = subdirs
self.keep_intermediate = keep_intermediate
self._ccdt_tolerance = ccdt_tolerance
self._dark_exp_margin=dark_exp_margin
self._bias_table = None
self._dark_table = None
self._flat_table = None
# gain_correct is set only in the biases and propagated
# through the rest of the pipeline in cor_process
self._gain_correct = gain_correct
self._bias_glob = assure_list(bias_glob)
self._dark_glob = assure_list(dark_glob)
self._flat_glob = assure_list(flat_glob)
self._lockfile = lockfile
self.flat_cut = flat_cut
self.nd_edge_expand = nd_edge_expand
self.num_processes = num_processes
self.mem_frac = mem_frac
self.num_ccdts = num_ccdts
self.num_dark_exptimes = num_dark_exptimes
self.num_filts = num_filts
self.num_calibration_files = num_calibration_files
self.naxis1 = naxis1
self.naxis2 = naxis2
self.bitpix = bitpix
self.process_expand_factor = process_expand_factor
self.griddata_expand_factor = griddata_expand_factor
if start_date is None:
self._start_date = datetime.datetime(1,1,1)
else:
self._start_date = datetime.datetime.strptime(start_date,
"%Y-%m-%d")
if stop_date is None:
# Make stop time tomorrow in case we are analyzing on the
# UT boundary
self._stop_date = datetime.datetime.today() + datetime.timedelta(days=1)
else:
self._stop_date = datetime.datetime.strptime(stop_date, "%Y-%m-%d")
assert self._start_date <= self._stop_date
# These need to be on a per-instantiation basis, since they
# depend on our particular start-stop range. These are also
# important, since we don't take calibrations every night. The
# cost of checking for new reductions is relatively low, since
# it is mostly a directory listing exercise
self._bias_dirs_dates_checked = None
self._dark_dirs_dates_checked = None
self._flat_dirs_dates_checked = None
if reduce:
self.reduce()
@property
def gain_correct(self):
return self._gain_correct
def dirs_dates_to_reduce(self, table_creator,
glob_include,
dirs_dates_checked=None,
subdirs=None):
to_check = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
# See if we have reduced/checked any/everything in this
# instantiation. This is not as efficient as it could be
# since we have sorted lists, but we don't have many elements,
# so there is not much point in getting fancier
if dirs_dates_checked is not None:
to_check = [dt for dt in to_check
if not dt in dirs_dates_checked]
if len(to_check) == 0:
return []
# Take any reductions on disk out of the list. Note, we check
# for date only, since we have lost the original directory
# information once reduced
tbl = table_creator(autoreduce=False, rescan=True)
if tbl is not None:
reduced_ts = [tm.to_datetime() for tm in tbl['dates']]
# Remove duplicates
reduced_ts = list(set(reduced_ts))
to_check = [dt for dt in to_check
if not dt[1] in reduced_ts]
if len(to_check) == 0:
return []
to_reduce = [dt for dt in to_check
if dir_has_calibration(dt[0],
glob_include,
subdirs=subdirs)]
# Remove duplicates
return sorted(list(set(to_reduce)))
def reduce_bias(self):
dirs_dates = \
self.dirs_dates_to_reduce(self.bias_table_create,
self._bias_glob,
self._bias_dirs_dates_checked,
self._subdirs)
ndirs_dates = len(dirs_dates)
if ndirs_dates == 0:
return
# If we made it here, we have some real work to do
# Set a simple lockfile so we don't have multiple processes reducing
lock = Lockfile(self._lockfile)
lock.create()
one_fdict_size = (self.num_calibration_files
* self.naxis1 * self.naxis2
* self.bitpix/8
* self.process_expand_factor)
ncp = num_can_process(self.num_ccdts,
num_processes=self.num_processes,
mem_frac=self.mem_frac,
process_size=self.num_ccdts * one_fdict_size,
error_if_zero=False)
our_num_processes = max(1, ncp)
num_subprocesses = int(self.num_processes / our_num_processes)
subprocess_mem_frac = self.mem_frac / our_num_processes
log.debug(f'Calibration.reduce_bias: ndirs_dates = {ndirs_dates}')
log.debug('Calibration.reduce_bias: self.num_processes = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(self.num_processes, our_num_processes, num_subprocesses, subprocess_mem_frac))
#return
wwk = WorkerWithKwargs(bias_combine,
subdirs=self._subdirs,
glob_include=self._bias_glob,
outdir=self._calibration_root,
auto=True, # A little dangerous, but just one place for changes
gain_correct=self._gain_correct,
num_processes=self.num_processes,
naxis1=self.naxis1,
naxis2=self.naxis2,
process_expand_factor=self.process_expand_factor,
num_calibration_files=self.num_calibration_files,
mem_frac=self.mem_frac,
keep_intermediate=self.keep_intermediate)
dirs = [dt[0] for dt in dirs_dates]
if our_num_processes == 1:
for d in dirs:
wwk.worker(d)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, dirs)
self.bias_table_create(rescan=True, autoreduce=False)
# This could potentially get set in dirs_dates_to_reduce, but
# it seems better to set it after we have actually done the work
all_dirs_dates = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
self._bias_dirs_dates_checked = all_dirs_dates
lock.clear()
def reduce_dark(self):
dirs_dates = \
self.dirs_dates_to_reduce(self.dark_table_create,
self._dark_glob,
self._dark_dirs_dates_checked,
self._subdirs)
ndirs_dates = len(dirs_dates)
if ndirs_dates == 0:
return
# If we made it here, we have some real work to do
# Set a simple lockfile so we don't have multiple processes reducing
lock = Lockfile(self._lockfile)
lock.create()
one_fdict_size = (self.num_calibration_files
* self.naxis1 * self.naxis2
* self.bitpix/8
* self.process_expand_factor)
ncp = num_can_process(self.num_ccdts,
num_processes=self.num_processes,
mem_frac=self.mem_frac,
process_size=self.num_ccdts * one_fdict_size,
error_if_zero=False)
our_num_processes = max(1, ncp)
num_subprocesses = int(self.num_processes / our_num_processes)
subprocess_mem_frac = self.mem_frac / our_num_processes
log.debug(f'Calibration.reduce_dark: ndirs_dates = {ndirs_dates}')
log.debug('Calibration.reduce_dark: self.num_processes = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(self.num_processes, our_num_processes, num_subprocesses, subprocess_mem_frac))
#return
wwk = WorkerWithKwargs(dark_combine,
subdirs=self._subdirs,
glob_include=self._dark_glob,
outdir=self._calibration_root,
calibration=self,
auto=True, # A little dangerous, but just one place for changes
num_processes=self.num_processes,
naxis1=self.naxis1,
naxis2=self.naxis2,
process_expand_factor=self.process_expand_factor,
num_calibration_files=self.num_calibration_files,
mem_frac=self.mem_frac,
keep_intermediate=self.keep_intermediate)
dirs = [dt[0] for dt in dirs_dates]
if our_num_processes == 1:
for d in dirs:
wwk.worker(d)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, dirs)
self.dark_table_create(rescan=True, autoreduce=False)
# This could potentially get set in dirs_dates_to_reduce, but
# it seems better to set it after we have actually done the work
all_dirs_dates = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
self._dark_dirs_dates_checked = all_dirs_dates
lock.clear()
def reduce_flat(self):
dirs_dates = \
self.dirs_dates_to_reduce(self.flat_table_create,
self._flat_glob,
self._flat_dirs_dates_checked,
self._subdirs)
ndirs_dates = len(dirs_dates)
if ndirs_dates == 0:
return
# If we made it here, we have some real work to do
# Set a simple lockfile so we don't have multiple processes reducing
lock = Lockfile(self._lockfile)
lock.create()
one_filt_size = (self.num_calibration_files
* self.naxis1 * self.naxis2
* self.bitpix/8
* self.griddata_expand_factor)
# Our sub-process can divide and conquer if necessary
ncp = num_can_process(self.num_filts,
num_processes=self.num_processes,
mem_frac=self.mem_frac,
process_size=self.num_filts * one_filt_size,
error_if_zero=False)
our_num_processes = max(1, ncp)
num_subprocesses = int(self.num_processes / our_num_processes)
subprocess_mem_frac = self.mem_frac / our_num_processes
log.debug(f'Calibration.reduce_flat: ndirs_dates = {ndirs_dates}')
log.debug('Calibration.reduce_flat: self.num_processes = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(self.num_processes, our_num_processes, num_subprocesses, subprocess_mem_frac))
wwk = WorkerWithKwargs(flat_combine,
subdirs=self._subdirs,
glob_include=self._flat_glob,
outdir=self._calibration_root,
calibration=self,
auto=True, # A little dangerous, but just one place for changes
num_processes=self.num_processes,
mem_frac=self.mem_frac,
num_calibration_files=self.num_calibration_files,
naxis1=self.naxis1,
naxis2=self.naxis2,
griddata_expand_factor=self.griddata_expand_factor,
keep_intermediate=self.keep_intermediate,
flat_cut=self.flat_cut,
nd_edge_expand=self.nd_edge_expand)
dirs = [dt[0] for dt in dirs_dates]
if our_num_processes == 1:
for d in dirs:
wwk.worker(d)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, dirs)
self.flat_table_create(rescan=True, autoreduce=False)
# This could potentially get set in dirs_dates_to_reduce, but
# it seems better to set it after we have actually done the work
all_dirs_dates = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
self._flat_dirs_dates_checked = all_dirs_dates
lock.clear()
def reduce(self):
self.reduce_bias()
self.reduce_dark()
self.reduce_flat()
def bias_table_create(self,
rescan=False, # Set to True after new biases have been added
autoreduce=True): # Set to False to break recursion
# when first looking for
"""Create table of bias info from calibration directory"""
if autoreduce:
# By default always do auto reduction to catch the latest downloads
self.reduce_bias()
return self._bias_table
# If we made it here, autoreduce is guaranteed to be false
if rescan:
self._bias_table = None
if self._bias_table is not None:
return self._bias_table
if not os.path.isdir(self._calibration_root):
# We haven't reduced any calibration images yet and we
# don't want to automatically do so (just yet)
return None
fnames = glob.glob(os.path.join(self._calibration_root,
'*_bias_combined*'))
fnames = [f for f in fnames if '.fits' in f]
if len(fnames) == 0:
# Catch the not autoreduce case when we still have no files
return None
# If we made it here, we have files to populate our table
dates = []
ccdts = []
bads = []
for fname in fnames:
bfname = os.path.basename(fname)
sfname = bfname.split('_')
date = Time(sfname[0], format='fits')
bad = 'bad' in bfname
if bad:
ccdt = np.NAN
else:
ccdt = float(sfname[2])
dates.append(date)
ccdts.append(ccdt)
bads.append(bad)
self._bias_table = QTable([fnames, dates, ccdts, bads],
names=('fnames', 'dates', 'ccdts', 'bad'),
meta={'name': 'Bias information table'})
return self._bias_table
def dark_table_create(self,
rescan=False, # Set to True after new biases have been added
autoreduce=True): # Set to False to break recursion
# when first looking for
"""Create table of bias info from calibration directory"""
if autoreduce:
# By default always do auto reduction to catch the latest downloads
self.reduce_dark()
return self._dark_table
# If we made it here, autoreduce is guaranteed to be false
if rescan:
self._dark_table = None
if self._dark_table is not None:
return self._dark_table
if not os.path.isdir(self._calibration_root):
# We haven't reduced any calibration images yet and we
# don't want to automatically do so (just yet)
return None
fnames = glob.glob(os.path.join(self._calibration_root,
'*_dark_combined*'))
fnames = [f for f in fnames if '.fits' in f]
if len(fnames) == 0:
# Catch the not autoreduce case when we still have no files
return None
# If we made it here, we have files to populate our table
dates = []
ccdts = []
exptimes = []
bads = []
for fname in fnames:
bfname = os.path.basename(fname)
sfname = bfname.split('_')
date = Time(sfname[0], format='fits')
bad = 'bad' in bfname
if bad:
ccdt = np.NAN
exptime = np.NAN
else:
ccdt = float(sfname[2])
exptime = sfname[4]
exptime = float(exptime[:-1])
dates.append(date)
ccdts.append(ccdt)
exptimes.append(exptime)
bads.append(bad)
self._dark_table = \
QTable([fnames, dates, ccdts, exptimes, bads],
names=('fnames', 'dates', 'ccdts', 'exptimes', 'bad'),
meta={'name': 'Dark information table'})
return self._dark_table
def flat_table_create(self,
rescan=False, # Set to True after new biases have been added
autoreduce=True): # Set to False to break recursion
# when first looking for
"""Create table of bias info from calibration directory"""
if autoreduce:
# By default always do auto reduction to catch the latest downloads
self.reduce_flat()
return self._flat_table
# If we made it here, autoreduce is guaranteed to be false
if rescan:
self._flat_table = None
if self._flat_table is not None:
return self._flat_table
if not os.path.isdir(self._calibration_root):
# We haven't reduced any calibration images yet and we
# don't want to automatically do so (just yet)
return None
fnames = glob.glob(os.path.join(self._calibration_root,
'*_flat*'))
fnames = [f for f in fnames if '.fits' in f]
if len(fnames) == 0:
# Catch the not autoreduce case when we still have no files
return None
# If we made it here, we have files to populate our table
dates = []
filts = []
bads = []
for fname in fnames:
bfname = os.path.basename(fname)
sfname = bfname.split('_', 1)
date = Time(sfname[0], format='fits')
bad = 'bad' in bfname
filttail = sfname[1]
filt_tail = filttail.split('_flat')
filt = filt_tail[0]
dates.append(date)
filts.append(filt)
bads.append(bad)
self._flat_table = \
QTable([fnames, dates, filts, bads],
names=('fnames', 'dates', 'filters', 'bad'),
meta={'name': 'Flat information table'})
return self._flat_table
@property
def bias_table(self):
return self.bias_table_create()
@property
def dark_table(self):
return self.dark_table_create()
@property
def flat_table(self):
return self.flat_table_create()
def best_bias(self, fname_ccd_or_hdr, ccdt_tolerance=None):
"""Returns filename of best-matched bias for a file"""
if ccdt_tolerance is None:
ccdt_tolerance = self._ccdt_tolerance
if isinstance(fname_ccd_or_hdr, Header):
hdr = fname_ccd_or_hdr
elif isinstance(fname_ccd_or_hdr, CCDData):
hdr = fname_ccd_or_hdr.meta
elif isinstance(fname_ccd_or_hdr, str):
ccd = RedCorData.read(fname_ccd_or_hdr)
hdr = ccd.meta
tm = Time(hdr['DATE-OBS'], format='fits')
ccdt = hdr['CCD-TEMP']
# This is the entry point for reduction
bad = self.bias_table['bad']
dccdts = ccdt - self.bias_table['ccdts']
within_tol = np.abs(dccdts) < ccdt_tolerance
good = np.logical_and(within_tol, ~bad)
good_ccdt_idx = np.flatnonzero(good)
if len(good_ccdt_idx) == 0:
log.warning(f'No biases found within {ccdt_tolerance} C, broadening by factor of 2')
return self.best_bias(hdr, ccdt_tolerance=ccdt_tolerance*2)
ddates = tm - self.bias_table['dates']
best_ccdt_date_idx = np.argmin(np.abs(ddates[good_ccdt_idx]))
# unwrap
best_ccdt_date_idx = good_ccdt_idx[best_ccdt_date_idx]
return self._bias_table['fnames'][best_ccdt_date_idx]
def best_dark(self,
fname_ccd_or_hdr,
ccdt_tolerance=None,
dark_exp_margin=None):
"""Returns filename of best-matched dark for a file"""
if ccdt_tolerance is None:
ccdt_tolerance = self._ccdt_tolerance
if dark_exp_margin is None:
dark_exp_margin = self._dark_exp_margin
if isinstance(fname_ccd_or_hdr, Header):
hdr = fname_ccd_or_hdr
elif isinstance(fname_ccd_or_hdr, CCDData):
hdr = fname_ccd_or_hdr.meta
elif isinstance(fname_ccd_or_hdr, str):
ccd = RedCorData.read(fname_ccd_or_hdr)
hdr = ccd.meta
tm = Time(hdr['DATE-OBS'], format='fits')
ccdt = hdr['CCD-TEMP']
exptime = hdr['EXPTIME']
# This is the entry point for reduction
bad = self.dark_table['bad']
dccdts = ccdt - self.dark_table['ccdts']
within_tol = np.abs(dccdts) < ccdt_tolerance
good = np.logical_and(within_tol, ~bad)
good_ccdt_idx = np.flatnonzero(good)
if len(good_ccdt_idx) == 0:
log.warning(f'No darks found within {ccdt_tolerance} C, broadening by factor of 2')
return self.best_dark(hdr, ccdt_tolerance=ccdt_tolerance*2)
# Find the longest exposure time in our collection of darks
# that matches our exposure. Prefer longer exposure times by
# dark_exp_margin
dexptimes = exptime - self.dark_table['exptimes']
good_exptime_idx = np.flatnonzero(
abs(dexptimes[good_ccdt_idx]) < dark_exp_margin)
if len(good_exptime_idx) == 0:
log.warning(f'No darks found with exptimes within {dark_exp_margin} s, broadening margin by factor of 2')
return self.best_dark(hdr,
ccdt_tolerance=ccdt_tolerance,
dark_exp_margin=dark_exp_margin*2)
# unwrap
good_exptime_idx = good_ccdt_idx[good_exptime_idx]
ddates = tm - self.dark_table['dates']
best_exptime_date_idx = np.argmin(np.abs(ddates[good_exptime_idx]))
# unwrap
best_exptime_date_idx = good_exptime_idx[best_exptime_date_idx]
return self._dark_table['fnames'][best_exptime_date_idx]
# --> TODO: possibly put in the number of darks as a factor as
# --> well, weighted by difference in time
def best_flat(self, fname_ccd_or_hdr):
"""Returns filename of best-matched flat for a file"""
if isinstance(fname_ccd_or_hdr, Header):
hdr = fname_ccd_or_hdr
elif isinstance(fname_ccd_or_hdr, CCDData):
hdr = fname_ccd_or_hdr.meta
elif isinstance(fname_ccd_or_hdr, str):
ccd = RedCorData.read(fname_ccd_or_hdr)
hdr = ccd.meta
tm = Time(hdr['DATE-OBS'], format='fits')
filt = hdr['FILTER']
# This is the entry point for reduction
bad = self.flat_table['bad']
this_filt = filt == self.flat_table['filters']
good = np.logical_and(this_filt, ~bad)
good_filt_idx = np.flatnonzero(good)
if len(good_filt_idx) == 0:
raise ValueError(f'No {filt} flats found')
ddates = tm - self.flat_table['dates']
best_filt_date_idx = np.argmin(np.abs(ddates[good_filt_idx]))
# unwrap
best_filt_date_idx = good_filt_idx[best_filt_date_idx]
return self._flat_table['fnames'][best_filt_date_idx]
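    # Example usage (sketch): look up the best-matched calibration frames for
    # a raw light frame.  The file path is hypothetical; note that accessing
    # the tables triggers automatic reduction of any un-reduced calibration
    # data unless it has already been done.
    #
    #   c = Calibration(reduce=False)
    #   fname = '/data/io/IoIO/raw/20200101/some_object.fts'
    #   best_b = c.best_bias(fname)
    #   best_d = c.best_dark(fname)
    #   best_f = c.best_flat(fname)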
##########################
# Command line functions
##########################
def calibrate_cmd(args):
c = Calibration(raw_data_root=args.raw_data_root,
calibration_root=args.calibration_root,
start_date=args.start,
stop_date=args.stop,
reduce=True,
num_processes=args.num_processes)
def filt_check_dir(directory):
log.info(f'Checking {directory}')
fnames = os.listdir(directory)
for f in fnames:
_, extension = os.path.splitext(f)
if extension not in ['.fits', '.fit']:
continue
isbadname = False
badnames = ['oving_to', 'Mercury', 'Venus', '_sequence', 'PinPoint']
for bn in badnames:
if bn in f:
isbadname = True
break
if isbadname:
continue
try:
hdr = getheader(os.path.join(directory, f))
if hdr['IMAGETYP'] != 'LIGHT':
continue
hdr = standardize_filt_name(hdr)
filt = hdr['FILTER']
ofilt = hdr.get('OFILTER')
except Exception as e:
log.info(f'{e} {os.path.join(directory, f)}')
continue
if filt == 'open':
continue
# See if we can match our filt to the fname
if filt in f:
# Success, so just move on quietly
continue
if ofilt and ofilt in f:
# Old filter matches
continue
if ('IPT_Na_R' in f
or 'Na_IPT_R' in f
or 'PrecisionGuideDataFile' in f):
            # These are sequence names in early 2018 that are just too inscrutable
continue
## Try some cases I have run across in 2017 and 2018
if 'IPT-' in f:
line = 'SII'
elif 'Na' in f:
line = 'Na'
else:
line = ''
#if 'on-band' in f:
# on_off = 'on'
#elif 'off-band' in f:
# on_off = 'off'
#else:
# on_off = ''
if 'cont' in f:
on_off = 'off'
if 'on' in f:
on_off = 'on'
elif 'off' in f:
on_off = 'off'
else:
on_off = ''
if f'{line}_{on_off}' != filt:
log.error(f'FILTER = {filt}; {os.path.join(directory, f)}')
#else:
# fname_compare = f
#
# if 'on-band.fit' in bf:
# on_off = 'on'
# elif 'off-band.fit' in bf:
# on_off = 'off'
def filt_check_tree(directory=RAW_DATA_ROOT):
dirs = [dd[0] for dd in get_dirs_dates(directory)]
for d in dirs:
filt_check_dir(d)
def filt_check_cmd(args):
if args.directory:
filt_check_dir(args.directory)
else:
filt_check_tree(args.tree)
if __name__ == "__main__":
log.setLevel('DEBUG')
parser = argparse.ArgumentParser(
description='IoIO pipeline processing system')
subparsers = parser.add_subparsers(dest='one of the subcommands in {}, above', help='sub-command help')
subparsers.required = True
#### calibrate
calibrate_parser = subparsers.add_parser(
'calibrate', help='Run calibration to generate bias, dark, flat frames')
calibrate_parser.add_argument(
'--raw_data_root', help=f'raw data root (default: {RAW_DATA_ROOT})',
default=RAW_DATA_ROOT)
calibrate_parser.add_argument(
'--calibration_root',
help=f'calibration root (default: {CALIBRATION_ROOT})',
default=CALIBRATION_ROOT)
calibrate_parser.add_argument(
'--start', help='start directory/date (default: earliest -- dangerous!)')
calibrate_parser.add_argument(
'--stop', help='stop directory/date (default: latest)')
calibrate_parser.add_argument(
'--num_processes', type=float, default=0,
help='number of subprocesses for parallelization; 0=all cores, <1 = fraction of total cores')
calibrate_parser.set_defaults(func=calibrate_cmd)
#### filt_check
filt_check_parser = subparsers.add_parser(
'filt_check', help='Spot inconsistencies between filter names')
filt_check_parser.add_argument(
'--tree',
help=f'(default action) Root of directory tree to check (default directory: {RAW_DATA_ROOT})',
metavar='DIRECTORY',
default=RAW_DATA_ROOT)
filt_check_parser.add_argument(
'--directory',
help=f'Single directory to check')
filt_check_parser.set_defaults(func=filt_check_cmd)
# Final set of commands that makes argparse work
args = parser.parse_args()
# This check for func is not needed if I make subparsers.required = True
if hasattr(args, 'func'):
args.func(args)
#c = Calibration(start_date='2019-09-01', stop_date='2021-12-31', reduce=True)
#c = Calibration(start_date='2020-01-01', stop_date='2021-12-31', reduce=True)
##c = Calibration(start_date='2020-01-01', stop_date='2021-02-28', reduce=True)
###t = c.dark_table_create(autoreduce=False, rescan=True)
##fname1 = '/data/Mercury/raw/2020-05-27/Mercury-0005_Na-on.fit'
##fname2 = '/data/Mercury/raw/2020-05-27/Mercury-0005_Na_off.fit'
##cmp = CorMultiPipe(auto=True, calibration=c,
## post_process_list=[detflux, nd_filter_mask])
##pout = cmp.pipeline([fname1, fname2], outdir='/tmp', overwrite=True)
##pout = cmp.pipeline([fname1], outdir='/tmp', overwrite=True)
#
##ccd = RedCorData.read(fname1)
##ccd = cor_process(ccd, calibration=c, auto=True)
##ccd.write('/tmp/test.fits', overwrite=True)
#
#flat = '/data/io/IoIO/raw/2020-06-06/Sky_Flat-0002_B.fit'
#cmp = CorMultiPipe(auto=True, calibration=c,
# post_process_list=[flat_process])
#pout = cmp.pipeline([flat], outdir='/tmp', overwrite=True)
##fname1 = '/data/io/IoIO/raw/20210310/HD 132052-S001-R001-C002-R.fts'
#fname1 = '/data/Mercury/raw/2020-05-27/Mercury-0005_Na-on.fit'
#pgd = RedCorData.read(fname1)
#pgd.meta = sx694.metadata(pgd.meta)
#pgd.meta = sx694.exp_correct(pgd.meta)
#pgd.meta = sx694.date_beg_avg(pgd.meta)
#print(pgd.meta)
##
#pgd = detflux(pgd)
#print(pgd.meta)
##print(reduced_dir('/data/io/IoIO/raw/20210513'))
##print(reduced_dir('/data/io/IoIO/raw/20210513', create=True))
##print(reduced_dir('/data/io/IoIO/raw'))
#c = Calibration(start_date='2019-02-18', stop_date='2021-12-31', reduce=True)
#c = Calibration(start_date='2019-02-12', stop_date='2019-02-12', reduce=True)
#c = Calibration(reduce=True)
#f = fdict_list_collector(flat_fdict_creator, directory='/data/io/IoIO/raw/2019-08-25', imagetyp='flat', subdirs=CALIBRATION_SUBDIRS, glob_include=FLAT_GLOB)
#print(f[0])
#c = Calibration(start_date='2017-03-15', stop_date='2017-03-15', reduce=True)
#c = Calibration(stop_date='2017-05-10', reduce=True)
#c = Calibration(stop_date='2017-05-10')
#c.reduce_bias()
#c = Calibration(start_date='2020-07-11', stop_date='2020-07-11', reduce=True)
#c = Calibration(reduce=True)
#na_back_on = '/data/io/IoIO/raw/20210525/Jupiter-S007-R001-C001-Na_off.fts'
#ccd = CorData.read(na_back_on)
#nd_filter_mask(ccd)
```
#### File: jpmorgen/IoIO/SII_im.py
```python
import os
import numpy as np
from matplotlib import gridspec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
from skimage.measure import block_reduce
from astropy.io import fits
# Eventually I want to get proper C* WCS keywords into headers
from ReduceCorObs import plate_scale
origin = 'lower'
vmin = 20
vmax = 3000
block_size = 1
binning = 1
linewidth = 2
#rdir = '/data/io/IoIO/reduced/2018-05-05/'
#fnums = range(3,47,5)
#fig, axes = plt.subplots(nrows=len(fnums), ncols=1, figsize=(5,12))
#
#rdir = '/data/io/IoIO/reduced/2018-05-14/'
#fnums = range(1,21,3)
rdir = '/data/io/IoIO/reduced/2018-06-06/'
fnums = range(2,30,5)
fig, axes = plt.subplots(nrows=len(fnums), ncols=1, figsize=(5,7.6))
# https://scipy-cookbook.readthedocs.io/items/Rebinning.html
def rebin( a, newshape ):
'''Rebin an array to a new shape.
'''
assert len(a.shape) == len(newshape)
slices = [ slice(0,old, float(old)/new) for old,new in zip(a.shape,newshape) ]
coordinates = np.mgrid[slices]
indices = coordinates.astype('i') #choose the biggest smaller integer index
return a[tuple(indices)]
def rebin_factor( a, newshape ):
'''Rebin an array to a new shape.
newshape must be a factor of a.shape.
'''
assert len(a.shape) == len(newshape)
assert not np.sometrue(np.mod( a.shape, newshape ))
    slices = [ slice(None, None, old // new) for old, new in zip(a.shape, newshape) ]
    # index with a tuple of slices (indexing an array with a list is deprecated)
    return a[tuple(slices)]
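# Example (sketch): shrink a 100x100 array to 50x50 by nearest-neighbour
# sampling with rebin(); the array here is just a placeholder.
#
#   small = rebin(np.zeros((100, 100)), (50, 50))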
fnames = [os.path.join(rdir,
f'SII_on-band_{i:03d}r.fits') for i in fnums]
# https://stackoverflow.com/questions/6963035/pyplot-axes-labels-for-subplots
ax = fig.add_subplot(111, frameon=False)
# Turn off axis lines and ticks of the big subplot
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
ax.grid(False)
# Set common label
ax.set_xlabel('Rj')
ax.set_ylabel('Rj')
# https://stackoverflow.com/questions/13784201/matplotlib-2-subplots-1-colorbar
for fname, ax in zip(fnames, axes):
with fits.open(fname) as HDUList:
# https://stackoverflow.com/questions/7066121/how-to-set-a-single-main-title-above-all-the-subplots-with-pyplot
header = HDUList[0].header
#fig.suptitle(header['DATE-OBS'].split('T')[0])
im = HDUList[0].data
center = (np.asarray(im.shape)/2).astype(int)
im = im[center[0]-80:center[0]+80, center[1]-300:center[1]+300]
im = im
im = block_reduce(im, block_size=(block_size, block_size), func=np.median)
im = rebin(im, np.asarray(im.shape)/binning)
badc = np.where(im < 0)
im[badc] = 1
Rjpix = header['ANGDIAM']/2/plate_scale / (block_size*binning) # arcsec / (arcsec/pix) / (pix/bin)
nr, nc = im.shape
x = (np.arange(nc) - nc/2) / Rjpix
y = (np.arange(nr) - nr/2) / Rjpix
X, Y = np.meshgrid(x, y)
#plt.pcolormesh(X, Y, im, norm=LogNorm(vmin=vmin, vmax=vmax), cmap='YlOrRd')
plotted = ax.pcolormesh(X, Y, im, norm=LogNorm(vmin=vmin, vmax=vmax), cmap='gist_heat')
#plt.pcolormesh(X, Y, im, vmin=vmin, vmax=vmax, cmap='gist_heat')
# https://stackoverflow.com/questions/2934878/matplotlib-pyplot-preserve-aspect-ratio-of-the-plot
ax.axis('scaled')
ax.xaxis.set_visible(False)
ax.xaxis.set_visible(True)
#plt.xlabel('Rj')
fig.subplots_adjust(top=1,bottom=0.07,hspace=0,right=1)
#fig.subplots_adjust(right=0.85)
#cbar_ax = fig.add_axes([0.8, 0.10, 0.05, 0.8])
#cbar = fig.colorbar(plotted, cax=cbar_ax)
#cbar.ax.set_ylabel('Surface brightness (R)')
plt.savefig('SII_seq_transparent.png', transparent=True)
plt.show()
``` |
{
"source": "jpmorris33/syntheyes",
"score": 3
} |
#### File: pi/sopare/__init__.py
```python
import os
def run(readable_results, data, rawbuf):
    print(readable_results)
if('sigh' in readable_results):
os.system('sudo killall -SIGUSR2 eyes')
if('dammit' in readable_results):
os.system('sudo killall -SIGUSR1 eyes')
if('angry' in readable_results):
os.system('sudo killall -SIGUSR1 eyes')
if('gasp' in readable_results):
os.system('sudo killall -SIGPOLL eyes')
``` |
{
"source": "jpm/papercut",
"score": 2
} |
#### File: papercut/storage/maildir.py
```python
import dircache
from fnmatch import fnmatch
import glob
import os
import mailbox
import rfc822
import settings
import socket
import strutil
import string
import time
def maildir_date_cmp(a, b):
"""compare maildir file names 'a' and 'b' for sort()"""
a = os.path.basename(a)
b = os.path.basename(b)
a = int(a[: a.find(".")])
b = int(b[: b.find(".")])
return cmp(a, b)
class Papercut_Storage:
"""
Storage backend interface for mbox files
"""
_proc_post_count = 0
def __init__(self, group_prefix="papercut.maildir."):
self.maildir_dir = settings.maildir_path
self.group_prefix = group_prefix
def _get_group_dir(self, group):
return os.path.join(self.maildir_dir, group)
def _groupname2group(self, group_name):
return group_name.replace(self.group_prefix, '')
def _group2groupname(self, group):
return self.group_prefix + group
def _new_to_cur(self, group):
groupdir = self._get_group_dir(group)
for f in dircache.listdir(os.path.join(groupdir, 'new')):
ofp = os.path.join(groupdir, 'new', f)
nfp = os.path.join(groupdir, 'cur', f + ":2,")
os.rename(ofp, nfp)
def get_groupname_list(self):
groups = dircache.listdir(self.maildir_dir)
return ["papercut.maildir.%s" % k for k in groups]
def get_group_article_list(self, group):
self._new_to_cur(group)
groupdir = self._get_group_dir(group)
articledir = os.path.join(self._get_group_dir(group), 'cur')
articles = dircache.listdir(articledir)
articles.sort(maildir_date_cmp)
return articles
def get_group_article_count(self, group):
self._new_to_cur(group)
        articles = self.get_group_article_list(group)
return len(articles)
def group_exists(self, group_name):
groupnames = self.get_groupname_list()
found = False
for name in groupnames:
# group names are supposed to be case insensitive
if string.lower(name) == string.lower(group_name):
found = True
break
return found
def get_first_article(self, group_name):
return 1
def get_group_stats(self, group_name):
total, max, min = self.get_maildir_stats(group_name)
return (total, min, max, group_name)
def get_maildir_stats(self, group_name):
cnt = len(self.get_group_article_list(group_name))
return cnt, cnt, 1
def get_message_id(self, msg_num, group_name):
msg_num = int(msg_num)
group = self._groupname2group(group_name)
return '<%s@%s>' % (self.get_group_article_list(group)[msg_num - 1],
group_name)
def get_NEWGROUPS(self, ts, group='%'):
return None
# UNTESTED
def get_NEWNEWS(self, ts, group='*'):
gpaths = glob.glob(os.path.join(self.maildir_dir, group))
        new_articles = []
        for gpath in gpaths:
            articles = dircache.listdir(os.path.join(gpath, "cur"))
            group = os.path.basename(gpath)
            group_name = self._group2groupname(group)
            for article in articles:
                apath = os.path.join(gpath, "cur", article)
                if os.path.getmtime(apath) < ts:
                    continue
                # accumulate into a separate list so we don't modify the
                # directory listing we are iterating over
                new_articles.append("<%s@%s>" % (article, group_name))
        if len(new_articles) == 0:
            return ''
        else:
            return "\r\n".join(new_articles)
def get_GROUP(self, group_name):
group = self._groupname2group(group_name)
result = self.get_maildir_stats(group)
return (result[0], result[2], result[1])
def get_LIST(self, username=""):
result = self.get_groupname_list()
if len(result) == 0:
return ""
else:
groups = []
mutable = ('y', 'n')[settings.server_type == 'read-only']
for group_name in result:
group = self._groupname2group(group_name)
total, maximum, minimum = self.get_maildir_stats(group)
groups.append("%s %s %s %s" % (group_name, maximum,
minimum, mutable))
return "\r\n".join(groups)
def get_STAT(self, group_name, id):
# check if the message exists
id = int(id)
group = self._groupname2group(group_name)
return id <= self.get_group_article_count(group)
def get_message(self, group_name, id):
group = self._groupname2group(group_name)
id = int(id)
try:
article = self.get_group_article_list(group)[id - 1]
file = os.path.join(self.maildir_dir, group, "cur", article)
return rfc822.Message(open(file))
except IndexError:
return None
def get_ARTICLE(self, group_name, id):
msg = self.get_message(group_name, id)
if not msg:
return None
return ("\r\n".join(["%s" % string.strip(k) for k in msg.headers]), msg.fp.read())
def get_LAST(self, group_name, current_id):
if current_id <= 1:
return None
return current_id - 1
def get_NEXT(self, group_name, current_id):
group = self._groupname2group(group_name)
if current_id >= self.get_group_article_count(group):
return None
return current_id + 1
def get_HEAD(self, group_name, id):
msg = self.get_message(group_name, id)
headers = []
headers.append("Path: %s" % (settings.nntp_hostname))
headers.append("From: %s" % (msg.get('from')))
headers.append("Newsgroups: %s" % (group_name))
headers.append("Date: %s" % (msg.get('date')))
headers.append("Subject: %s" % (msg.get('subject')))
headers.append("Message-ID: <%s@%s>" % (id, group_name))
headers.append("Xref: %s %s:%s" % (settings.nntp_hostname,
group_name, id))
return "\r\n".join(headers)
def get_BODY(self, group_name, id):
msg = self.get_message(group_name, id)
if msg is None:
return None
else:
return strutil.format_body(msg.fp.read())
def get_XOVER(self, group_name, start_id, end_id='ggg'):
group = self._groupname2group(group_name)
start_id = int(start_id)
if end_id == 'ggg':
end_id = self.get_group_article_count(group)
else:
end_id = int(end_id)
overviews = []
for id in range(start_id, end_id + 1):
msg = self.get_message(group_name, id)
if msg is None:
break
author = msg.get('from')
formatted_time = msg.get('date')
message_id = self.get_message_id(id, group_name)
line_count = len(msg.fp.read().split('\n'))
xref = 'Xref: %s %s:%d' % (settings.nntp_hostname, group_name, id)
if msg.get('references') is not None:
reference = msg.get('references')
else:
reference = ""
# message_number <tab> subject <tab> author <tab> date <tab>
# message_id <tab> reference <tab> bytes <tab> lines <tab> xref
overviews.append("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % \
(id, msg.get('subject'), author,
formatted_time, message_id, reference,
len(strutil.format_body(msg.fp.read())),
line_count, xref))
return "\r\n".join(overviews)
# UNTESTED
def get_XPAT(self, group_name, header, pattern, start_id, end_id='ggg'):
group = self._groupname2group(group_name)
header = header.upper()
start_id = int(start_id)
if end_id == 'ggg':
end_id = self.get_group_article_count(group)
else:
end_id = int(end_id)
hdrs = []
for id in range(start_id, end_id + 1):
if header == 'MESSAGE-ID':
msg_id = self.get_message_id(id, group_name)
if fnmatch(msg_id, pattern):
hdrs.append('%d %s' % (id, msg_id))
continue
elif header == 'XREF':
xref = '%s %s:%d' % (settings.nntp_hostname, group_name, id)
if fnmatch(xref, pattern):
hdrs.append('%d %s' % (id, xref))
continue
msg = self.get_message(group_name, id)
if header == 'BYTES':
msg.fp.seek(0, 2)
bytes = msg.fp.tell()
if fnmatch(str(bytes), pattern):
hdrs.append('%d %d' % (id, bytes))
elif header == 'LINES':
lines = len(msg.fp.readlines())
if fnmatch(str(lines), pattern):
hdrs.append('%d %d' % (id, lines))
else:
hdr = msg.get(header)
if hdr and fnmatch(hdr, pattern):
hdrs.append('%d %s' % (id, hdr))
if len(hdrs):
return "\r\n".join(hdrs)
else:
return ""
def get_LISTGROUP(self, group_name):
        group = self._groupname2group(group_name)
        ids = range(1, self.get_group_article_count(group) + 1)
ids = [str(id) for id in ids]
return "\r\n".join(ids)
def get_XGTITLE(self, pattern=None):
# XXX no support for this right now
return ''
def get_XHDR(self, group_name, header, style, ranges):
print group_name, header, style, ranges
group = self._groupname2group(group_name)
header = header.upper()
if style == 'range':
if len(ranges) == 2:
range_end = int(ranges[1])
else:
range_end = self.get_group_article_count(group)
ids = range(int(ranges[0]), range_end + 1)
else:
            ids = (int(ranges[0]),)
hdrs = []
for id in ids:
if header == 'MESSAGE-ID':
hdrs.append('%d %s' % \
(id, self.get_message_id(id, group_name)))
continue
elif header == 'XREF':
hdrs.append('%d %s %s:%d' % (id, settings.nntp_hostname,
group_name, id))
continue
msg = self.get_message(group_name, id)
if header == 'BYTES':
msg.fp.seek(0, 2)
hdrs.append('%d %d' % (id, msg.fp.tell()))
elif header == 'LINES':
hdrs.append('%d %d' % (id, len(msg.fp.readlines())))
else:
hdr = msg.get(header)
if hdr:
hdrs.append('%d %s' % (id, hdr))
if len(hdrs) == 0:
return ""
else:
return "\r\n".join(hdrs)
def do_POST(self, group_name, body, ip_address, username=''):
self._proc_post_count += 1
count = self._proc_post_count
ts = [int(x) for x in str(time.time()).split(".")]
file = "%d.M%dP%dQ%d.%s" % (ts[0], ts[1], os.getpid(),
count, socket.gethostname())
group = self._groupname2group(group_name)
groupdir = self._get_group_dir(group)
tfpath = os.path.join(self.maildir_dir, groupdir, "tmp", file)
nfpath = os.path.join(self.maildir_dir, groupdir, "new", file)
fd = open(tfpath, 'w')
fd.write(body)
        fd.close()
os.rename(tfpath, nfpath)
return 1
``` |
{
"source": "jpmv27/android-scraper",
"score": 3
} |
#### File: jpmv27/android-scraper/android_scraper_2018.py
```python
import argparse
import os
import resource
import subprocess
import time
from urllib.parse import urljoin
from bs4 import BeautifulSoup as bs
from PyPDF2 import PdfFileReader, PdfFileWriter
import requests
# Use html5lib because html.parser makes a mess of malformed HTML
PARSER = 'html5lib'
def remove_wayback_header(url):
'''
If the URL is a Wayback Machine URL, modify the URL to hide the
Wayback Machine toolbar
'''
    if url.find('web.archive.org') == -1:
        return url
spot = url.find('/http')
return url[:spot] + 'if_' + url[spot:]
def save_url_to_pdf(url, file_name):
'''
Save the URL to the specified PDF file
'''
url = remove_wayback_header(url)
subprocess.run(('google-chrome', '--headless', '--print-to-pdf=' + \
file_name, url), stderr=subprocess.DEVNULL, check=True)
def url_to_filename(url):
'''
Convert URL to filename
'''
name = url[url.find('//') + 2:]
for char in '"!/. ?=:\'':
name = name.replace(char, '_')
return name
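# Example (sketch):
#   url_to_filename('https://developer.android.com/guide')  ->  'developer_android_com_guide'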
class PdfOutput:
'''
Save URLs to PDF and accumulate into one output file
'''
class Bookmark: # pylint: disable=too-few-public-methods
'''
Represents a bookmark for a heading
'''
def __init__(self, title, *, indent=True):
self.title = title
self.pdf_ref = None
self.indent = indent
def is_pending(self):
'''
Check whether the bookmark has been added or not
'''
return self.pdf_ref is None
def __init__(self, file_name, *, delay=1, no_exec=False):
self.file_name = file_name
self.delay = delay
self.no_exec = no_exec
self.writer = PdfFileWriter()
self.files_to_clean_up = []
self.bookmark_stack = []
def add_heading(self, bookmark_title):
'''
Add a heading
'''
self.bookmark_stack.append(self.Bookmark(bookmark_title, indent=False))
def add_page(self, url, bookmark_title, *, bookmark=True):
'''
Add the URL to the PDF
'''
if url.endswith('.pdf'):
return
time.sleep(self.delay)
file_name = self.make_unique_filename_ext(url_to_filename(url), '.pdf')
if self.no_exec:
print('Adding page', url)
else:
save_url_to_pdf(url, file_name)
page_index = self.writer.getNumPages()
self.append_pdf_to_output(file_name)
self.create_pending_bookmarks(page_index)
if bookmark:
self.bookmark_page(bookmark_title, page_index)
def append_pdf_to_output(self, file_name):
'''
Append the PDF file to the output, remember file to clean up
'''
input_file = open(file_name, 'rb')
input_stream = PdfFileReader(input_file)
self.writer.appendPagesFromReader(input_stream)
self.files_to_clean_up.append(file_name)
def bookmark_page(self, title, page_num):
'''
Bookmark the page
'''
parent = None
if self.bookmark_stack:
parent = self.bookmark_stack[-1].pdf_ref
self.writer.addBookmark(title, page_num, parent=parent)
def clean_up_files(self):
'''
Delete all the files to be cleaned-up
'''
for file in self.files_to_clean_up:
os.remove(file)
def create_pending_bookmarks(self, page_num):
'''
Create heading bookmarks that have not yet been created
'''
parent = None
for bookmark in self.bookmark_stack:
if bookmark.is_pending():
bookmark.pdf_ref = self.writer.addBookmark( \
bookmark.title, page_num, parent=parent, \
italic=not bookmark.indent)
if bookmark.indent:
parent = bookmark.pdf_ref
def finish(self):
'''
Wrap-up processing by writing the output file and cleaning-up
'''
if not self.no_exec:
self.write_output()
self.clean_up_files()
def make_unique_filename_ext(self, file_name, ext):
'''
Check a file name and extension for uniqueness and append
a suffix if necessary to make it unique
'''
suffix = 2
tentative_name = file_name
while tentative_name + ext in self.files_to_clean_up:
tentative_name = file_name + str(suffix)
suffix += 1
return tentative_name + ext
def pop_heading(self):
'''
Outdent subsequent bookmarks
'''
self.bookmark_stack.pop()
while self.bookmark_stack and \
(not self.bookmark_stack[-1].indent):
self.bookmark_stack.pop()
def push_heading(self, bookmark_title):
'''
Add a heading and make subsequent bookmarks a child of
this heading
'''
self.bookmark_stack.append(self.Bookmark(bookmark_title))
def write_output(self):
'''
Generate the output file
'''
output_file = open(self.file_name, 'wb')
self.writer.write(output_file)
output_file.close()
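    # Example usage (sketch; the URL and output name are hypothetical, and
    # add_page() shells out to headless Chrome for each page):
    #
    #   out = PdfOutput('example.pdf', delay=1)
    #   out.push_heading('Example site')
    #   out.add_page('https://developer.android.com/', 'Home')
    #   out.pop_heading()
    #   out.finish()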
def title_to_bookmark_title(title):
'''
Extract the bookmark name from a page title
'''
vertical_bar = title.find('|')
    if vertical_bar == -1:
        return title
return title[:vertical_bar - 1].strip()
def read_page(url):
'''
Read page at URL
'''
response = requests.get(url)
response.raise_for_status()
return bs(response.text, PARSER)
def url_to_absolute(site_url, page_url):
'''
Resolve page URL to absolute URL if relative
'''
return urljoin(site_url, page_url)
def scrape_side_menu_item(site_url, item, output):
'''
Scrape a chapter with sub-chapters, represented by an expandable
side menu item
Iterate through the chapters in the item, or save the item if
there are no sub-items
'''
if 'devsite-nav-item-section-expandable' in item['class']:
nav_text = item.find('span')
output.push_heading(nav_text.text.strip())
for subitem in item.find('ul').find_all('li', recursive=False):
scrape_side_menu_item(site_url, subitem, output)
output.pop_heading()
return
a_tag = item.find('a')
output.add_page(url_to_absolute(site_url, a_tag['href']), \
a_tag.text.strip())
def scrape_upper_tab(site_url, tab, output):
'''
Scrape a major section, represented by an upper tab
Iterate through the chapters in the side menu, or save the upper
tab page if there is no side menu. Side menu items may be nested
'''
a_tag = tab.find('a')
tab_url = a_tag['href']
page = read_page(url_to_absolute(site_url, tab_url))
tag = page.select_one('nav.devsite-section-nav')
if tag:
side_menu = tag.select_one('ul.devsite-nav-list')
else:
side_menu = None
if side_menu:
output.push_heading(a_tag.text.strip())
for item in side_menu.find_all('li', recursive=False):
scrape_side_menu_item(site_url, item, output)
output.pop_heading()
return
output.add_page(url_to_absolute(site_url, tab_url), \
title_to_bookmark_title(page.title.string))
def scrape_site(url, output):
'''
Scrape the site
Save the site main page, then iterate through all the upper tabs
'''
page = read_page(url)
output.push_heading(page.title.string.strip())
output.add_page(url, url, bookmark=False)
for tag in page.select('div.devsite-header-upper-tabs'):
for tab in tag.find_all('li'):
scrape_upper_tab(url, tab, output)
output.pop_heading()
def parse_command_line():
'''
Parse the command line and save options
'''
parser = argparse.ArgumentParser('Scrape an android.com site to PDF')
parser.add_argument('url', type=str, metavar='URL')
parser.add_argument('-o', '--output', type=str, metavar='OUTPUT', \
default='scraper.pdf', help='output file name')
parser.add_argument('--delay', type=int, default=1, \
metavar='DELAY', help='delay in seconds between requests')
parser.add_argument('-N', '--no-exec', action='store_true', \
help="don't execute, just show what would be done")
return parser.parse_args()
def main():
'''
Parse arguments and perform scraping
'''
try:
args = parse_command_line()
output = PdfOutput(args.output, no_exec=args.no_exec, delay=args.delay)
# developer.android.com causes "too many open files" error
resource.setrlimit(resource.RLIMIT_NOFILE, (10000, 10000))
scrape_site(args.url, output)
output.finish()
print('Done')
except KeyboardInterrupt:
print('Cancelled')
main()
``` |
{
"source": "jpmvferreira/gwcatalog",
"score": 3
} |
#### File: gwcatalog/gwcatalog/auxiliary.py
```python
from scipy.optimize import fsolve
from random import uniform, gauss
import numpy as np
import sys
import os
# local imports
from .cosmology import H, dL
# get N randomly generated events from a given distribution, using rejection
def GetRandom(distribution, x_min, x_max, y_min, y_max, N=1):
counter = 0
events = []
while counter < N:
x = uniform(x_min, x_max)
y = uniform(y_min, y_max)
if y < distribution(x):
events.append(x)
counter += 1
return events
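# Example (sketch): draw 100 samples from a standard normal restricted to
# [-5, 5] by rejection sampling; the bounds and distribution are illustrative.
#
#   pdf = lambda x: np.exp(-x**2 / 2)
#   samples = GetRandom(pdf, -5, 5, 0, 1, N=100)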
# get the theoretical line for luminosity distance
def dL_line(zmin, zmax, N=1000):
# protection against invalid arguments
if (zmin < 0 or zmax < 0) or (zmax < zmin):
raise Exception("Please specify a valid redshifts interval.")
# create a "solid line" and compute distances for that line
line = np.linspace(zmin, zmax, N)
distances = [dL(i, H) for i in line]
return line, distances
# convert luminosity distance to redshift
def dL_to_redshift(distance, z0=0):
# auxiliary function to solve using scipy
def func(z, distance, H):
return distance - dL(z, H)
# compute the redshift for the provided luminosity distance
redshift = fsolve(func, z0, args=(distance, H))[0]
return redshift
# distribute the events around the most likely value using a gaussian distribution, with protection against negative values
def distribute(distances, errors):
for i in range(0, len(distances)):
newdistance = -1
while newdistance < 0:
newdistance = gauss(distances[i], errors[i])
distances[i] = newdistance
return distances, errors
```
#### File: gwcatalog/gwcatalog/LIGO.py
```python
from scipy.interpolate import CubicSpline
from scipy.misc import derivative
import matplotlib.pyplot as plt
import numpy as np
# local imports
from .auxiliary import GetRandom, distribute, dL_to_redshift
from .cosmology import dL, H
# non-normalized luminosity distance probability distribution (in Gpc)
# from figure 2 of arXiv:1901.03321, LIGO A+ design
def dLdist():
distances = [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96]
probabilities = [0.0, 0.00234, 0.00608, 0.01286, 0.0288, 0.04904, 0.07096, 0.09843, 0.12853, 0.1602, 0.19794, 0.23544, 0.27527, 0.31834, 0.35993, 0.40257, 0.44712, 0.48987, 0.53307, 0.57755, 0.61882, 0.65742, 0.69545, 0.73415, 0.76922, 0.80232, 0.83092, 0.85656, 0.88051, 0.90033, 0.91959, 0.93793, 0.94671, 0.95304, 0.9611, 0.96806, 0.97041, 0.97367, 0.96772, 0.96098, 0.95561, 0.93931, 0.92554, 0.91545, 0.89386, 0.87365, 0.86377, 0.84349, 0.82681, 0.8074, 0.78603, 0.7683, 0.75526, 0.73969, 0.71781, 0.69353, 0.67879, 0.66231, 0.64267, 0.61952, 0.59743, 0.58821, 0.57504, 0.55758, 0.53855, 0.5067, 0.47277, 0.45254, 0.42478, 0.40875, 0.38262, 0.362, 0.33489, 0.31254, 0.29549, 0.2717, 0.24877, 0.22818, 0.20365, 0.18635, 0.17054, 0.15791, 0.14347, 0.1261, 0.11144, 0.09308, 0.07149, 0.05694, 0.04795, 0.04173, 0.0327, 0.02416, 0.01511, 0.00749, 0.00295, 0.0015, 0.0]
# get the luminosity distance limits
dLmin = min(distances)
dLmax = max(distances)
# get probability limits
dmin = min(probabilities)
dmax = max(probabilities)
# interpolate luminosity distance probability function
f = CubicSpline(distances, probabilities)
return (f, dLmin, dLmax, dmin, dmax)
# errors for the luminosity distance
# from arXiv:2007.13791
def dLerror(z, dL, H):
return 0.5625*dL(z, H)**2
def zerror(z):
return 0.005*(1+z)
def error(z, dL, H):
# luminosity distance error
distanceerror = dLerror(z, dL, H)
# redshift error
redshifterror = zerror(z)
# propagate the redshift error to the luminosity distance
propagatedredshifterror = derivative(dL, z, dx=1e-6, args=(H,)) * redshifterror
# get total error
error = (distanceerror**2 + propagatedredshifterror**2)**0.5
return error
# generate the forecast LIGO events
def generate(events=0, redshifts=[], ideal=False):
# specify either events or redshifts
if bool(events) + bool(redshifts) != 1:
raise Exception("Specify either the number of events or their redshifts")
# get luminosity distance distribution function
f, dLmin, dLmax, dmin, dmax = dLdist()
# get luminosity distance and error for specific redshifts
if redshifts:
# compute valid redshift limits
zmin = dL_to_redshift(dLmin)
zmax = dL_to_redshift(dLmax)
# protect against out of bound redshifts
if min(redshifts) < zmin or max(redshifts) > zmax:
raise Exception(f"Redshift limits are out of bounds. Lowest and highest redshift for LIGO are z={zmin} and z={zmax} correspondingly")
distances = [dL(z, H) for z in redshifts]
errors = [error(z, dL, H) for z in redshifts]
# generate events according to the redshift distribution
else:
distances = GetRandom(f, dLmin, dLmax, dmin, dmax, N=events)
# get the corresponding redshift for each luminosity distance
redshifts = [dL_to_redshift(i) for i in distances]
# get the error for each event
errors = [error(z, dL, H) for z in redshifts]
# distribute the events around the most likely value using a gaussian distribution
if not ideal:
distances, errors = distribute(distances, errors)
return redshifts, distances, errors
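# Example usage (sketch; the event count and redshifts below are illustrative):
#
#   redshifts, distances, errors = generate(events=10)
#   redshifts, distances, errors = generate(redshifts=[0.01, 0.05], ideal=True)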
# plot the luminosity distance distribution
def plot_dist(output=None):
    # get the luminosity distance distribution and evaluate it on a grid
    f, dLmin, dLmax, dmin, dmax = dLdist()
    distances = np.linspace(dLmin, dLmax, 1000)
    probabilities = f(distances)
# plot and show
plt.plot(distances, probabilities)
plt.title("Replicating figure 2 of arXiv:1901.03321")
plt.gca().axes.yaxis.set_ticklabels([]) # somehow removes ticks without removing grid
plt.grid()
plt.xlabel("luminosity distance (Gpc)")
plt.ylabel("Probability distribution function")
# output or show
if output:
plt.savefig(output, transparent=True)
else:
plt.show()
return
# plot the error as a function of redshift and luminosity distance
def plot_error(output=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
    # get luminosity distance boundaries and convert them to redshift boundaries
    f, dLmin, dLmax, dmin, dmax = dLdist()
    zmin = dL_to_redshift(dLmin)
    zmax = dL_to_redshift(dLmax)
# draw a line for the redshifts
redshifts = np.linspace(zmin, zmax, 1000)
# get luminosity distances
distances = [dL(z, H) for z in redshifts]
# get total error
errors = [error(z, dL, H) for z in redshifts]
# get the luminosity distance error
dLerrors = [dLerror(z, dL, H) for z in redshifts]
# plot luminosity distance error
ax1.plot(distances, dLerrors, label="$\sigma_{d_L}(d_L)$")
ax1.plot(distances, errors, label="$\sigma(d_L)$")
ax1.grid()
ax1.set_xlabel("luminosity distance (Gpc)")
ax1.set_ylabel("error (Gpc)")
ax1.legend()
# get error for redshift
zerrors = [zerror(z) for z in redshifts]
# plot redshift error
ax2.plot(redshifts, zerrors, label="error")
ax2.grid()
ax2.set_xlabel("redshift")
ax2.set_ylabel("error")
ax2.legend()
# output or show
if output:
plt.savefig(output, transparent=True)
else:
plt.show()
return
``` |
{
"source": "jpmvferreira/simplifiedmc",
"score": 2
} |
#### File: simplifiedmc/simplifiedmc/emcee.py
```python
from multiprocessing import cpu_count
import matplotlib.patches as mpatches
from random import gauss, uniform
import matplotlib.pyplot as plt
import numpy as np
import yaml
import sys
# fetch the arguments from CLI and the configuration file
# check for incompatible or missing arguments
# set default arguments value
def load(args):
# required arguments
model = args.model
data = args.data
# config file
if args.yml:
yml = args.yml
else:
os.system("echo 'none: none' > /tmp/dummy.yml")
yml = "/tmp/dummy.yml"
# get config arguments from file and overwrite if provided in the CLI
with open(yml, "r") as file:
yml_loaded = yaml.full_load(file)
names = eval(args.names) if args.names else yml_loaded.get("names")
labels = eval(args.labels) if args.labels else yml_loaded.get("labels")
initial = eval(args.initial) if args.initial else yml_loaded.get("initial")
markers = eval(args.markers) if args.markers else yml_loaded.get("markers")
percentage = args.percentage / 100 if args.percentage else yml_loaded.get("percentage") / 100
samples = args.samples if args.samples else yml_loaded.get("samples")
check = args.check if args.check else yml_loaded.get("check")
maxsteps = args.maxsteps if args.maxsteps else yml_loaded.get("maxsteps")
walkers = args.walkers if args.walkers else yml_loaded.get("walkers")
processes = args.processes if args.processes else yml_loaded.get("processes")
# output arguments
output = args.output
savechain = args.save_chain
gzip = args.gzip
lzf = args.lzf
tmp = args.tmp
shm = args.shm
thin = args.thin
timeseries = args.time_series
noshow = args.no_show
noprogress = args.no_progress
# check if everything that is required is provided
if not names:
raise Exception("Parameters names must either be provided in CLI or in the configuration file")
if not labels:
labels = names
if not initial:
raise Exception("Initial confitions must either be provided in CLI or in the configuration file")
if not percentage:
raise Exception("The percentage to consider that convergence is met must either be provided in the CLI or in the configuration file")
if not samples:
raise Exception("The number of samples to compute when converge is met must either be provided in CLI or in the configuration file")
# set defaults
if not check:
check = 1000
if not maxsteps:
maxsteps = 100000
if not walkers:
walkers = 32
if not processes:
processes = cpu_count()
if not markers:
markers = {}
for name in names:
try:
markers[name]
except KeyError:
markers[name] = None
# check if sizes match
if not ( len(names) == len(labels) == len(initial) ):
raise Exception(f"number of dimensions missmatch: len(names) = {len(names)}, len(labels) = {len(labels)}, len(initial) = {len(initial)}")
# exit if output is not provided and noshow is
if not output and noshow:
raise Exception("Flag -n, --noshow is provided without providing -o, --output. This means that the output will not be shown nor saved")
# exit if savechain is provided but output is not
if not output and savechain:
raise Exception("Flag --save-chain is set, but output wasn't provided (using --output or -o)")
# exit if both tmp and shm are provided
if tmp and shm:
raise Exception("Flags --tmp and --shm are mutually exclusive, pick the one which is mounted as tmpfs in your system")
# exit if tmp or shm is provided, but chain is not
if (tmp or shm) and not savechain:
raise Exception("Flag --tmp requires the usage of --save-chain")
# exit if the number of steps to compute is lower than the maximum number of steps
if maxsteps < samples:
raise Exception("The maximum number of steps (-M, --maxsteps) must always be larger than the number of steps to compute (-s, --samples)")
# check for single compression algorithm
if gzip and lzf:
raise Exception("--gzip and --lzf are mutually exclusive, pick one compression algorithm")
# evaluate initial conditions to Python functions, and turn into an emcee compatible numpy array
    # we're returning both init and initial because the latter is required to output the configuration used
init = np.empty([walkers, len(names)])
for i in range(0, walkers):
for j in range(0, len(names)):
init[i][j] = eval(initial[names[j]])
    # auxiliary variable required here and there
ndim = len(names)
return model, data, yml, names, labels, initial, markers, percentage, samples, check, maxsteps, walkers, processes, output, savechain, gzip, lzf, tmp, shm, thin, timeseries, noshow, noprogress, init, ndim
# save configuration and output arguments to files
def save(yml, names, labels, initial, markers, percentage, samples, check, maxsteps, walkers, processes, outputyml, output, savechain, gzip, lzf, tmp, shm, thin, timeseries, noshow, noprogress):
# orderly save the configuration arguments
with open(yml, "w") as file:
file.write("## config.yml\n")
file.write("# backup of all the configuration arguments used for this specific run\n")
file.write("\n")
yaml.dump({"names": names}, file)
file.write("\n")
yaml.dump({"labels": labels}, file)
file.write("\n")
yaml.dump({"initial": initial}, file, sort_keys=False)
file.write("\n")
yaml.dump({"markers": markers}, file, sort_keys=False)
file.write("\n")
yaml.dump({"percentage": percentage * 100}, file)
file.write("\n")
yaml.dump({"samples": samples}, file)
file.write("\n")
yaml.dump({"check": check}, file)
file.write("\n")
yaml.dump({"maxsteps": maxsteps}, file)
file.write("\n")
yaml.dump({"walkers": walkers}, file)
file.write("\n")
yaml.dump({"processes": processes}, file)
file.write("\n")
# orderly save the output arguments
with open(outputyml, "w") as file:
file.write("## output.yml\n")
file.write("# backup of all the output arguments used for this specific run\n")
file.write("\n")
yaml.dump({"output": output}, file)
file.write("\n")
yaml.dump({"save-chain": savechain}, file)
file.write("\n")
yaml.dump({"gzip": gzip}, file)
file.write("\n")
yaml.dump({"lzf": lzf}, file)
file.write("\n")
yaml.dump({"tmp": tmp}, file)
file.write("\n")
yaml.dump({"shm": shm}, file)
file.write("\n")
yaml.dump({"thin": thin}, file)
file.write("\n")
yaml.dump({"timeseries": timeseries}, file)
file.write("\n")
yaml.dump({"noshow": noshow}, file)
file.write("\n")
yaml.dump({"noprogress": noprogress}, file)
file.write("\n")
return
def autocorrelation(correlation, samples, check, index, laststep, delta, output=None, noshow=False):
if type(correlation) != tuple:
correlation = (correlation,)
for autocorr in correlation:
n = check * np.arange(1, index + 1)
y = autocorr[:index]
xmin = 0
xmax = n.max() + check
ymin = y.min() - 0.1 * (y.max() - y.min())
ymax = y.max() + 0.1 * (y.max() - y.min())
plt.plot(n, y, marker=".")
region = mpatches.Rectangle((laststep-samples, autocorr[index-1] - delta), samples, 2*delta, color="red", alpha=0.2, label="convergence region")
plt.gca().add_patch(region)
plt.grid()
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.legend(handles=[region])
plt.xlabel("number of steps")
plt.ylabel(r"mean $\hat{\tau}$")
if output:
plt.savefig(output)
if not noshow:
plt.show()
plt.close()
return
# plot the time series
def timeseries(steps, labels, ndim, discard, output=None, noshow=False):
fig, axes = plt.subplots(ndim, figsize=(10, 7), sharex=True)
for i in range(ndim):
ax = axes[i]
ax.plot(steps[:, :, i], "k", alpha=0.3)
ax.set_xlim(0, len(steps))
ax.set_ylabel("$" + labels[i] + "$")
ax.axvline(x=discard, linestyle="--", color="red")
ax.yaxis.set_label_coords(-0.1, 0.5)
axes[-1].set_xlabel("step number")
if output:
plt.savefig(output)
if not noshow:
plt.show()
plt.close()
return
# print run information
def runlog(timeelapsed, samples, discard, converged, file=sys.stdout):
if file != sys.stdout:
file = open(file, "w")
print("## run.log", file=file)
print("# information regarding the execution of this program", file=file)
print("# the execution time has the format hours:minutes:seconds", file=file)
print("", file=file)
print(f"time: {timeelapsed}", file=file)
print(f"converged: {converged > samples}", file=file)
print(f"samples: {samples}", file=file)
print(f"discard: {discard}", file=file)
if file != sys.stdout:
file.close()
return
```
#### File: simplifiedmc/simplifiedmc/shared.py
```python
import matplotlib.pyplot as plt
from getdist import plots
import sys
import os
# corner plot
def corner(mcsamples, markers, output=None, noshow=False, filled_alpha=None, contour_alpha=0.5):
colors = ["#006FED", "#E03424", "#008000", "#9c5500", "#9224e0", "#ed00e6", "#f2e400", "#00f2e4", "#6fd95f"]
contour_colors = ["#006FED", "#E03424", "#008000", "#9c5500", "#9224e0", "#ed00e6", "#f2e400", "#00f2e4", "#6fd95f"]
g = plots.get_subplot_plotter()
g.settings.alpha_factor_contour_lines=contour_alpha
if filled_alpha:
alpha = []
for i in range(len(mcsamples)):
alpha.append({"alpha": filled_alpha})
g.triangle_plot(mcsamples, filled=True, markers=markers, colors=colors, contour_colors=contour_colors, contour_args=alpha)
else:
g.triangle_plot(mcsamples, filled=True, markers=markers, contour_colors=contour_colors, colors=colors)
if output:
plt.savefig(output, transparent=True)
if not noshow:
plt.show()
plt.close()
return
# print system information
def syslog(file=sys.stdout):
if file != sys.stdout:
file = open(file, "w")
print("## sys.log", file=file)
print("# information regarding the system and the date in which this run was executed", file=file)
print("", file=file)
print("$ date", file=file)
date = os.popen("date").read()[:-1]
print(f"{date}", file=file)
print("", file=file)
print("$ uname -a", file=file)
uname = os.popen("uname -a").read()[:-1]
print(f"{uname}", file=file)
print("", file=file)
print("$ lscpu", file=file)
lscpu = os.popen("lscpu").read()[:-1]
print(f"{lscpu}", file=file)
print("", file=file)
if file != sys.stdout:
file.close()
return
# print confidence intervals in a latex table
def CIs(mcsamples, file=sys.stdout):
if file != sys.stdout:
file = open(file, "w")
print("## CIs.tex", file=file)
print("# latex table for the 1 and 2 sigma distribution of each parameter", file=file)
print("", file=file)
print(mcsamples.getTable(limit=1).tableTex().replace("\n\n", "\n"), file=file)
print("", file=file)
print(mcsamples.getTable().tableTex().replace("\n\n", "\n"), file=file)
if file != sys.stdout:
file.close()
return
```
#### File: simplifiedmc/simplifiedmc/stan.py
```python
from multiprocessing import cpu_count
from random import gauss, uniform
import matplotlib.pyplot as plt
import numpy as np
import yaml
import sys
import os
# fetch the arguments from CLI and from the configuration file and check for incompatible or missing arguments
def load(args):
    # required arguments
model = args.model
data = args.data
# config file
if args.yml:
yml = args.yml
else:
os.system("echo 'none: none' > /tmp/dummy.yml")
yml = "/tmp/dummy.yml"
# get config arguments from file or CLI
with open(yml, "r") as file:
yml_loaded = yaml.full_load(file)
names = eval(args.names) if args.names else yml_loaded.get("names")
labels = eval(args.labels) if args.labels else yml_loaded.get("labels")
initial = eval(args.initial) if args.initial else yml_loaded.get("initial")
markers = eval(args.markers) if args.markers else yml_loaded.get("markers")
samples = args.samples if args.samples else yml_loaded.get("samples")
warmup = args.warmup if args.warmup else yml_loaded.get("warmup")
chains = args.chains if args.chains else yml_loaded.get("chains")
# output arguments
output = args.output
savechain = args.save_chain
gzip = args.gzip
lzf = args.lzf
noshow = args.no_show
# check if everything is provided
if not names:
raise Exception("Parameters names must be provided either in CLI or configuration file")
if not labels:
labels = names
if not initial:
raise Exception("Initial confitions must be provided either in CLI or configuration file")
if not samples:
raise Exception("The number of steps to sample the posterior distribution, after the warmup, must be provided either in CLI or configuration file")
if not warmup:
raise Exception("The number of steps to warmup each chain must be provided either in CLI or configuration file")
# set default values if not provided
if not chains:
chains = cpu_count()
if not markers:
markers = {}
for name in names:
try:
markers[name]
except KeyError:
markers[name] = None
if gzip == []:
gzip = 4
elif gzip:
gzip = int(gzip[0])
# check gzip values
if gzip:
if gzip < 0 or gzip > 9:
raise Exception(f"Value of gzip must be between 0 and 9 (inclusive), provided value was {gzip}")
# check if sizes match
if not ( len(names) == len(labels) == len(initial) ):
raise Exception(f"number of dimensions missmatch: len(names) = {len(names)}, len(labels) = {len(labels)}, len(initial) = {len(initial)}")
# check for single compression algorithm
if gzip and lzf:
raise Exception("--gzip and --lzf are mutually exclusive, pick one compression algorithm")
# if noshow is provided, output must also be provided
if noshow and not output:
raise Exception("Toggling -n, --noshow requires to provide an output folder, otherwise output will not be shown nor saved.")
    # evaluate the initial condition expression(s) once per chain
    # we're returning both init and initial because the latter is required to output the configuration used
init = []
for i in range(0, chains):
init.append({})
for name in names:
init[i][name] = eval(initial[name])
# number of parameters, useful later
ndim = len(names)
return model, data, yml, names, labels, initial, markers, samples, warmup, chains, output, savechain, gzip, lzf, noshow, init, ndim
# save configuration used to file
def save(yml, names, labels, initial, markers, samples, warmup, chains, outputyml, output, savechain, gzip, lzf, noshow):
# orderly save the configuration options
with open(yml, "w") as file:
file.write("## config.yml\n")
file.write("# backup of all the configuration arguments used for this specific run\n")
file.write("\n")
yaml.dump({"names": names}, file)
file.write("\n")
yaml.dump({"labels": labels}, file)
file.write("\n")
yaml.dump({"initial": initial}, file, sort_keys=False)
file.write("\n")
yaml.dump({"markers": markers}, file, sort_keys=False)
file.write("\n")
yaml.dump({"samples": samples}, file)
file.write("\n")
yaml.dump({"warmup": warmup}, file)
file.write("\n")
yaml.dump({"chains": chains}, file)
# orderly save the output arguments
with open(outputyml, "w") as file:
file.write("## output.yml\n")
file.write("# backup of all the output arguments used for this specific run\n")
file.write("\n")
yaml.dump({"output": output}, file)
file.write("\n")
yaml.dump({"save-chain": savechain}, file)
file.write("\n")
yaml.dump({"gzip": gzip}, file)
file.write("\n")
yaml.dump({"lzf": lzf}, file)
file.write("\n")
yaml.dump({"noshow": noshow}, file)
file.write("\n")
return
# convert fit to a numpy array of size [steps, chains, ndim], with all of the computed steps
def getsteps(fit, names, samples, warmup, chains, ndim):
totalsteps = np.empty([samples+warmup, chains, ndim])
for i in range(ndim):
for j in range(chains):
totalsteps[:, j, i] = fit[names[i]][0][j::chains]
return totalsteps
# flatten total steps (i.e. remove chain information) and remove warmup to a numpy array of size [steps, ndim]
def getflatsamples(samples, warmup, chains, ndim, totalsteps):
flatsamples = np.empty([samples*chains, ndim])
for i in range(ndim):
start = 0
for j in range(chains):
flatsamples[start::chains, i] = totalsteps[warmup:, j, i]
start += 1
return flatsamples
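# Shape note (added for clarity): `totalsteps` has shape [samples+warmup, chains, ndim];
# getflatsamples() drops the first `warmup` steps and interleaves the chains
# (chain 0 step 0, chain 1 step 0, chain 0 step 1, ...), yielding a flat
# [samples*chains, ndim] array.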
# plot time series
def timeseries(totalsteps, names, labels, markers, samples, warmup, chains, ndim, output=None, noshow=False):
fig, axes = plt.subplots(ndim, figsize=(10, 7), sharex=True)
steps = np.arange(samples+warmup)
for i in range(ndim):
ax = axes[i]
for j in range(chains):
ax.plot(steps, totalsteps[:, j, i], alpha=0.75)
ax.set_xlim(0, samples+warmup)
ax.set_ylabel("$" + labels[i] + "$")
ax.axvline(x=warmup, linestyle="--", color="black", alpha=0.5)
if markers[names[i]]:
ax.axhline(y=markers[names[i]], linestyle="--", color="black", alpha=0.5)
ax.yaxis.set_label_coords(-0.1, 0.5)
ax.grid()
axes[-1].set_xlabel("step number")
if output:
plt.savefig(output)
if not noshow:
plt.show()
plt.close()
return
# print run information
def runlog(timeelapsed, file=sys.stdout):
if file != sys.stdout:
file = open(file, "w")
print("## run.log", file=file)
print("# information regarding the execution of this program", file=file)
print("# the execution time has the format hours:minutes:seconds", file=file)
print("", file=file)
print(f"time: {timeelapsed}", file=file)
if file != sys.stdout:
file.close()
return
``` |
{
"source": "jpmvferreira/wip1-2",
"score": 2
} |
#### File: model/emcee/LCDM_GW.py
```python
from scipy.integrate import quad
from math import log, pi
import numpy as np
# define the natural logarithm of the likelihood
def ln_likelihood(θ, redshifts, distances, errors):
N = len(redshifts)
h, Ωm = θ
sum = 0
for i in range(0, N):
dL = (1+redshifts[i]) * (2.9979/h) * quad(lambda Z: 1/(Ωm*(1+Z)**3 + (1-Ωm))**0.5, 0, redshifts[i])[0]
sum += -log(errors[i]) - (distances[i] - dL)**2 / (2*errors[i]**2)
return -N*log(2*pi)/2 + sum
# define the natural logarithm of the priors
def ln_prior(θ):
h, Ωm = θ
# flat priors
if 0.2 < h < 1.2 and 0 < Ωm < 1:
return 0.0
return -np.inf
# define the probability using the prior and likelihood
def ln_probability(θ, redshifts, distances, errors):
prior = ln_prior(θ)
if not np.isfinite(prior):
return -np.inf
return prior + ln_likelihood(θ, redshifts, distances, errors)
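# Hypothetical usage sketch (added, not part of the original model file): this
# log-probability would typically be sampled with emcee along the lines of
#   import emcee
#   sampler = emcee.EnsembleSampler(nwalkers, 2, ln_probability,
#                                   args=(redshifts, distances, errors))
#   sampler.run_mcmc(p0, nsteps)
# where redshifts/distances/errors are the standard-siren data arrays and p0 is
# a (nwalkers, 2) array of starting points for the parameters (h, Ωm).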
``` |
{
"source": "jpnadas/django-seed",
"score": 2
} |
#### File: django-seed/django_seed/tests.py
```python
import random
from contextlib import contextmanager
from datetime import datetime
from django import VERSION as django_version
from django.conf import settings
from django.core.management import call_command
from django.core.validators import validate_comma_separated_integer_list
from django.db import models
from django.utils import timezone
from django_seed.guessers import NameGuesser, FieldTypeGuesser
from django_seed.seeder import Seeder
from django_seed.exceptions import SeederException, SeederCommandError
from django_seed import Seed
from faker import Faker
from alphabet_detector import AlphabetDetector
from jsonfield import JSONField
try:
from django.utils.unittest import TestCase
except:
from django.test import TestCase
from unittest import skipIf
fake = Faker()
DEF_LD = "default long description"
DEF_SD = "default short description"
@contextmanager
def django_setting(name, value):
"""
Generator that mutates the django.settings object during the context of a test run.
:param name: The setting name to be affected
:param value: The setting value to be defined during the execution
:return:
"""
original_value = getattr(settings, name)
setattr(settings, name, value)
try:
yield
finally:
setattr(settings, name, original_value)
# Game models
class Game(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200)
description = models.TextField()
game_started = models.DateTimeField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
updated_date = models.DateField()
updated_time = models.TimeField()
active = models.BooleanField()
max_score = models.BigIntegerField()
levels = models.SmallIntegerField()
likes = models.IntegerField()
random_binary = models.BinaryField()
class Player(models.Model):
nickname = models.CharField(max_length=100)
tagline = models.CharField(max_length=128)
avatar = models.FilePathField()
score = models.BigIntegerField()
last_login_at = models.DateTimeField()
game = models.ForeignKey(to=Game, on_delete=models.CASCADE)
ip = models.GenericIPAddressField()
achievements = models.CharField(validators=[validate_comma_separated_integer_list], max_length=1000)
friends = models.PositiveIntegerField()
balance = models.FloatField()
class Action(models.Model):
ACTION_FIRE = 'fire'
ACTION_MOVE = 'move'
ACTION_STOP = 'stop'
ACTIONS = (
(ACTION_FIRE, 'Fire'),
(ACTION_MOVE, 'Move'),
(ACTION_STOP, 'Stop'),
)
name = models.CharField(max_length=4, choices=ACTIONS)
executed_at = models.DateTimeField()
duration = models.DurationField()
uuid = models.UUIDField()
actor = models.ForeignKey(to=Player,on_delete=models.CASCADE,related_name='actions', null=False)
target = models.ForeignKey(to=Player,on_delete=models.CASCADE, related_name='enemy_actions+', null=True)
# Product models
class Product(models.Model):
name = models.CharField(max_length=100)
short_description = models.CharField(max_length=100, default=DEF_SD)
description = models.TextField(default=DEF_LD)
enabled = models.BooleanField(default=True)
class Customer(models.Model):
name = models.CharField(max_length=255)
country = models.CharField(max_length=30)
address = models.CharField(max_length=50)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
comments = models.TextField(max_length=500)
# Reporter models
class Pen(models.Model):
ink_left = models.PositiveIntegerField()
class Reporter(models.Model):
name = models.CharField(max_length=100)
pen = models.OneToOneField(
Pen,
on_delete=models.CASCADE,
)
class Article(models.Model):
title = models.CharField(max_length=100)
reporter = models.ForeignKey(Reporter, on_delete=models.CASCADE)
class Newspaper(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=80)
articles = models.ForeignKey(Article, on_delete=models.CASCADE)
# A reporter works for multiple newspapers
reporters = models.ManyToManyField(Reporter)
class NotCoveredFields(models.Model):
json = JSONField()
class NameGuesserTestCase(TestCase):
def setUp(self):
self.instance = NameGuesser(fake)
def test_guess_format_timezone(self):
test_names = ('something_at', 'something_At', 'gameUpdated_At', 'game_created_at')
with django_setting('USE_TZ', True):
for name in test_names:
value = self.instance.guess_format(name)(datetime.now())
self.assertTrue(timezone.is_aware(value))
with django_setting('USE_TZ', False):
for name in test_names:
value = self.instance.guess_format(name)(datetime.now())
self.assertFalse(timezone.is_aware(value))
class FieldTypeGuesserTestCase(TestCase):
def setUp(self):
self.instance = FieldTypeGuesser(fake)
def test_guess_with_datetime(self):
generator = self.instance.guess_format(models.DateTimeField())
with django_setting('USE_TZ', True):
value = generator(datetime.now())
self.assertTrue(timezone.is_aware(value))
with django_setting('USE_TZ', False):
value = generator(datetime.now())
self.assertFalse(timezone.is_aware(value))
# TODO: Find model field with _default_hint to use in test
# def test_guess_not_in_format(self):
# generator = self.instance.guess_format(JSONField())
# self.assertEquals(generator(), '{}')
class SeederTestCase(TestCase):
def test_population(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Game, 10)
self.assertEqual(len(seeder.execute()[Game]), 10)
self.assertEqual(len(Game.objects.all()), 10)
seeder.add_entity(Game, 40)
self.assertEqual(len(seeder.execute()[Game]), 40)
self.assertEqual(len(Game.objects.all()), 50)
def test_same_model_unique_fields(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Game, 10, {
"title": "First Game"
})
seeder.add_entity(Game, 20, {
"title": "Second Game"
})
inserted_pks = seeder.execute()
self.assertEqual(len(inserted_pks[Game]), 30)
self.assertEqual(len(Game.objects.all()), 30)
self.assertEqual(Game.objects.get(id=inserted_pks[Game][0]).title, "First Game")
self.assertEqual(Game.objects.get(id=inserted_pks[Game][-1]).title, "Second Game")
def test_guesser(self):
faker = fake
def title_fake(arg):
title_fake.count += 1
name = faker.company()
return name
title_fake.count = 0
seeder = Seeder(faker)
seeder.add_entity(Game, 10, {
'title': title_fake
})
self.assertEqual(len(seeder.execute()[Game]), title_fake.count)
def valid_player(self, player):
p = player
return 0 <= p.score <= 1000 and '@' in p.nickname
def test_formatter(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Game, 5)
seeder.add_entity(Player, 10, {
'score': lambda x: random.randint(0, 1000),
'nickname': lambda x: fake.email()
})
seeder.add_entity(Action, 30)
inserted_pks = seeder.execute()
self.assertTrue(len(inserted_pks[Game]) == 5)
self.assertTrue(len(inserted_pks[Player]) == 10)
players = Player.objects.all()
self.assertTrue(any([self.valid_player(p) for p in players]))
@skipIf(django_version[0] < 2, "JSONField does not work with Django 1.11")
def test_not_covered_fields(self):
"""
Tell the django-seed how to work with fields which are
not covered by the code. Avoids AttributeError(field).
:return:
"""
faker = fake
seeder = Seeder(faker)
seeder.add_entity(NotCoveredFields, 10, {
'json': lambda x: {seeder.faker.domain_name(): {'description': seeder.faker.text()}},
})
inserted_pks = seeder.execute()
self.assertTrue(len(inserted_pks[NotCoveredFields]) == 10)
self.assertTrue(all([field.json for field in NotCoveredFields.objects.all()]))
def test_locale(self):
ad = AlphabetDetector()
faker = Faker('ru_RU')
seeder = Seeder(faker)
seeder.add_entity(Game, 5)
seeder.execute()
self.assertTrue(all([ad.is_cyrillic(game.title) for game in Game.objects.all()]))
def test_null_foreign_key(self):
faker = fake
seeder = Seeder(faker)
try:
seeder.add_entity(Action, 1)
seeder.execute()
except Exception as e:
self.assertTrue(isinstance(e, SeederException))
pass
def test_no_entities_added(self):
faker = fake
seeder = Seeder(faker)
try:
seeder.execute()
except Exception as e:
self.assertTrue(isinstance(e, SeederException))
def test_auto_now_add(self):
date = datetime(1957, 3, 6, 13, 13)
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Game, 10, {
'created_at': lambda x: date
})
inserted_pks = seeder.execute()[Game]
games = Game.objects.filter(pk__in=inserted_pks)
self.assertTrue(all(game.created_at == date for game in games))
def test_auto_now(self):
date = datetime(1957, 3, 6, 13, 13)
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Game, 10, {
'updated_at': lambda x: date
})
inserted_pks = seeder.execute()[Game]
games = Game.objects.filter(pk__in=inserted_pks)
self.assertTrue(all(game.updated_at == date for game in games))
class APISeedTestCase(TestCase):
def setUp(self):
self.seed1 = Seed()
self.seed2 = Seed()
def test_django_seed_singleton(self):
self.assertEqual(self.seed1, self.seed2)
self.assertIs(self.seed1, self.seed1)
def test_faker_cache_faker(self):
gen1 = self.seed1.faker()
gen2 = self.seed2.faker()
self.assertIs(gen1, gen2)
gen1 = self.seed1.faker(codename='default')
gen2 = self.seed2.faker(codename='default')
self.assertIs(gen1, gen2)
gen1 = self.seed1.faker(locale='it_IT')
gen2 = self.seed2.faker(locale='it_IT')
self.assertIs(gen1, gen2)
def test_faker_cache_seeder(self):
seeder1 = self.seed1.seeder()
seeder2 = self.seed2.seeder()
self.assertIs(seeder1, seeder2)
gen1 = seeder1.faker
gen2 = seeder2.faker
self.assertIs(gen1, gen2)
seeder1 = self.seed1.seeder(locale='it_IT')
seeder2 = self.seed2.seeder(locale='it_IT')
self.assertIs(seeder1, seeder2)
class SeedCommandTestCase(TestCase):
def test_seed_command(self):
call_command('seed', 'django_seed', number=10)
def test_invalid_number_arg(self):
try:
call_command('seed', 'django_seed', number='asdf')
except Exception as e:
self.assertTrue(isinstance(e, SeederCommandError))
pass
class DefaultValueTestCase(TestCase):
def test_default_value_guessed_by_field_type(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Product, 1, {'name':'Awesome Product'})
_id = seeder.execute()
self.assertIsNotNone(_id)
product = Product.objects.get(id=_id[Product][0])
        self.assertEqual(product.short_description, DEF_SD)
self.assertTrue(product.enabled)
def test_default_value_guessed_by_field_name(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Product, 1, {'name':'Great Product'})
_id = seeder.execute()
self.assertIsNotNone(_id)
product = Product.objects.get(id=_id[Product][0])
        self.assertEqual(product.description, DEF_LD)
class LengthRulesTestCase(TestCase):
def test_max_length(self):
faker = fake
seeder = Seeder(faker)
name_max_len = Customer._meta.get_field('name').max_length
country_max_len = Customer._meta.get_field('country').max_length
address_max_len = Customer._meta.get_field('address').max_length
comments_max_len = Customer._meta.get_field('comments').max_length
rand = random.randint(1, 10)
data = {
'name': 'x' * (name_max_len + rand),
'country': 'p' * (country_max_len + rand),
'address': 't' * (address_max_len + rand),
'comments': 'o' * (comments_max_len + rand),
}
seeder.add_entity(Customer, 1, data)
_id = seeder.execute()
customer = Customer.objects.get(id=_id[Customer][0])
self.assertTrue(len(customer.name) <= name_max_len,
"name with length {}, does not respect max length restriction of {}"
.format(len(customer.name), name_max_len))
self.assertTrue(len(customer.country) <= country_max_len,
"country with length {}, does not respect max length restriction of {}"
                        .format(len(customer.country), country_max_len))
self.assertTrue(len(customer.address) <= address_max_len,
"address with length {}, does not respect max length restriction of {}"
                        .format(len(customer.address), address_max_len))
self.assertTrue(len(customer.comments) <= comments_max_len,
"comments with length {}, does not respect max length restriction of {}"
.format(len(customer.comments), comments_max_len))
def test_default_with_max_length(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Product, 1)
_id = seeder.execute()
product = Product.objects.get(id=_id[Product][0])
self.assertTrue(len(DEF_LD) == len(product.description))
class RelationshipTestCase(TestCase):
def test_one_to_one(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Pen, 1)
seeder.add_entity(Reporter, 1)
seeder.execute()
self.assertEqual(Reporter.objects.get(id=1).pen.pk, 1)
def test_one_to_one_wrong_order(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Reporter, 1)
seeder.add_entity(Pen, 1)
self.assertRaises(SeederException, seeder.execute)
def test_many_to_one(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Pen, 1)
seeder.add_entity(Reporter, 1)
seeder.add_entity(Article, 1)
seeder.execute()
self.assertNotEqual(Reporter.objects.get(id=1), None)
self.assertNotEqual(Article.objects.get(id=1), None)
self.assertEqual(Article.objects.get(id=1).reporter.pk, 1)
def test_many_to_one_wrong_order(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Article, 1)
seeder.add_entity(Pen, 1)
seeder.add_entity(Reporter, 1)
self.assertRaises(SeederException, seeder.execute)
def test_many_to_many(self):
faker = fake
seeder = Seeder(faker)
seeder.add_entity(Pen, 1)
seeder.add_entity(Reporter, 1)
seeder.add_entity(Article, 1)
seeder.add_entity(Newspaper, 1)
results = seeder.execute()
self.assertNotEqual(Newspaper.objects.get(id=1), None)
self.assertNotEqual(Reporter.objects.get(id=1), None)
self.assertNotEqual(Article.objects.get(id=1), None)
self.assertEqual(len(Reporter.objects.get(id=1).newspaper_set.all()), 1)
# TODO: This test should work once
# https://github.com/Brobin/django-seed/issues/79 is resolved
# def test_many_to_many_separate_executes(self):
# faker = fake
# seeder = Seeder(faker)
# seeder.add_entity(Pen, 1)
# seeder.add_entity(Reporter, 1)
# seeder.add_entity(Article, 1)
# seeder.execute()
# seeder.add_entity(Newspaper, 1)
# seeder.execute()
# self.assertNotEqual(Newspaper.objects.get(id=1), None)
# self.assertNotEqual(Reporter.objects.get(id=1), None)
# self.assertNotEqual(Article.objects.get(id=1), None)
# self.assertEqual(len(Reporter.objects.get(id=1).newspaper_set.all()), 1)
``` |
{
"source": "jpn--/addict_yaml",
"score": 3
} |
#### File: addict_yaml/addict_yaml/yaml_checker.py
```python
default_config = """---
rules:
braces:
min-spaces-inside: 0
max-spaces-inside: 0
min-spaces-inside-empty: -1
max-spaces-inside-empty: -1
brackets:
min-spaces-inside: 0
max-spaces-inside: 0
min-spaces-inside-empty: -1
max-spaces-inside-empty: -1
colons:
max-spaces-before: 0
max-spaces-after: 999
commas:
max-spaces-before: 0
min-spaces-after: 1
max-spaces-after: 1
comments:
level: warning
require-starting-space: false
min-spaces-from-content: 1
comments-indentation:
level: warning
document-end: disable
document-start: disable
# level: warning
# present: true
empty-lines:
max: 99
max-start: 99
max-end: 99
empty-values:
forbid-in-block-mappings: false
forbid-in-flow-mappings: false
hyphens:
max-spaces-after: 1
indentation:
spaces: consistent
indent-sequences: true
check-multi-line-strings: false
key-duplicates: enable
key-ordering: disable
line-length:
max: 880
allow-non-breakable-words: true
allow-non-breakable-inline-mappings: false
new-line-at-end-of-file: enable
new-lines:
type: unix
octal-values:
forbid-implicit-octal: false
forbid-explicit-octal: false
trailing-spaces: disable
truthy: disable
# level: warning
"""
class Format(object):
@staticmethod
def parsable(problem, filename):
return ('%(file)s:%(line)s:%(column)s: [%(level)s] %(message)s' %
{'file': filename,
'line': problem.line,
'column': problem.column,
'level': problem.level,
'message': problem.message})
@staticmethod
def standard(problem, filename):
line = ' %d:%d' % (problem.line, problem.column)
line += max(12 - len(line), 0) * ' '
line += problem.level
line += max(21 - len(line), 0) * ' '
line += problem.desc
if problem.rule:
line += ' (%s)' % problem.rule
return line
@staticmethod
def standard_color(problem, filename):
line = ' \033[2m%d:%d\033[0m' % (problem.line, problem.column)
line += max(20 - len(line), 0) * ' '
if problem.level == 'warning':
line += '\033[33m%s\033[0m' % problem.level
else:
line += '\033[31m%s\033[0m' % problem.level
line += max(38 - len(line), 0) * ' '
line += problem.desc
if problem.rule:
line += ' \033[2m(%s)\033[0m' % problem.rule
return line
def yaml_check(file, config_file=None, logger=None, encoding='utf-8'):
if logger is None:
log = print
else:
log = logger.error
try:
from yamllint import linter
from yamllint.config import YamlLintConfig
from yamllint.linter import PROBLEM_LEVELS
except ImportError:
log("yamllint is not installed, cannot inspect yaml file for formatting quality")
else:
filepath = file[2:] if file.startswith('./') else file
if config_file is not None:
conf = YamlLintConfig(file=config_file)
else:
conf = YamlLintConfig(content=default_config)
first = True
any_errors = False
max_level = 0
with open(filepath, 'r', encoding=encoding) as f:
for problem in linter.run(f, conf, filepath):
if first:
log(f"FOUND YAML ERRORS IN {file}")
first = False
any_errors = True
log(Format.standard(problem, file))
max_level = max(max_level, PROBLEM_LEVELS[problem.level])
if any_errors:
log(f"END OF YAML ERRORS IN {file}")
``` |
{
"source": "jpnaterer/sonicboards",
"score": 3
} |
#### File: sonicboards/scripts/scrape_bandcamp.py
```python
import requests
import json
import csv
import time
import os
import scrape
from datetime import datetime
# A dict of numerical location codes used by the bandcamp API.
LOCATION_CODES = {'novascotia': 6091530, 'ottawa': 6094817, 'pei': 6113358,
'newbrunswick': 6087430, 'saskatchewan': 6141242, 'newfoundland': 6354959,
'victoria': 6174041, 'edmonton': 5946768, 'calgary': 5913490,
'manitoba': 6065171, 'ontario': 6093943, 'quebec': 6115047,
'britishcolumbia': 5909050, 'alberta': 5883102}
GENRES_TO_IGNORE = ['metal', 'podcasts', 'classical', 'latin',
'spoken word', 'comedy', 'kids', 'audiobooks']
BASE_URL = 'https://bandcamp.com/api/hub/2/dig_deeper'
def get_bandcamp_releases(tag_str, page_count=10,
location_id=0, region_str=None, sort_str='pop'):
albums = list()
# If no region input, assume it is the same as the input tag.
if not(region_str):
region_str = tag_str
# Search by popularity not date, to remove bandcamp bloat.
post_requests = [{"filters": {"format": "all", "location": location_id,
"sort": sort_str, "tags": [tag_str]}, "page": i}
for i in range(1, page_count + 1)]
for post_request in post_requests:
tmp, continue_flag = scrape_response(post_request, region_str)
albums.extend(tmp)
if not continue_flag:
break
return albums
# Scrape an individual bandcamp post response.
def scrape_response(post_request, region_str):
# Attempt search, if fail, wait 5s to try again.
x = requests.post(BASE_URL, json=post_request)
if (not x.ok):
print("*** Failed Search, Continuing in 5s ***")
time.sleep(5)
x = requests.post(BASE_URL, json=post_request)
request_results = x.json()
albums = list()
for result in request_results['items']:
# Skip albums that have genre within the ignore list.
genre_str = result['genre']
if genre_str in GENRES_TO_IGNORE:
continue
# Skip albums that have not released, aka, are up for pre-order.
if result['is_preorder']:
continue
artist_str = result['artist']
title_str = result['title']
url_str = result['tralbum_url']
albums.append({'artist': artist_str, 'title': title_str,
'genre': genre_str, 'region': region_str, 'url': url_str})
# Stop searching for pages if we reach the final page.
if(not request_results['more_available']):
return albums, False
return albums, True
# A utility function to effectively search each region w/o duplicates.
def get_bandcamp_releases_util(albums,
tag_str='canada', location_id=0, region_str=None):
# Complete one large recent release search and one small popular search.
if region_str is None:
res1 = get_bandcamp_releases(tag_str, page_count=10, sort_str='date')
res2 = get_bandcamp_releases(tag_str, page_count=1, sort_str='pop')
else:
res1 = get_bandcamp_releases('canada', page_count=10,
location_id=location_id, region_str=region_str, sort_str='date')
res2 = get_bandcamp_releases('canada', page_count=1,
location_id=location_id, region_str=region_str, sort_str='pop')
# Ensure the url is not yet in the current list.
url_list = [r['url'] for r in albums]
for result in res1:
if result['url'] not in url_list:
albums.append(result)
url_list = [r['url'] for r in albums]
for result in res2:
if result['url'] not in url_list:
albums.append(result)
return albums
# Generate recommendation scores. These are likely overwritten when the
# data json files are transferred into the mongo database.
def get_bandcamp_scores(albums):
for r in albums:
if not('sp_popularity' in r):
r['sp_popularity'] = 0
date_obj = datetime.strptime(r['sp_date'], "%Y-%m-%dT00:00.000Z")
time_score = max(60 - (datetime.now() - date_obj).days, 0)
r['score'] = r['sp_popularity'] / 100 * 40 + time_score / 60 * 60
r['score'] = round(r['score'], 3)
albums = sorted(albums, key=lambda k: k['score'], reverse=True)
return albums
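# Worked example of the score above (illustrative numbers): an album with
# sp_popularity 50 released 30 days ago scores 50/100*40 + (60-30)/60*60
# = 20 + 30 = 50, so recency (weighted 60) outweighs Spotify popularity (weighted 40).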
# WHERE THE SEARCHING TAKES PLACE ######################################
# Retrieve primary locations by popularity.
albums = list()
for tag_str in ['toronto', 'montreal', 'vancouver']:
print("Scraping Bandcamp %s" % tag_str)
albums = get_bandcamp_releases_util(albums, tag_str)
# Retrieve secondary locations by date.
for region_str, location_id in LOCATION_CODES.items():
print("Scraping Bandcamp %s" % region_str)
albums = get_bandcamp_releases_util(albums,
tag_str='canada', location_id=location_id, region_str=region_str)
# Write results to a csv file before the spotify search for debugging.
# with open('results/canada_pre.csv', mode='w') as csv_file:
# fieldnames = ['artist', 'title', 'genre', 'url', 'region']
# csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
# csv_writer.writeheader()
# csv_writer.writerows(albums)
print('Fetching %d Spotify Results' % len(albums), end='', flush=True)
current_time = datetime.now()
bandcamp_scraper = scrape.AlbumScraper(albums)
albums = bandcamp_scraper.run()
albums = get_bandcamp_scores(albums)
print(", Completed in %ds" % (datetime.now() - current_time).seconds)
# Write results to csv and json files.
script_loc = os.path.dirname(os.path.realpath(__file__))
with open(script_loc + '/results/canada.csv', mode='w+') as csv_file:
fieldnames = ['artist', 'title', 'genre', 'url',
'region', 'score', 'sp_popularity',
'sp_date', 'sp_img', 'sp_album_id', 'sp_artist_id']
csv_writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
csv_writer.writeheader()
csv_writer.writerows(albums)
with open(script_loc + '/results/canada.json', 'w+') as json_file:
json.dump(albums, json_file, indent=4)
``` |
{
"source": "jpnauta/collectfast",
"score": 2
} |
#### File: collectfast/storage_extensions/s3boto3.py
```python
from storages.utils import safe_join
from collectfast import settings
from collectfast.storage_extensions.base import BaseStorageExtensions, check_preload_metadata
class S3Boto3StorageExtensions(BaseStorageExtensions):
"""
Storage extensions for django-storage's `S3Boto3Storage`
"""
def __init__(self, storage):
super(S3Boto3StorageExtensions, self).__init__(storage)
check_preload_metadata(storage)
def reset_connection(self):
"""
Reset connection if thread pooling is enabled
"""
if settings.threads:
self.storage._connection = None
def get_etag(self, path):
normalized_path = safe_join(self.storage.location, path).replace('\\', '/')
try:
return self.storage.bucket.Object(normalized_path).e_tag
        except Exception:
            # Object missing or lookup failed: treat as "no ETag available".
            return None
```
#### File: collectfast/storage_extensions/s3boto.py
```python
from storages.utils import safe_join
from collectfast.storage_extensions.base import BaseStorageExtensions, check_preload_metadata
class S3BotoStorageExtensions(BaseStorageExtensions):
"""
Storage extensions for django-storage's `S3BotoStorage`
"""
def __init__(self, storage):
super(S3BotoStorageExtensions, self).__init__(storage)
check_preload_metadata(storage)
def get_etag(self, path):
normalized_path = safe_join(self.storage.location, path).replace('\\', '/')
try:
return self.storage.bucket.get_key(normalized_path).etag
except AttributeError:
return None
``` |
{
"source": "jpnavarro/mp-auth",
"score": 2
} |
#### File: mp_auth/backends/base.py
```python
import logging
import time
from django.conf import settings
from rest_framework import exceptions
from rest_framework.authentication import get_authorization_header, BaseAuthentication
from ..models import AccessToken
logger = logging.getLogger(__name__)
class MultiproviderBaseAuthentication(BaseAuthentication):
"""
The base class with methods that are common for all types of Identity Providers:
All provider authentication classes should derive from this class instead of
rest_framework.authentication.BaseAuthentication
"""
def __init__(self):
self.jwt_idps = settings.MULTIPROVIDER_AUTH.get("JWT")
self.opaque_token_idps = settings.MULTIPROVIDER_AUTH.get("BearerTokens")
def get_token(self, request):
"""Extract a bearer token from the HTTP header"""
auth = get_authorization_header(request)
if not auth:
msg = "No authorization header."
raise exceptions.AuthenticationFailed(msg)
auth = auth.split()
len_auth = len(auth)
if len_auth == 0:
msg = "Empty authorization header."
raise exceptions.AuthenticationFailed(msg)
elif len_auth == 1:
msg = "Invalid bearer header."
raise exceptions.AuthenticationFailed(msg)
elif len_auth > 2:
msg = "Invalid bearer header. Token string must not contain any spaces."
raise exceptions.AuthenticationFailed(msg)
elif auth[0].lower() != b'bearer':
msg = "Invalid bearer header. Missing Bearer."
raise exceptions.AuthenticationFailed(msg)
return auth[1]
def check_cache(self, access_token, providers):
"""Look for the access token cached in the database
Parameters
----------
access_token : str
access token
providers : str|list
string or list of strings with providers specified
in MULTIPROVIDER_AUTH dict in settings.py
"""
if isinstance(providers, str):
providers = [providers]
try:
access_token = AccessToken.objects.get(access_token=access_token)
unix_time = int(time.time())
if (access_token.exp >= unix_time and
access_token.user_association.provider.iss in providers):
user = access_token.user_association.user
return user, access_token
except AccessToken.DoesNotExist:
pass
return None, None
```
#### File: mp_auth/backends/jwt.py
```python
import logging
import time
import requests
import jwt
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
from django.contrib.auth import get_user_model
from rest_framework import exceptions
from ..models import Provider, UserAssociation, AccessToken, JsonWebKey
from .base import MultiproviderBaseAuthentication
logger = logging.getLogger(__name__)
UserModel = get_user_model()
class JWTAuthentication(MultiproviderBaseAuthentication):
name = "jwt"
def authenticate(self, request):
# Extract token from HTTP Authorization header
bearer_token = self.get_token(request)
# Authenticate against the database where old access tokens were stored
user, token = self.check_cache(bearer_token, self.jwt_idps.keys())
if user:
logger.info("{} successfully authenticated".format(user.username))
return user, token
# Introspect the token
user, token = self.introspect_token(bearer_token)
logger.info("{} successfully authenticated".format(user.username))
return user, token
def introspect_token(self, bearer_token):
"""
Introspect the token and, if the token is valid:
1) store the token with user information in the database
2) associate the token with an existing user or create a user
if it does not exist
"""
try:
jwt_header = jwt.get_unverified_header(bearer_token)
except Exception as e:
msg = "Error when decoding the JWT header: {}".format(e)
raise exceptions.AuthenticationFailed(msg)
try:
jwt_payload = jwt.decode(bearer_token, verify=False)
except Exception as e:
msg = "Error when decoding the JWT token payload: {}".format(e)
raise exceptions.AuthenticationFailed(msg)
typ = jwt_header.get("typ")
alg = jwt_header.get("alg")
kid = jwt_header.get("kid")
if typ != "JWT":
msg = "Unsupported JWT token type"
raise exceptions.AuthenticationFailed(msg)
if alg != "RS256":
msg = "Unsupported JWT token algorithm"
raise exceptions.AuthenticationFailed(msg)
iss = jwt_payload.get("iss")
sub = jwt_payload.get("sub")
aud = jwt_payload.get("aud")
exp = jwt_payload.get("exp")
nbf = jwt_payload.get("nbf")
idp = self.jwt_idps.get(iss)
if not idp:
msg = "Prohibited JWT token issuer"
raise exceptions.AuthenticationFailed(msg)
if idp.get("aud") != aud:
msg = "Invalid audience"
raise exceptions.AuthenticationFailed(msg)
if not sub:
msg = "No sub claim in the JWT token"
raise exceptions.AuthenticationFailed(msg)
unix_time = int(time.time())
# Check if the 'exp' (Expiration Time) claim applies
if exp and int(exp) < unix_time:
msg = "Token expired"
raise exceptions.AuthenticationFailed(msg)
# Check if the 'nbf' (Not Before) claim applies
if nbf and int(nbf) > unix_time:
msg = "Token is not valid yet"
raise exceptions.AuthenticationFailed(msg)
provider, _created = Provider.objects.get_or_create(iss=iss)
# Try to get JWKS from the issuer server and update keys in the database
try:
resp = requests.get(iss + ".well-known/jwks.json")
jwks = resp.json()
for jwk in jwks.get("keys"):
                jwk_kid = jwk.get("kid")
                jwk_alg = jwk.get("alg")
                jwk_kty = jwk.get("kty")
                jwk_x5c = jwk.get("x5c")[0]
                # Use loop-local names so the token's kid from the JWT header
                # (looked up again below) is not shadowed.
                key, _created = JsonWebKey.objects.update_or_create(
                    iss=provider,
                    kid=jwk_kid,
                    defaults={"alg": jwk_alg, "kty": jwk_kty, "x5c": jwk_x5c}
)
except Exception:
            logger.warning("Could not download JWKS from {}".format(iss))
# Try to get a corresponding key from the database
try:
key = JsonWebKey.objects.get(kid=kid)
except JsonWebKey.DoesNotExist:
msg = "Could not obtain a corresponding JWK"
raise exceptions.AuthenticationFailed(msg)
# Verify the JWT token
cert_str = "-----BEGIN CERTIFICATE-----\n" + key.x5c + "\n-----END CERTIFICATE-----"
cert_obj = load_pem_x509_certificate(cert_str.encode(), default_backend())
try:
jwt.decode(bearer_token, cert_obj.public_key(),
audience=idp.get("aud"), algorithms=key.alg)
except Exception as e:
logger.debug("Error when verifying the JWT token: {}".format(e))
raise exceptions.AuthenticationFailed(e)
try:
user_association = UserAssociation.objects.get(provider=provider, uid=sub)
user = user_association.user
except UserAssociation.DoesNotExist:
user = UserModel.objects.create(
username=sub,
)
logger.debug("New user '{}' created".format(user.username))
user_association = UserAssociation.objects.create(
user=user, uid=sub, provider=provider)
AccessToken.objects.create(
user_association=user_association, access_token=bearer_token, exp=exp)
logger.debug("New access token (JWT) '{}' added to the database".format(bearer_token))
return user, None
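# Illustrative wiring (an assumption, not taken from this repository): the code
# above implies a settings.MULTIPROVIDER_AUTH dict keyed by issuer, e.g.
#   MULTIPROVIDER_AUTH = {"JWT": {"https://issuer.example.com/": {"aud": "my-api"}}}
# with this backend enabled through Django REST framework's
# DEFAULT_AUTHENTICATION_CLASSES setting.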
``` |
{
"source": "jp-nerdery/chainermn",
"score": 2
} |
#### File: chainermn/communicators/mpi_communicator_base.py
```python
import collections
import mpi4py
import numpy
import chainer.cuda
import chainer.utils
from chainermn.communicators import _communication_utility
from chainermn.communicators._communication_utility import chunked_bcast_obj
from chainermn.communicators import _memory_utility
from chainermn.communicators import communicator_base
_dtype_mpi_type = {
# see the definition of mpi4py.MPI._typedict (in mpi4py/MPI/typemap.pxi)
numpy.dtype(numpy.int32): mpi4py.MPI._typedict['i'],
numpy.dtype(numpy.int64): mpi4py.MPI._typedict['l'],
numpy.dtype(numpy.float32): mpi4py.MPI._typedict['f'],
numpy.dtype(numpy.float64): mpi4py.MPI._typedict['d'],
}
def _check_dtype(caller, msgtype):
dtype = msgtype.dtype
if dtype not in _dtype_mpi_type.keys():
raise TypeError(
'{} does not support dtype {}'.format(caller, dtype))
def _check_dtypes_are_same(msgtypes):
dtypes = [msgtype.dtype for msgtype in msgtypes]
if any(dtypes[0] != dtype for dtype in dtypes):
raise TypeError('all dtypes must be the same')
def _is_numpy_array(array):
return isinstance(array, numpy.ndarray)
def _is_cupy_array(array):
return chainer.cuda.get_array_module(array) is not numpy
def _cnt_to_dsp(cnt):
"""Utility to convert length array to cumulative array."""
return [0] + numpy.cumsum(cnt)[:-1].tolist()
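# Illustrative example (added comment): _cnt_to_dsp([3, 2, 4]) returns [0, 3, 5],
# i.e. the per-rank displacement offsets that MPI's vector collectives
# (Alltoallv, Gatherv, Scatterv) expect alongside the per-rank counts.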
def _get_mpi_type(msgtype):
dtype = msgtype.dtype
if dtype not in _dtype_mpi_type.keys():
raise TypeError(
'dtype {} is not supported by MpiCommunicator'.format(dtype))
return _dtype_mpi_type[dtype]
class _MessageType(object):
def __init__(self, obj):
if _is_numpy_array(obj) or _is_cupy_array(obj):
self.is_host = _is_numpy_array(obj)
self.is_tuple = False
self.narr = 1
self.ndims = [obj.ndim]
self.shapes = [obj.shape]
self.dtype = obj.dtype
elif isinstance(obj, collections.Iterable):
if all(map(_is_numpy_array, obj)):
self.is_host = True
elif all(map(_is_cupy_array, obj)):
self.is_host = False
else:
raise ValueError(
'All message objects must be either numpy or cupy arrays.')
self.is_tuple = True
self.narr = len(obj)
self.ndims = [x.ndim for x in obj]
self.shapes = [x.shape for x in obj]
dtypes = [x.dtype for x in obj]
if not all(dtype == dtypes[0] for dtype in dtypes):
raise TypeError(
'Message objects must be the same dtype')
self.dtype = dtypes[0]
else:
raise TypeError(
'Message object must be numpy/cupy array or its tuple.')
def get_array_module(self):
if self.is_host:
return numpy
else:
import cupy
return cupy
class MpiCommunicatorBase(communicator_base.CommunicatorBase):
'''MpiCommunicatorBase
Implementation of communicator interface defined by
:class:`CommunicatorBase`. This communicator assumes MPI4py and
all ChainerMN processes are invoked by ``mpirun`` (``mpiexec``)
command. Although this lacks several important methods such as
``allreduce_grad`` to be impelmented with speficic algorithm. See
hierarcical communicator or pure_nccl communicator for example.
'''
def __init__(self, mpi_comm):
self.mpi_comm = mpi_comm
self._init_ranks()
@property
def rank(self):
return self.mpi_comm.rank
@property
def size(self):
return self.mpi_comm.size
@property
def intra_rank(self):
return self._intra_rank
@property
def intra_size(self):
return self._intra_size
@property
def inter_rank(self):
return self._inter_rank
@property
def inter_size(self):
return self._inter_size
def split(self, color, key):
return self.__class__(mpi_comm=self.mpi_comm.Split(color, key))
def alltoall(self, xs):
"""A primitive of inter-process all-to-all function.
This method tries to invoke all-to-all communication within the
communicator. All processes in the communicator are expected to
invoke ``alltoall()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``xs`` is numpy array, the returned array will also be allocated
as numpy array. Additionally, when ``xs`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
xs (tuple of numpy/cupy array)
Returns:
ys (tuple of numpy/cupy array):
Received arrays. The length of tuple equals to
the communicator size.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.alltoall')
if len(xs) != self.size:
raise ValueError(
'The length of data must be same as communicator size.')
# Type check.
msgtypes = [_MessageType(x) for x in xs]
for msgtype in msgtypes:
_check_dtype('alltoall', msgtype)
_check_dtypes_are_same(msgtypes)
send_msgtype = msgtypes[0]
msgtypes = self.mpi_comm.alltoall(msgtypes)
_check_dtypes_are_same(msgtypes)
recv_msgtype = msgtypes[0]
# Collective communication.
slens = [numpy.prod(x.shape) for x in xs]
xp = chainer.cuda.get_array_module(*xs)
sbuf = xp.hstack([x.reshape(-1) for x in xs])
shapes = [msgtype.shapes[0] for msgtype in msgtypes]
rlens = [numpy.prod(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
sbuf = _memory_utility.get_device_memory_pointer(sbuf)
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Alltoallv(
[sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(send_msgtype)],
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(recv_msgtype)])
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
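    # Illustrative usage (added comment; assumes a 2-process job):
    #   rank 0: ys = comm.alltoall((a_to_0, a_to_1))
    #   rank 1: ys = comm.alltoall((b_to_0, b_to_1))
    # Rank 0 then receives (a_to_0, b_to_0) and rank 1 receives (a_to_1, b_to_1).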
def send(self, data, dest, tag):
"""A primitive for inter-process transmitter.
This method sends numpy-array to target process.
The target process is expected to invoke ``recv()``.
This method relies on mpi4py fast communication optimized for
numpy arrays, which discards any information attached to
        chainer.Variable objects; please be aware of this.
Args:
data: data to be sent (tuple, list or raw numpy/cupy array)
dest (int): Target process specifier.
tag (int): Message ID (MPI feature).
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.send')
msgtype = _MessageType(data)
_check_dtype('send', msgtype)
"""We use ssend() instead of send() to pass unittests.
If we don't use it, an error occurs in
test_point_to_point_communication.py
when using MVAPICH2-2.2 and GPUs.
"""
self.mpi_comm.ssend(msgtype, dest=dest, tag=tag)
        # Normalize a single array into a list so each payload is sent below.
if not msgtype.is_tuple:
data = [data]
for array in data:
if chainer.cuda.get_array_module(array) is not numpy:
chainer.cuda.Stream.null.synchronize()
array = (_memory_utility.get_device_memory_pointer(array),
_get_mpi_type(msgtype))
else:
array = numpy.ascontiguousarray(array)
"""We use Ssend() for the same reason as using ssend()."""
self.mpi_comm.Ssend(array, dest=dest, tag=tag)
def recv(self, source, tag):
"""A primitive of inter-process receiver.
This method tries to receive numpy-array from target process.
The target process is expected to invoke ``send()``.
This method relies on mpi4py fast communication optimized for
numpy arrays, which discards any information attached to
        chainer.Variable objects; please be aware of this.
If the corresponding ``send()`` is invoked with cupy array,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
source (int): Target process specifier.
tag (int): Message ID (MPI feature).
Returns:
data (tuple of numpy/cupy array or numpy/cupy array):
Received data. If ``send()`` is invoked with tuple data,
                it is also a tuple. Otherwise, it is a vanilla numpy/cupy array.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.recv')
msgtype = self.mpi_comm.recv(source=source, tag=tag)
xp = msgtype.get_array_module()
if msgtype.is_tuple:
msg = []
for shape in msgtype.shapes:
buf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
self.mpi_comm.Recv(
_memory_utility.array_to_buffer_object(buf, rtype),
source=source, tag=tag)
msg.append(buf.reshape(shape))
return tuple(msg)
else:
assert len(msgtype.shapes) == 1
shape = msgtype.shapes[0]
buf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
self.mpi_comm.Recv(
_memory_utility.array_to_buffer_object(buf, rtype),
source=source, tag=tag)
return buf.reshape(shape)
def bcast(self, x, root=0):
"""A primitive of inter-process broadcast communication.
This method tries to invoke broadcast communication within the
communicator. All processes in the communicator are expected to
invoke ``broadcast()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``bcast()`` is invoked with cupy array in the root process,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): Array to be broadcasted.
root (int): Rank of root process.
Returns:
ys (tuple of numpy/cupy array): Received arrays.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.bcast')
is_master = self.mpi_comm.rank == root
if is_master:
msgtype = _MessageType(x)
_check_dtype('bcast', msgtype)
if msgtype.is_tuple:
raise TypeError('Tuple data cannot be broadcasted')
msgtype = self.mpi_comm.bcast(msgtype, root)
shape = msgtype.shapes[0]
buf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
self.mpi_comm.Bcast(buf, root)
return x
else:
msgtype = self.mpi_comm.bcast(None, root)
xp = msgtype.get_array_module()
shape = msgtype.shapes[0]
buf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
buftype = _get_mpi_type(msgtype)
self.mpi_comm.Bcast(
_memory_utility.array_to_buffer_object(buf, buftype),
root)
return buf.reshape(shape)
def gather(self, x, root=0):
"""A primitive of inter-process gather communication.
This method tries to invoke gather communication within the
communicator. All processes in the communicator are expected to
invoke ``gather()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``x`` is numpy array, the received data will also be allocated
as numpy array. Additionally, when ``x`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): Array to be gathered.
root (int): Rank of root process.
Returns:
ys (tuple of numpy/cupy array):
Received arrays. ``None`` for non-root processes.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.gather')
is_master = self.mpi_comm.rank == root
msgtype = _MessageType(x)
_check_dtype('gather', msgtype)
msgtypes = self.mpi_comm.gather(msgtype, root)
if is_master:
_check_dtypes_are_same(msgtypes)
for msgtype in msgtypes:
if msgtype.is_tuple:
raise TypeError('gather cannot handle tuple data')
assert len(msgtype.shapes) == 1
xp = chainer.cuda.get_array_module(x)
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
shapes = [mty.shapes[0] for mty in msgtypes]
rlens = [numpy.prod(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Gatherv(
sbuf,
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(msgtype)],
root)
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
else:
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
self.mpi_comm.Gatherv(sbuf, None, root)
return None
def allgather(self, x):
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.allgather')
msgtype = _MessageType(x)
_check_dtype('allgather', msgtype)
msgtypes = self.mpi_comm.allgather(msgtype)
_check_dtypes_are_same(msgtypes)
# Type check.
for msgtype in msgtypes:
if msgtype.is_tuple:
raise TypeError('allgather cannot handle tuple data')
assert len(msgtype.shapes) == 1
# Collective communication.
xp = chainer.cuda.get_array_module(x)
shapes = [msgtype.shapes[0] for msgtype in msgtypes]
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
rlens = [numpy.prod(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Allgatherv(
sbuf,
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(msgtype)])
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
def allreduce(self, x):
"""A primitive of inter-process allreduce communication.
This method tries to invoke allreduce communication within the
communicator. All processes in the communicator are expected to
invoke ``allreduce()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
Note that this method can only handle the same shapes of data
over all processes, and cannot handle tuple data.
If ``x`` is numpy array, the received data will also be allocated
as numpy array. Additionally, when ``x`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): An array to apply allreduce operation.
Returns:
ys (numpy/cupy array): An array that allreduce (currently SUM only)
has been applied.
"""
chainer.utils.experimental(
'chainermn.communicators.CommunicatorBase.allreduce')
msgtype = _MessageType(x)
_check_dtype('allreduce', msgtype)
if msgtype.is_tuple:
raise TypeError('allreduce cannot handle tuple data')
xp = chainer.cuda.get_array_module(x)
# TODO(kuenishi): do we check all messages have same shape and dims?
# Source buffer
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
        # Destination buffer; keep a reference to the array itself because the
        # buffer object returned by array_to_buffer_object() cannot be reshaped.
        dst = xp.empty([numpy.prod(msgtype.shapes[0])], dtype=msgtype.dtype)
        dbuf = _memory_utility.array_to_buffer_object(
            dst, _get_mpi_type(msgtype))
        self.mpi_comm.Allreduce(sbuf, dbuf)
        return dst.reshape(msgtype.shapes[0])
# Objects
def send_obj(self, obj, dest):
self.mpi_comm.send(obj, dest=dest)
def recv_obj(self, source):
return self.mpi_comm.recv(source=source)
def bcast_obj(self, obj, max_buf_len=256 * 1024 * 1024, root=0):
return chunked_bcast_obj(obj, self.mpi_comm,
max_buf_len=max_buf_len,
root=root)
def gather_obj(self, obj, root=0):
return self.mpi_comm.gather(obj, root=root)
def scatter(self, xs, root=0):
"""A primitive of inter-process scatter communication.
This method tries to invoke scatter communication within the
communicator. All processes in the communicator are expected to
invoke ``scatter()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``xs`` is tuple, each element is send to different processes.
The length of the tuple must be the same as the communicator size.
        If ``xs`` is ``numpy.ndarray``, it is split along the first
axis and sent to different processes. For slave processes, ``xs``
is allowed to be any value (will be ignored).
If ``scatter()`` is invoked with cupy array in the root process,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
xs (tuple of numpy/cupy array): Arrays to be scattered.
root (int): Rank of root process.
Returns:
ys (numpy/cupy array): Received arrays.
"""
chainer.utils.experimental(
'chainermn.communicators.CommunicatorBase.scatter')
is_master = self.mpi_comm.rank == root
if is_master:
# Type check.
msgtype = _MessageType(xs)
_check_dtype('scatter', msgtype)
if msgtype.is_tuple:
if len(msgtype.shapes) != self.size:
raise ValueError(
'the length of xs must be consistent '
'with communicator size')
xp = chainer.cuda.get_array_module(*xs)
msgtype = tuple([_MessageType(x) for x in xs])
shapes = [mty.shapes[0] for mty in msgtype]
# concatenate([x.reshape(-1) ... ], axis=0) will fail
xs = xp.concatenate([x.reshape(1, -1) for x in xs], axis=1)
else:
assert len(msgtype.shapes) == 1
if msgtype.shapes[0][0] != self.mpi_comm.size:
raise ValueError(
'scatter received inconsistent number of inputs '
'with communicator size')
xp = chainer.cuda.get_array_module(xs)
msgtype = tuple([_MessageType(xs[0])
for _ in range(self.size)])
shapes = [xs.shape[1:] for _ in range(self.size)]
msgtype = self.mpi_comm.scatter(msgtype, root)
shape = msgtype.shapes[0]
# Collective communication.
slens = [numpy.prod(s) for s in shapes]
sbuf = _memory_utility.get_device_memory_pointer(xs)
rbuf = xp.empty([numpy.prod(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Scatterv(
[sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(msgtype)],
_memory_utility.array_to_buffer_object(rbuf, rtype), root)
return rbuf.reshape(shape)
else: # slave processes
msgtypes = self.mpi_comm.scatter(None, root)
xp = msgtypes.get_array_module()
shape = msgtypes.shapes[0]
rbuf = xp.empty([numpy.prod(shape)], dtype=msgtypes.dtype)
rtype = _get_mpi_type(msgtypes)
self.mpi_comm.Scatterv(
None,
_memory_utility.array_to_buffer_object(rbuf, rtype),
root)
return rbuf.reshape(shape)
def allreduce_obj(self, obj):
# Summation by default
return self.mpi_comm.allreduce(obj)
def bcast_data(self, model):
for _, param in sorted(model.namedparams()):
buf = _memory_utility.array_to_buffer_object(param.data)
self.mpi_comm.Bcast(buf)
# Private methods
def _init_ranks(self):
my_ranks = _communication_utility.init_ranks(self.mpi_comm)
assert my_ranks[0] == self.mpi_comm.rank
self._intra_rank = my_ranks[1]
self._intra_size = my_ranks[2]
self._inter_rank = my_ranks[3]
self._inter_size = my_ranks[4]
``` |
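The collective primitives above are easiest to see in a tiny driver. The following is a minimal sketch, assuming mpi4py and ChainerMN are installed, that `chainermn.create_communicator('naive')` returns an `MpiCommunicatorBase` subclass, and that the script is launched with `mpiexec -n <ranks>`; it is illustrative only.

```python
# Illustrative sketch only; the communicator name and launch command are assumptions.
import numpy as np
import chainermn

comm = chainermn.create_communicator('naive')    # an MpiCommunicatorBase subclass
x = np.arange(4, dtype=np.float32) + comm.rank   # each rank holds different data

summed = comm.allreduce(x)     # element-wise SUM over all ranks, same shape everywhere
gathered = comm.allgather(x)   # tuple with one array per rank, available on every rank

if comm.rank == 0:
    print(summed)
    print([g.shape for g in gathered])
```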
{
"source": "jp-nerdery/raspi_hack",
"score": 3
} |
#### File: raspi_run_py/data_function/data_control.py
```python
import psycopg2
import mysql.connector
import base64
import codecs
from datetime import datetime
import pandas as pd
import numpy as np
import pandas as psql
from jerp_src.db import sql_db_config
class My_pg_Access():
def connect():
        ''' Open a database connection.
        With db_flg == 0 (the default) a MySQL connection is opened,
        otherwise PostgreSQL is used. SQL_CONFIG supplies the host, port,
        database name, user and password from the server environment or
        from the locally installed database settings.
'''
db_flg =0
conn =None
h,pt ,dn, u, ps ,_= sql_db_config.SQL_CONFIG.configs()
if db_flg ==0:
conn = mysql.connector.connect(user=u, password=ps, host=h, database=dn)
else:
conn = psycopg2.connect(host= h,port=pt ,dbname=dn, user=u, password=ps)
cur = conn.cursor()
print('Connect Success')
return conn,cur
def select_data(SQL_COM,dataparam=None):
        ''' Run a SELECT statement and return the result as a pandas DataFrame.
        SQL_COM   = SQL SELECT statement (may contain %(name)s placeholders)
        dataparam = optional dict of parameter values for the placeholders
        '''
        conn, _ = My_pg_Access.connect()
        sql_data = psql.read_sql(SQL_COM, conn, params=dataparam)
        print('SELECT Success')
        conn.close()
        # decode byte-string column names so the DataFrame always uses str columns
        columns_name = [(item, item) if isinstance(item, str) else (item, item.decode("utf-8")) for item in sql_data.columns.values]
        sql_data = sql_data.rename(columns=dict(columns_name))
        return sql_data
def insert_data(table_name,dataparam):
        ''' Insert one row into a table.
        table_name = name of the target table
        dataparam  = dict mapping column names to the values to insert
'''
conn ,cur = My_pg_Access.connect()
SQL_COM = ''
SQL_COM = 'INSERT INTO ' + table_name
SQL_COM += ' ( '
for i,(key, val) in enumerate(dataparam.items()):
if len(dataparam.items()) != i +1 :
SQL_COM += key + ','
else:
SQL_COM += key
SQL_COM += ' ) VALUES( '
for i,(key, val) in enumerate(dataparam.items()):
if len(dataparam.items()) != i +1 :
SQL_COM += '%(' + key + ')s' + ','
else:
SQL_COM += '%(' + key + ')s'
SQL_COM += ')'
cur.execute(SQL_COM,dataparam)
conn.commit()
        conn.close()
print('INSERT Success')
def update_data(table_name,dataparam,whereparam =None):
        ''' Update rows in a table.
        table_name = name of the target table
        dataparam  = dict mapping column names to their new values (SET clause)
        whereparam = optional dict of column/value pairs ANDed together in the WHERE clause
'''
conn ,cur = My_pg_Access.connect()
_,_,_, _, _,scm = sql_db_config.SQL_CONFIG.configs()
SQL_COM = ''
SQL_COM = 'UPDATE ' + table_name
SQL_COM += ' SET '
for i,(key, val) in enumerate(dataparam.items()):
if len(dataparam.items()) != i +1 :
SQL_COM += key + '=' + '%(' + key + ')s' + ','
else:
SQL_COM += key + '=' + '%(' + key + ')s'
if whereparam is not None:
SQL_COM += ' WHERE '
for i,(key, val) in enumerate(whereparam.items()):
if len(whereparam.items()) != i +1 :
SQL_COM += key + '=' + '%(' + key + ')s' + ' AND '
else:
SQL_COM += key + '=' + '%(' + key + ')s'
dataparam.update(whereparam)
cur.execute(SQL_COM,dataparam)
conn.commit()
        conn.close()
print('UPDATE Success')
def delete_data(table_name,dataparam):
        ''' Delete rows from a table.
        dataparam = dict of column/value pairs ANDed together in the WHERE clause
'''
conn ,cur = My_pg_Access.connect()
_,_,_, _, _,scm = sql_db_config.SQL_CONFIG.configs()
SQL_COM = ''
SQL_COM = 'DELETE FROM ' + table_name
SQL_COM += ' WHERE '
for i,(key, val) in enumerate(dataparam.items()):
if len(dataparam.items()) != i +1 :
SQL_COM += key + '=' + '%(' + key + ')s' + ' AND '
else:
SQL_COM += key + '=' + '%(' + key + ')s'
SQL_COM += ''
cur.execute(SQL_COM,dataparam)
conn.commit()
        conn.close()
print('DELETE Success')
def truncate_data(table_name):
'''
        Truncate (empty) the given table.
'''
conn ,cur = My_pg_Access.connect()
_,_,_, _, _,scm = sql_db_config.SQL_CONFIG.configs()
SQL_COM = ''
SQL_COM = 'TRUNCATE TABLE ' + scm + '.' + table_name
cur.execute(SQL_COM)
conn.commit()
        conn.close()
print('TRUNCATE Success')
```
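A hypothetical usage sketch of `My_pg_Access` follows (Python 3 is assumed, as in the module itself; the table and column names `sensor_log`, `temp` and `id` are invented for illustration):

```python
# Hypothetical example; 'sensor_log', 'temp' and 'id' are made-up names.
from data_function.data_control import My_pg_Access

rows = My_pg_Access.select_data('SELECT * FROM sensor_log WHERE temp > %(t)s',
                                dataparam={'t': 30})       # -> pandas DataFrame
My_pg_Access.insert_data('sensor_log', {'temp': 25.4})     # INSERT one row
My_pg_Access.update_data('sensor_log', {'temp': 26.0},
                         whereparam={'id': 1})             # UPDATE matching rows
My_pg_Access.delete_data('sensor_log', {'id': 1})          # DELETE matching rows
```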
#### File: python/raspi_run_py/MPU6050.py
```python
import smbus
import math
from time import sleep
import data_function.data_control
#DEVICE Address
DEV_ADDR = 0x68
ACCEL_XOUT = 0x3b
ACCEL_YOUT = 0x3d
ACCEL_ZOUT = 0x3f
TEMP_OUT = 0x41
GYRO_XOUT = 0x43
GYRO_YOUT = 0x45
GYRO_ZOUT = 0x47
PWR_MGMT_1 = 0x6b
PWR_MGMT_2 = 0x6c
INTERVAL_TIME = 0.5  # sampling interval in seconds (use 1/30 for 30 samples per second)
bus = smbus.SMBus(1)
bus.write_byte_data(DEV_ADDR, PWR_MGMT_1, 0)
def read_byte(adr):
return bus.read_byte_data(DEV_ADDR, adr)
def read_word(adr):
high = bus.read_byte_data(DEV_ADDR, adr)
low = bus.read_byte_data(DEV_ADDR, adr+1)
val = (high << 8) + low
return val
def read_word_sensor(adr):
val = read_word(adr)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def get_temp():
temp = read_word_sensor(TEMP_OUT)
x = temp / 340 + 36.53
return x
def get_gyro_data_lsb():
x = read_word_sensor(GYRO_XOUT)
y = read_word_sensor(GYRO_YOUT)
z = read_word_sensor(GYRO_ZOUT)
return [x, y, z]
def get_gyro_data_deg():
x,y,z = get_gyro_data_lsb()
x = x / 131.0
y = y / 131.0
z = z / 131.0
return [x, y, z]
def get_accel_data_lsb():
x = read_word_sensor(ACCEL_XOUT)
y = read_word_sensor(ACCEL_YOUT)
z = read_word_sensor(ACCEL_ZOUT)
return [x, y, z]
def get_accel_data_g():
x,y,z = get_accel_data_lsb()
x = x / 16384.0
y = y / 16384.0
z = z / 16384.0
return [x, y, z]
while True:
data_param = {}
temp = get_temp()
gyro_x,gyro_y,gyro_z = get_gyro_data_deg()
accel_x,accel_y,accel_z = get_accel_data_g()
data_param['temp'] =temp
data_param['gyro_x'] = gyro_x
    data_param['gyro_y'] = gyro_y
data_param['gyro_z'] = gyro_z
data_param['accel_x'] = accel_x
data_param['accel_y'] = accel_y
data_param['accel_z'] = accel_z
data_function.data_control.My_pg_Access.insert_data(table_name = "insert_table_name",dataparam= data_param)
sleep(INTERVAL_TIME)
``` |
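If the module-level sampling loop above were factored into a function, a single reading could be taken and printed as below. This is a sketch only: it reuses the helper functions defined in MPU6050.py and assumes the sensor is wired to I2C bus 1 at address 0x68.

```python
# Sketch only: reuses the helpers above; assumes importing the module no longer
# starts the endless sampling loop.
temp = get_temp()                               # degrees Celsius
gyro_x, gyro_y, gyro_z = get_gyro_data_deg()    # degrees per second
accel_x, accel_y, accel_z = get_accel_data_g()  # multiples of g
print('T=%.2f C  gyro=(%.2f, %.2f, %.2f) deg/s  accel=(%.3f, %.3f, %.3f) g'
      % (temp, gyro_x, gyro_y, gyro_z, accel_x, accel_y, accel_z))
```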
{
"source": "jpneto/yocto",
"score": 4
} |
#### File: yocto/src/engine.py
```python
from fractions import Fraction
import operators as op
#########################################################################
def runProgram(program, args, verbose=False):
"""
program : list of strings with function definitions (one per index)
args : list of arguments
verbose : if true prints final stack state
"""
### strip comments
for (i,line) in enumerate(program):
line = line.lstrip(' \t') # remove leftmost whitespaces
if line.count('#') > 0:
if line.index('#') == 0: # entire line is a comment
continue
else:
                program[i] = program[i][:program[i].index('#')]  # strip the trailing comment
### initialize stack with arguments
stack = list(args) # save a copy of args
### prepare program state
state = {}
state['func_code'] = program[-1] # last line is main function
state['arity'] = len(args) # function arity
state['vars'] = {} # local variables
state['iterators'] = {} # local iterators
state['blocks'] = [] # stacked blocks
# push inputs into parameters variables and save them in state
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
state[parameter_symbols[i]] = args.pop()
### end prepare state
### run program with initial stack and state
stack = run(program, stack, state)
### in verbose mode, show remaining stack values (if any)
if verbose:
print('----- top stack -----')
result = None
for x in stack[::-1]:
result = outputValue(x)
if verbose:
print(result)
print('------ bottom ------')
### by default, we output the stack's top (or None if empty)
return outputValue(stack[-1]) if len(stack)>0 else None
#########################################################################
def run(program, stack, state, counter=0):
"""
program : a list of strings containing the entire program
stack : the current values at program stack
state : the current information defining the program state
counter : which index should be processed next from string
state['func_code'] (the current function)
"""
code = state['func_code']
while counter < len(code):
symbol = code[counter]
        if symbol in ' \t\n':               # skip whitespace (space, tab, newline)
counter += 1
elif symbol in '_0123456789': # a number
num, counter = readNumber(code, counter)
stack.append(Fraction(num,1))
elif symbol in '¹²³⁴⁵⁶⁷⁸⁹⁰' : # a function parameter
stack.append(state[symbol])
counter += 1
elif symbol == '"': # a string
string, counter = readString(code, counter)
stack.append(string)
counter += 1
elif symbol == '’': # a string with one char
string = code[counter+1]
stack.append(string)
counter += 2
elif symbol in 'ẇẋẏż': # a variable definition
idx = 'ẇẋẏż'.index(symbol)
x = stack.pop()
if isinstance(x,list):
x = list(x)
state['vars']['wxyz'[idx]] = x
counter += 1
elif symbol in 'wxyzï': # a variable use
stack.append(state['vars'][symbol])
counter += 1
elif symbol in 'ẉỵẓ': # iterator use
if symbol not in state['iterators']: # if NA, define it
state['iterators'][symbol] = 0 # init current index
variable = 'wxy'['ẉỵẓ'.index(symbol)]
var_index = state['iterators'][symbol]
var_list = state['vars'][variable]
stack.append(var_list[var_index])
state['iterators'][symbol] = (state['iterators'][symbol]+1) % len(var_list)
counter += 1
elif symbol in '?⁇': # conditionals
counter = runIf(program, stack, state, counter, symbol!='?')
elif symbol in 'FG': # FOR loop
counter = runForLoop(program, stack, state, counter, symbol=='G')
elif symbol in 'R': # REPEAT loop
counter = runRepeatLoop(program, stack, state, counter)
elif symbol in 'W': # WHILE loop
counter = runWhileLoop(program, stack, state, counter)
elif symbol in 'Y': # FIXED-POINT loop
counter = runFixedPointLoop(program, stack, state, counter)
elif symbol in 'Ṁ': # functional map
counter = runMap(program, stack, state, counter)
elif symbol in 'Ċ': # functional clear
counter = runClear(program, stack, state, counter)
elif symbol in 'Ḟ': # functional filter
counter = runFilter(program, stack, state, counter)
elif symbol in 'Ṙ': # functional reduce
counter = runReduce(program, stack, state, counter)
elif symbol in 'Ṡ': # functional scan
counter = runScan(program, stack, state, counter)
elif symbol in 'Ż': # functional binary zipWith
counter = runZipWith2(program, stack, state, counter)
elif symbol in '{': # block expression
block, counter = readBlock(code, counter+1) # +1 skips '{'
state['blocks'].append(block)
elif symbol in '¨': # one char block
state['blocks'].append(code[counter+1])
counter += 2
elif symbol in op.mapping: # default operation
operation = op.mapping[symbol]
operation(stack)
counter += 1
elif symbol in 'δλνμ': # a function call
counter += 1 # δ nullary function, λ unary (etc.)
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
# prepare state for function call
if code[counter] in '0123456789':
function_id, counter = readNumber(code, counter)
func_state['arity'] = 'δλνμ'.index(symbol)
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
runFunction(program, stack, func_state)
elif symbol in 'ə': # eval command
counter += 1
eval_state = {}
eval_state['blocks'] = [] # eval calls don't share blocks
eval_state['arity'] = state['arity']
eval_state['vars'] = state['vars']
eval_state['iterators'] = state['iterators']
expression = stack.pop()
if isinstance(expression, Fraction):
expression = op.fracToStr(expression)
elif isinstance(expression, str):
eval_state['func_code'] = expression # must be string
else:
eval_state['func_code'] = '¤' # if list, do nothing
run(program, stack, eval_state)
else:
# unassigned symbol (do nothing). If needed, use symbol ¤
counter += 1
return stack
############################################
def runFunction(program, stack, state, counter=0):
# consume enough stack to initialize parameter symbols
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
state[parameter_symbols[i]] = stack.pop()
# prepare stack to run function, ie, automatically push
# parameters into stack to use them implicitly in function
for symbol in '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']]:
stack.append(state[symbol])
run(program, stack, state)
############################################
def runMap(program, stack, state, counter):
function_id = int(stack.pop()) # which unary function to execute
xs = stack.pop() # a list to map function
if isinstance(xs, Fraction):
xs = [xs]
elif isinstance(xs, str):
xs = list(xs)
# prepare state for function call
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
func_state['arity'] = 1
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
results = []
for x in xs:
stack.append(x)
runFunction(program, stack, func_state)
results.append(stack.pop())
stack.append(results)
return counter + 1
############################################
def runClear(program, stack, state, counter):
function_id = int(stack.pop()) # which unary predicate to execute
xs = stack.pop() # a list to clear elements
if isinstance(xs, Fraction):
xs = [xs]
elif isinstance(xs, str):
xs = list(xs)
# prepare state for function call
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
func_state['arity'] = 1
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
results = []
for x in xs:
stack.append(x)
runFunction(program, stack, func_state)
output = stack.pop()
if output == 0:
results.append('')
else:
results.append(x)
stack.append(results)
return counter + 1
############################################
def runFilter(program, stack, state, counter):
function_id = int(stack.pop()) # which unary function to execute
xs = stack.pop() # a list to filter function
if isinstance(xs, Fraction):
xs = [xs]
elif isinstance(xs, str):
xs = list(xs)
# prepare state for function call
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
func_state['arity'] = 1
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
results = []
for x in xs:
stack.append(x)
runFunction(program, stack, func_state)
output = stack.pop()
if output != 0:
results.append(x)
stack.append(results)
return counter + 1
############################################
def runReduce(program, stack, state, counter):
function_id = int(stack.pop()) # which binary function to execute
default_val = stack.pop() # neutral element of function operation
xs = stack.pop() # a list to filter function
if isinstance(xs, Fraction):
xs = [xs]
elif isinstance(xs, str):
xs = list(xs)
# prepare state for function call
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
func_state['arity'] = 2
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
result = default_val
for x in xs[::-1]: # f(x1, f(x2, ... f(xn,default)))
stack.append(x)
stack.append(result)
runFunction(program, stack, func_state)
result = stack.pop()
stack.append(result)
return counter + 1
############################################
def runScan(program, stack, state, counter):
function_id = int(stack.pop()) # which binary function to execute
default_val = stack.pop() # neutral element of function operation
xs = stack.pop() # a list to filter function
if isinstance(xs, Fraction):
xs = [xs]
elif isinstance(xs, str):
xs = list(xs)
# prepare state for function call
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
func_state['arity'] = 2
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
results = [default_val]
for x in xs: # accumulate reduce results
stack.append(x)
stack.append(results[-1])
runFunction(program, stack, func_state)
results.append( stack.pop() )
stack.append(results)
return counter + 1
############################################
def runZipWith2(program, stack, state, counter):
function_id = int(stack.pop()) # which binary function to execute
ys = stack.pop() # second list
xs = stack.pop() # first list
if isinstance(xs, Fraction):
xs = [xs]
elif isinstance(xs, str):
xs = list(xs)
# prepare state for function call
func_state = {}
func_state['blocks'] = [] # function calls don't share blocks
func_state['arity'] = 2
func_state['vars'] = state['vars']
func_state['iterators'] = {}
func_state['func_code'] = program[function_id]
results = []
for x,y in zip(xs,ys): # zip lists
stack.append(x)
stack.append(y)
runFunction(program, stack, func_state)
results.append( stack.pop() )
stack.append(results)
return counter + 1
############################################
def runIf(program, stack, state, counter, isIfThenElse):
block1 = state['blocks'].pop()
if isIfThenElse: # block2 block1 IF
block2 = state['blocks'].pop() # then else
x = stack.pop()
if op.isTrue(x) or isIfThenElse:
ifState = {}
ifState['arity'] = state['arity']
ifState['vars'] = state['vars']
ifState['iterators'] = state['iterators']
ifState['blocks'] = []
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
ifState[parameter_symbols[i]] = state[parameter_symbols[i]]
ifState['func_code'] = block2 if op.isTrue(x) and isIfThenElse else block1
run(program, stack, ifState)
return counter + 1
############################################
# stack top can be Fraction, list of Fractions
def runForLoop(program, stack, state, counter, invert):
loopBlock = state['blocks'].pop()
loopState = {}
loopState['arity'] = state['arity']
loopState['vars'] = state['vars']
loopState['iterators'] = state['iterators']
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
loopState[parameter_symbols[i]] = state[parameter_symbols[i]]
loopState['blocks'] = []
x = stack.pop()
# there are two options, stack's top is a number or a indexed type
if (isinstance(x, Fraction)):
# regular for loop
for i in range(int(x)):
if invert:
i = int(x)-i-1
# a for-loop place the progress variable at the block's beginning
loopState['func_code'] = op.fracToStr(i) + ' ' + loopBlock # loop body
loopState['vars']['ï'] = Fraction(i) # progress variable
run(program, stack, loopState)
else: # or else, it is a for-each
if invert:
x = x[::-1]
for i in x:
# a for-loop place the progress variable at the block's beginning
loopState['func_code'] = op.fracToStr(i) + ' ' + loopBlock # loop body
loopState['vars']['ï'] = Fraction(i) # progress variable
run(program, stack, loopState)
return counter+1
############################################
# same as For without the progress variable
def runRepeatLoop(program, stack, state, counter):
loopBlock = state['blocks'].pop()
loopState = {}
loopState['arity'] = state['arity']
loopState['vars'] = state['vars']
loopState['iterators'] = state['iterators']
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
loopState[parameter_symbols[i]] = state[parameter_symbols[i]]
loopState['blocks'] = []
x = stack.pop()
# there are two options, stack's top is a number or a indexed type
if (isinstance(x, Fraction)):
# regular for loop
for _ in range(int(x)):
# a for-loop place the progress variable at the block's beginning
loopState['func_code'] = loopBlock # loop body
run(program, stack, loopState)
else: # or else, it is a for-each
for _ in x:
# a for-loop place the progress variable at the block's beginning
loopState['func_code'] = loopBlock # loop body
run(program, stack, loopState)
return counter+1
############################################
def runWhileLoop(program, stack, state, counter):
loopBlock = state['blocks'].pop()
loopState = {}
loopState['arity'] = state['arity']
loopState['vars'] = state['vars']
loopState['iterators'] = state['iterators']
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
loopState[parameter_symbols[i]] = state[parameter_symbols[i]]
loopState['blocks'] = []
loopState['func_code'] = loopBlock # loop body
while op.isTrue(stack.pop()):
run(program, stack, loopState)
return counter + 1
############################################
def runFixedPointLoop(program, stack, state, counter):
loopBlock = state['blocks'].pop()
loopState = {}
loopState['arity'] = state['arity']
loopState['vars'] = state['vars']
loopState['iterators'] = state['iterators']
parameter_symbols = '¹²³⁴⁵⁶⁷⁸⁹⁰'[:state['arity']][::-1]
for i in range(state['arity']):
loopState[parameter_symbols[i]] = state[parameter_symbols[i]]
loopState['blocks'] = []
loopState['func_code'] = loopBlock # loop body
guard = True
while guard: # do-while
previous_top = stack[-1]
run(program, stack, loopState)
guard = previous_top != stack[-1] # repeat if changed
return counter + 1
############################################
def readNumber(code, counter):
if code[counter] == '_':
negative = True
counter += 1
else:
negative = False
num = 0
while counter < len(code) and code[counter] in '0123456789':
num = num*10 + int(code[counter])
counter += 1
num = -num if negative else num
return (num, counter)
############################################
def readString(code, counter):
result = ''
counter += 1
while code[counter] != '"':
result += code[counter]
counter += 1
return (result, counter)
############################################
def readBlock(code, counter):
level = 0
result = ''
while code[counter] != '}' or level > 0:
result += code[counter]
if code[counter] == '{':
level += 1
if code[counter] == '}':
level -= 1
counter += 1
return (result, counter+1) # +1 skips last '}'
############################################
# translate final result (which can be from diff types) for simple output
def outputValue(value):
if (isinstance(value, list) and len(value)>0 and
all([isinstance(x, str) and len(x)==1 for x in value])): # it is a string
return ''.join(value)
elif isinstance(value, list):
return [ outputValue(x) for x in value ]
else:
return getValue(value)
def getValue(value):
if isinstance(value, Fraction):
if int(value) == value:
return int(value)
else:
return float(value)
else:
return str(value)
############################################
# checks which is the highest parameter mentioned
# at the function code, that is its arity
def compute_arity(function_code):
params = '¹²³⁴⁵⁶⁷⁸⁹'[::-1]
for (i,c) in enumerate(params):
if c in function_code:
return 9-i
return 0
############################################
# prints the 5 largest prime numbers under 100, in descending order.
# program = ['¹>','¹┅ṗ¹Ḟ0_5:ḷ'] # filter using func 0
# data = [Fraction(100)]
# print(runProgram(program, data))
# program = ['"abc"…'] # filter using func 0
# data = []
# runProgram(program, data, True)
# program = ['¹ẇ1{w,↓ẇw}W']
# program = ['¹{ ↑ẇ1{ w,↓ẇw } W¶ }F']
# program = ['¹{┅.}F']
# program = ['{↑’**.}F']
# data = [Fraction(5)]
# program = ['5┅ 2î']
# program = ['"dz" 4… *']
# data = [Fraction(5)]
# program = ['5┅ḷẇ wḣ. wṫ. wḥ. wṭ. Ȧḥḷ.']
# data = []
#program = ['50ẇ 0Lṫ{w}?']
############
``` |
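The commented examples at the bottom of `engine.py` show how `runProgram` is driven. Re-activating the first of them gives a small runnable sketch (assuming the module is importable as `engine`, alongside its `operators` module):

```python
# Sketch based on the first commented example in engine.py:
# prints the 5 largest prime numbers under 100, in descending order.
from fractions import Fraction
from engine import runProgram

program = ['¹>', '¹┅ṗ¹Ḟ0_5:ḷ']   # helper function 0 plus the main function
print(runProgram(program, [Fraction(100)]))
```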
{
"source": "jpnevrones/pracmln",
"score": 2
} |
#### File: mln/inference/gibbs.py
```python
from pracmln.mln.inference.maxwalk import SAMaxWalkSAT
from pracmln.mln.mrfvars import BinaryVariable
from pracmln.mln.inference.mcmc import MCMCInference
import random
import math
from pracmln.mln.constants import ALL
from pracmln.mln.util import ProgressBar, out, stop
import numpy
from collections import defaultdict
from pracmln.mln.grounding.fastconj import FastConjunctionGrounding
from pracmln.logic.common import Logic
from pracmln.mln.errors import SatisfiabilityException
class GibbsSampler(MCMCInference):
def __init__(self, mrf, queries=ALL, **params):
MCMCInference.__init__(self, mrf, queries, **params)
self.var2gf = defaultdict(set)
grounder = FastConjunctionGrounding(mrf, simplify=True, unsatfailure=True, cache=None)
for gf in grounder.itergroundings():
if isinstance(gf, Logic.TrueFalse): continue
vars_ = set(map(lambda a: self.mrf.variable(a).idx, gf.gndatoms()))
for v in vars_: self.var2gf[v].add(gf)
@property
def chains(self):
return self._params.get('chains', 1)
@property
def maxsteps(self):
return self._params.get('maxsteps', 500)
class Chain(MCMCInference.Chain):
def __init__(self, infer, queries):
MCMCInference.Chain.__init__(self, infer, queries)
mrf = infer.mrf
def _valueprobs(self, var, world):
sums = [0] * var.valuecount()
for gf in self.infer.var2gf[var.idx]:
possible_values = []
for i, value in var.itervalues(self.infer.mrf.evidence):
possible_values.append(i)
world_ = var.setval(value, list(world))
truth = gf(world_)
if truth == 0 and gf.ishard:
sums[i] = None
elif sums[i] is not None and not gf.ishard:
sums[i] += gf.weight * truth
# set all impossible values to None (i.e. prob 0) since they
# might still be have a value of 0 in sums
for i in [j for j in range(len(sums)) if j not in possible_values]: sums[i] = None
expsums = numpy.array([numpy.exp(s) if s is not None else 0 for s in sums])
Z = sum(expsums)
probs = expsums / Z
return probs
def step(self):
mrf = self.infer.mrf
# reassign values by sampling from the conditional distributions given the Markov blanket
state = list(self.state)
for var in mrf.variables:
# compute distribution to sample from
values = list(var.values())
if len(values) == 1: # do not sample if we have evidence
continue
probs = self._valueprobs(var, self.state)
# check for soft evidence and greedily satisfy it if possible
idx = None
# if isinstance(var, BinaryVariable):
# atom = var.gndatoms[0]
# p = mrf.evidence[var.gndatoms[0]]
# if p is not None:
# belief = self.soft_evidence_frequency(atom)
# if p > belief and expsums[1] > 0:
# idx = 1
# elif p < belief and expsums[0] > 0:
# idx = 0
# sample value
if idx is None:
r = random.uniform(0, 1)
idx = 0
s = probs[0]
while r > s:
idx += 1
s += probs[idx]
var.setval(values[idx], self.state)
# update results
self.update(self.state)
def _run(self, **params):
'''
        Run Gibbs sampling: step all chains until convergence or until
        ``maxsteps`` is reached, and return the estimated probabilities
        of the query formulas.
'''
# if softEvidence is None:
# self.softEvidence = self.mln.softEvidence
# else:
# self.softEvidence = softEvidence
# initialize chains
chains = MCMCInference.ChainGroup(self)
for i in range(self.chains):
chain = GibbsSampler.Chain(self, self.queries)
chains.chain(chain)
# if self.softEvidence is not None:
# chain.setSoftEvidence(self.softEvidence)
# do Gibbs sampling
# if verbose and details: print "sampling..."
converged = 0
steps = 0
if self.verbose:
bar = ProgressBar(width=100, color='green', steps=self.maxsteps)
while converged != self.chains and steps < self.maxsteps:
converged = 0
steps += 1
for chain in chains.chains:
chain.step()
if self.verbose:
bar.inc()
bar.label('%d / %d' % (steps, self.maxsteps))
# if self.useConvergenceTest:
# if chain.converged and numSteps >= minSteps:
# converged += 1
# if verbose and details:
# if numSteps % infoInterval == 0:
# print "step %d (fraction converged: %.2f)" % (numSteps, float(converged) / numChains)
# if numSteps % resultsInterval == 0:
# chainGroup.getResults()
# chainGroup.printResults(shortOutput=True)
# get the results
return chains.results()[0]
```
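A hedged sketch of driving the sampler: it assumes an MLN and an evidence database have already been loaded with pracmln, that `mln.ground(db)` yields the MRF, and that the inherited `Inference.run()` entry point is used; the query string is made up.

```python
# Hedged sketch; 'smokes(X)' is an invented query, and mln/db are assumed to be
# a pracmln MLN and Database loaded elsewhere.
mrf = mln.ground(db)
sampler = GibbsSampler(mrf, queries='smokes(X)', chains=2, maxsteps=200, verbose=True)
results = sampler.run()
print(results)
```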
#### File: pracmln/utils/widgets.py
```python
from Tkinter import _setit, Menu, TclError, Frame, StringVar, Button, Text,\
IntVar, Checkbutton, Entry, OptionMenu, Scrollbar, Grid, Place, Pack
from ScrolledText import ScrolledText
from string import ascii_letters, digits, punctuation
import re
from Tkconstants import NONE, INSERT, LEFT, W, END, DISABLED, NORMAL, RIGHT, Y,\
TOP, BOTTOM, X, BOTH, HORIZONTAL, SEL
from tkFileDialog import askopenfilename
import tkMessageBox
import tkSimpleDialog
from pracmln import praclog
from pracmln.utils.project import mlnpath
from pracmln.mln.util import trace, out
try:
import Pmw # @UnresolvedImport
havePMW = True
except:
havePMW = False
import os
import ntpath
from fnmatch import fnmatch
#import keyword
BOLDFONT = '*-Monospace-Bold-R-Normal-*-12-*'
ITALICFONT = '*-Monospace-Medium-O-Normal-*-12-*'
logger = praclog.logger(__name__)
class ScrolledText2(Text):
def __init__(self, master=None, change_hook=None, **kw):
self.frame = Frame(master)
self.vbar = Scrollbar(self.frame)
self.vbar.pack(side=RIGHT, fill=Y)
self.hbar = Scrollbar(self.frame, orient=HORIZONTAL)
self.hbar.pack(side=BOTTOM,fill=X)
kw.update({'yscrollcommand': self.vbar.set})
kw.update({'xscrollcommand': self.hbar.set})
kw.update({'wrap': 'none'})
Text.__init__(self, self.frame, **kw)
self.pack(side=LEFT, fill=BOTH, expand=True)
self.vbar['command'] = self.yview
self.hbar['command'] = self.xview
# Copy geometry methods of self.frame without overriding Text
# methods -- hack!
text_meths = vars(Text).keys()
methods = vars(Pack).keys() + vars(Grid).keys() + vars(Place).keys()
methods = set(methods).difference(text_meths)
for m in methods:
if m[0] != '_' and m != 'config' and m != 'configure':
setattr(self, m, getattr(self.frame, m))
def __str__(self):
return str(self.frame)
class Highlighter(object):
def __init__(self):
# syntax highlighting definitions
self.tags = {
'com': dict(foreground='#22aa22',font=ITALICFONT), # comment
'mlcom': dict(foreground='#22aa22',font=ITALICFONT), # multi-line comment
'str': dict(foreground='darkcyan'), # string
'kw': dict(foreground='blue'), # keyword
'obj': dict(foreground='#00F'), # function/class name
'number': dict(foreground='darkred'), # number
'op' : dict(foreground='blue'), # operator
'bracket_hl': dict(background="yellow"), # bracket highlighting
'var': dict(font=ITALICFONT), # variable highlighting
'pred': dict(font=BOLDFONT) # predicate hightlighting
}
self.brackets = (('(',')'), ('{', '}'))
self.open_brackets = map(lambda x: x[0], self.brackets)
self.close_brackets = map(lambda x: x[1], self.brackets)
self.operators = ['v', '^', '!', '+', '=>', '<=>']
self.keywords = [] #keyword.kwlist
class BLNHighlighter(Highlighter):
def __init__(self):
Highlighter.__init__(self)
self.keywords = ["type", "Type", "fragments", "isa", "random", "logical", "relationKey", "constraints", "guaranteed", "combining-rule", "uniform-default", "prolog"]
class SyntaxHighlightingText(ScrolledText2):
# constructor
def __init__(self, root, change_hook = None, highlighter = None, grammar=None):
ScrolledText2.__init__(self,root,change_hook)
# Non-wrapping, no border, undo turned on, max undo 50
self.text = self # For the methods taken from IDLE
self.root = root
self.change_hook = change_hook
self.characters = ascii_letters + digits + punctuation
self.tabwidth = 8 # for IDLE use, must remain 8 until Tk is fixed
self.indentwidth = 4
self.indention = 0 # The current indention level
self.set_tabwidth(self.indentwidth) # IDLE...
self.previous_line = "0"
# create a popup menu
self.menu = Menu(root, tearoff=0)
self.menu.add_command(label="Undo", command=self.edit_undo)
self.menu.add_command(label="Redo", command=self.edit_redo)
#self.menu.add_command(type="separator")
self.menu.add_command(label="Cut", command=self.cut)
self.menu.add_command(label="Copy", command=self.copy)
self.menu.add_command(label="Paste", command=self.paste)
self.bind('<KeyRelease>', self.key_release) # For scanning input
self.bind('<Return>',self.autoindent) # Overides default binding
#self.bind('<Tab>',self.autoindent) # increments self.indention
#self.bind('<BackSpace>',self.autoindent) # decrements self.indention
self.bind('<Button-3>', self.popup) # right mouse button opens popup
self.bind('<Button-1>', self.recolorCurrentLine) # left mouse can reposition cursor, so recolor (e.g. bracket highlighting necessary)
self.bind('<Control-Any-KeyPress>', self.ctrl)
self.grammar = grammar
self.setHighlighter(highlighter)
def setHighlighter(self, highlighter):
if highlighter == None:
highlighter = Highlighter()
self.highlighter = highlighter
# sets up the tags
for tag, settings in self.highlighter.tags.items():
self.tag_config(tag, **settings)
def popup(self, event):
self.menu.post(event.x_root, event.y_root)
def get_tabwidth(self):
# From IDLE
current = self['tabs'] or 5000
return int(current)
def set_tabwidth(self, newtabwidth):
# From IDLE
text = self
if self.get_tabwidth() != newtabwidth:
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
def remove_singleline_tags(self, start, end):
for tag in self.highlighter.tags.keys():
if tag[:2] != 'ml':
self.tag_remove(tag, start, end)
def get_selection_indices(self):
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None
# Select all the text in textbox
def select_all(self):
self.tag_add(SEL, "1.0", END)
self.mark_set(INSERT, END)
self.see(INSERT)
self.focus_set()
return 'break'
def cut(self,event=0):
self.clipboard_clear()
Selection=self.get_selection_indices()
if Selection is not None:
SelectedText = self.get(Selection[0],Selection[1])
self.delete(Selection[0],Selection[1])
self.clipboard_append(SelectedText)
self.onChange()
def copy(self,event=0):
self.clipboard_clear()
Selection=self.get_selection_indices()
if Selection is not None:
SelectedText = self.get(Selection[0],Selection[1])
self.clipboard_append(SelectedText)
def paste(self,event=0):
# This should call colorize for the pasted lines.
SelectedText = self.root.selection_get(selection='CLIPBOARD')
Selection=self.get_selection_indices()
if Selection is not None:
self.delete(Selection[0],Selection[1])
self.insert(INSERT, SelectedText)
self.onChange()
return "break"
def autoindent(self,event):
if event.keysym == 'Return':
self.edit_separator() # For undo/redo
index = self.index(INSERT).split('.')
#print index
line = int(index[0])
column = int(index[1])
if self.get('%s.%d'%(line, column-1)) == ':':
self.indention += 1
#print '\n',
#print '\t'*self.indention
self.insert(INSERT,'\n')
self.insert(INSERT,'\t'*self.indention)
return 'break' # Overides standard bindings
elif event.keysym == 'Tab':
self.edit_separator()
self.indention += 1
#print self.indention
elif event.keysym == 'BackSpace':
self.edit_separator()
index = self.index(INSERT).split('.')
#print index
line = int(index[0])
column = int(index[1])
if self.get('%s.%d'%(line, column-1)) == '\t':
self.indention -= 1
def recolorCurrentLine(self, *foo):
pos = self.index(INSERT)
cline = pos.split('.')[0]
#print "recoloring %s, %s" % (cline, self.previous_line)
if cline != self.previous_line: self.colorize(self.previous_line)
self.colorize(cline)
self.previous_line = cline
def key_release(self, key):
#print "pressed", key.keysym, dir(key)
if key.char in ' :[(]),"\'':
self.edit_separator() # For undo/redo
# recolorize the current line and the previous line (if it's a different one)
self.recolorCurrentLine()
# if delete or backspace were pressed, check if a multiline comment has to be removed
pos = self.index(INSERT)
if key.keysym in ("BackSpace", "Delete"):
#print "removal at %s" % pos
ranges = self.tag_ranges('mlcom')
i = 0
while i < len(ranges):
range = ranges[i:i+2]
second_range = (self.index(str(range[0]) + " + 1 char"), self.index(str(range[1]) + " - 1 char"))
#print pos, range, second_range
if pos in range or pos in second_range:
self.tag_remove('mlcom', range[0], range[1])
i += 2
# notify of change if any. masks for the key.state variable
# 0x0001 Shift.
# 0x0002 Caps Lock.
# 0x0004 Control.
# 0x0008 Left-hand Alt.
# 0x0010 Num Lock.
# 0x0080 Right-hand Alt.
# 0x0100 Mouse button 1.
# 0x0200 Mouse button 2.
# 0x0400 Mouse button 3.
if key.char != '' and not (key.state & 4) or key.keysym in ("BackSpace", "Delete"):
self.onChange()
else:
pass
#print key
def onChange(self):
if self.change_hook is not None:
self.change_hook()
def delete_current_line(self):
selection = self.get_selection_indices()
if selection is None:
start = int(self.index(INSERT).split('.')[0])
end = start
else:
start = int(selection[0].split('.')[0])
end = int(selection[1].split('.')[0])
self.delete('%d.0' % start, '%d.end' % end)
self.onChange()
# return 'break'
def ctrl(self, key):
if key.keysym == 'c': return self.copy()
elif key.keysym == 'x': return self.cut()
elif key.keysym == 'v': return self.paste()
elif key.keysym == 'a': return self.select_all()
elif key.keysym == 'd': return self.delete_current_line()
#pass # apparently now implemented in the control itself
# edit: yes, but with counterintuitive behavior
def colorize(self, cline):
cursorPos = self.index(INSERT)
buffer = self.get('%s.%d' % (cline,0), '%s.end' % (cline))
# remove non-multiline tags
self.remove_singleline_tags('%s.%d'% (cline, 0), '%s.end'% (cline))
in_quote = False
quote_start = 0
for i in range(len(buffer)):
here = '%s.%d' % (cline, i)
# strings
if buffer[i] in ['"',"'"]: # Doesn't distinguish between single and double quotes...
if in_quote:
self.tag_add('str', '%s.%d' % (cline, quote_start), '%s.%d' % (cline, i+1))
in_quote = False
else:
quote_start = i
in_quote = True
if not in_quote:
# operators
if False:
for op in self.highlighter.operators:
if buffer[i:i+len(op)] == op:
self.tag_add('op', "%s.%d" % (cline, i), "%s.%d" % (cline, i+len(op)))
# comments
if buffer[i:i+2] == "//":
self.tag_add('com', '%s.%d' % (cline, i), '%s.end' % (cline))
# multiline comments
elif buffer[i:i+2] == "/*":
if not here in self.tag_ranges('mlcom'):
end_pos = self.search("*/", here, forwards=True) # get the end of the comment
if not end_pos:
continue
if self.search("/*", here + " + 2 chars", stopindex=end_pos): # if there's a nested comment, ignore it (it might just be a nested /* with a */)
continue
#!!! make sure the area does not contain any "/*", because the "*/" is not the right one otherwise
#print "multiline comment from %s to %s" % (here, str(end_pos))
self.tag_add('mlcom', here, str(end_pos) + " + 2 chars")
elif buffer[i:i+2] == "*/":
end_pos = self.index(here + " + 2 chars")
if not end_pos in self.tag_ranges('mlcom'):
start_pos = self.search("/*", here, backwards=True) # get the beginning of the comment
if not start_pos:
continue
if self.search("*/", here, stopindex=start_pos, backwards=True): # if there's a nested comment, ignore it (it might just be a nested */ without a /*)
continue
#print "multiline comment from %s to %s" % (start_pos, end_pos)
self.tag_add('mlcom', start_pos, end_pos)
# bracket highlighting
elif buffer[i] in self.highlighter.open_brackets and here == cursorPos:
idxBracketType = self.highlighter.open_brackets.index(buffer[i])
openb, closeb = self.highlighter.brackets[idxBracketType]
stack = 1
for j,c in enumerate(buffer[i+1:]):
if c == openb:
stack += 1
elif c == closeb:
stack -= 1
if stack == 0:
self.tag_add('bracket_hl', here, here + " + 1 char")
self.tag_add('bracket_hl', "%s.%d" % (cline, i+1+j), "%s.%d" % (cline, i+1+j+1))
break
elif buffer[i] in self.highlighter.close_brackets and self.index(here + " + 1 char") == cursorPos:
idxBracketType = self.highlighter.close_brackets.index(buffer[i])
openb, closeb = self.highlighter.brackets[idxBracketType]
stack = 1
l = list(buffer[:i])
l.reverse()
for j,c in enumerate(l):
if c == closeb:
stack += 1
elif c == openb:
stack -= 1
if stack == 0:
self.tag_add('bracket_hl', here, here + " + 1 char")
self.tag_add('bracket_hl', "%s.%d" % (cline, i-1-j), "%s.%d" % (cline, i-1-j+1))
break
# tokens
start, end = 0, 0
obj_flag = 0
# variable and predicate highlighting
for match in re.finditer('(\\?[a-zA-Z0-9]+|[\w]*[a-zA-Z]\\()', buffer):
token = match.group(0)
if self.grammar is not None and self.grammar.isvar(token):
self.tag_add('var', '%s.%d' % (cline, match.start()), '%s.%d' % (cline, match.end()))
elif token[-1] == '(':
self.tag_add('pred', '%s.%d' % (cline, match.start()), '%s.%d' % (cline, match.end()-1))
for token in buffer.split(' '):
end = start + len(token)
start_index = '%s.%d' % (cline, start)
end_index = '%s.%d' % (cline, end)
if obj_flag:
self.tag_add('obj', start_index, end_index)
obj_flag = 0
# keywords
if token.strip() in self.highlighter.keywords:
self.tag_add('kw', start_index, end_index)
if token.strip() in ['def','class']:
obj_flag = 1
else:
# numbers
try:
float(token)
except ValueError:
pass
else:
self.tag_add('number', '%s.%d' % (cline, start), "%s.%d" % (cline, end))
start += len(token)+1
def insert(self, index, text, *args):
line = int(self.index(index).split(".")[0])
Text.insert(self, index, text, *args)
for i in range(text.count("\n")):
self.colorize(str(line+i))
def disable(self, disable):
Text.config(self, state=DISABLED if disable else NORMAL)
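# Hypothetical usage sketch (assumes a Tk root window; Python 2 / Tkinter):
#
#     root = Tk()
#     editor = SyntaxHighlightingText(root, highlighter=BLNHighlighter())
#     editor.pack(fill=BOTH, expand=True)
#     root.mainloop()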
class FileEditBar(Frame, object):
def __init__(self, master, dir='.', filesettings=None, defaultname='*unknown{}', importhook=None,
deletehook=None, projecthook=None, filecontenthook=None, selectfilehook=None,
fileslisthook=None, updatehook=None, onchangehook=None):
self.master = master
Frame.__init__(self, master)
self._dirty = False
self._dirty_file_name = ''
self._editor_dirty = False
self.dir = dir
self.fsettings = filesettings
self.defaultname = defaultname
# hooks
self.import_hook = importhook
self.delete_hook = deletehook
self.save_project_hook = projecthook
self.filecontent_hook = filecontenthook
self.update_hook = updatehook
self.select_file_hook = selectfilehook
self.files_list_hook = fileslisthook
self.onchange_hook = onchangehook
Frame.__init__(self, master)
row = 0
self.columnconfigure(1, weight=2)
self.selected_file = StringVar(master)
files = []
self.file_buffer = {}
self.file_reload = True
if len(files) == 0: files.append("")
self.list_files = apply(OptionMenu, (self, self.selected_file) + tuple(files))
self.list_files.grid(row=row, column=1, sticky="NWE")
self.selected_file.trace("w", self.select_file)
# new file
self.btn_newfile = Button(self, text='New', command=self.new_file)
self.btn_newfile.grid(row=row, column=2, sticky="E")
# import file
self.btn_importfile = Button(self, text='Import', command=self.import_file)
self.btn_importfile.grid(row=row, column=3, sticky="E")
# delete file
self.btn_delfile = Button(self, text='Delete', command=self.delete_file)
self.btn_delfile.grid(row=row, column=4, sticky="E")
# save button
self.btn_update_file = Button(self, text='Save', command=self.save_file)
self.btn_update_file.grid(row=row, column=6, sticky="E")
# save as.. button
self.btn_saveas_file = Button(self, text='Save as...', command=self.saveas_file)
self.btn_saveas_file.grid(row=row, column=7, sticky="E")
# editor
row += 1
self.editor = SyntaxHighlightingText(self, change_hook=self.onchange_filecontent)
self.editor.grid(row=row, column=1, columnspan=7, sticky="NWES")
self.rowconfigure(row, weight=1)
@property
def dirty(self):
return self._dirty or self.file_buffer != {}
@dirty.setter
def dirty(self, d):
self._dirty = (d or self.file_buffer != {})
if self.onchange_hook:
self.onchange_hook(dirty=self._dirty)
def new_file(self):
self.list_files['menu'].add_command(label=self.defaultname.format(self.fsettings.get('extension', '.mln')), command=_setit(self.selected_file, self.defaultname.format(self.fsettings.get('extension', '.mln'))))
self.selected_file.set(self.defaultname.format(self.fsettings.get('extension', '.mln')))
self.file_buffer[self.defaultname.format(self.fsettings.get('extension', '.mln'))] = ''
self.editor.delete("1.0", END)
self.dirty = True
def import_file(self):
filename = askopenfilename(initialdir=self.dir, filetypes=self.fsettings.get('ftypes'), defaultextension=self.fsettings.get('extension', '.mln'))
if filename:
fpath, fname = ntpath.split(filename)
self.dir = os.path.abspath(fpath)
content = mlnpath(filename).content
if self.import_hook is not None:
self.import_hook(fname, content)
self.update_file_choices()
self.selected_file.set(fname)
self.dirty = True
def delete_file(self):
fname = self.selected_file.get().strip()
# remove element from project mlns and buffer
if fname in self.file_buffer:
del self.file_buffer[fname]
if self.delete_hook is not None:
self.delete_hook(fname)
f = self.update_file_choices()
out(f)
# select first element from remaining list
if f: self.list_files['menu'].invoke(0)
else:
self.selected_file.set('')
self.editor.delete("1.0", END)
self.dirty = True
def save_all_files(self):
current = self.selected_file.get().strip()
for f in self.file_buffer:
content = self.file_buffer[f]
if f == current:
content = self.editor.get("1.0", END).strip()
if self.update_hook is not None:
self.update_hook(f, f.strip('*'), content)
# reset buffer, dirty flag for editor and update mln selections
self.file_buffer.clear()
self._editor_dirty = False
self.update_file_choices()
self.dirty = False
if self.save_project_hook is not None:
self.save_project_hook()
def save_file(self):
oldfname = self.selected_file.get().strip()
if oldfname == self.defaultname.format(self.fsettings.get('extension', '.mln')):
self.saveas_file()
else:
self.update_file(oldfname, new=oldfname.strip('*'), askoverwrite=False)
def saveas_file(self):
oldfname = self.selected_file.get().strip()
res = tkSimpleDialog.askstring('Save as', "Enter a filename", initialvalue=oldfname.strip('*'))
if res is None: return
elif res:
if not res.endswith(self.fsettings.get('extension')):
res = res + self.fsettings.get('extension')
self.update_file(oldfname, new=res)
def update_file(self, old, new=None, askoverwrite=True):
success = 1
content = self.editor.get("1.0", END).strip()
if self.update_hook is not None:
success = self.update_hook(old.strip('*'), new, content, askoverwrite=askoverwrite)
if success != -1:
if old in self.file_buffer:
del self.file_buffer[old]
# reset dirty flag for editor and update mln selections
self._editor_dirty = False
self.update_file_choices()
fn = new if new is not None and new != '' else old
if new != '': self.selected_file.set(fn)
self.dirty = False
if self.save_project_hook is not None:
self.save_project_hook()
def select_file(self, *args):
filename = self.selected_file.get().strip()
self.dirty = True
if filename is not None and filename != '':
# filename is neither None nor empty
if self._editor_dirty:
# save current state to buffer before updating editor
self.file_buffer[self._dirty_file_name] = self.editor.get("1.0", END).strip()
self._editor_dirty = True if '*' in filename else False
if not self.file_reload:
self.file_reload = True
return
if '*' in filename:# is edited
# load previously edited content from buffer instead of mln file in project
content = self.file_buffer.get(filename, '').strip()
self.editor.delete("1.0", END)
content = content.replace("\r", "")
self.editor.insert(INSERT, content)
self._editor_dirty = True
self._dirty_file_name = '*' + filename if '*' not in filename else filename
return
if self.files_list_hook is not None and self.filecontent_hook is not None:
files = self.files_list_hook()
if filename in files:
# load content from mln file in project
content = self.filecontent_hook(filename)
self.editor.delete("1.0", END)
content = content.replace("\r", "")
self.editor.insert(INSERT, content)
self._editor_dirty = False
else:
# should not happen
self.editor.delete("1.0", END)
self.list_files['menu'].delete(0, 'end')
def update_file_choices(self):
self.list_files['menu'].delete(0, 'end')
files = []
if self.files_list_hook is not None:
files = self.files_list_hook()
new_files = sorted([i for i in files if '*'+i not in self.file_buffer] + self.file_buffer.keys())
for f in new_files:
self.list_files['menu'].add_command(label=f, command=_setit(self.selected_file, f))
return new_files
def onchange_filecontent(self, *args):
if not self._editor_dirty:
self._editor_dirty = True
self.dirty = True
self.file_reload = False # do not reload file, only change filename to *filename
fname = self.selected_file.get().strip()
fname = '*' + fname if '*' not in fname else fname
self._dirty_file_name = fname
self.file_buffer[self._dirty_file_name] = self.editor.get("1.0", END).strip()
self.update_file_choices()
self.selected_file.set(self._dirty_file_name)
def clear(self, keep=False):
self.file_buffer.clear()
if not keep:
self.editor.delete("1.0", END)
self.dirty = False
class FilePickEdit(Frame):
def __init__(self, master, file_mask, default_file, edit_height = None, user_onChange = None,
rename_on_edit=0, font = None, coloring=True, allowNone=False, highlighter=None, directory='.'):
'''
file_mask: file mask (e.g. "*.foo") or list of file masks (e.g. ["*.foo", "*.abl"])
'''
self.master = master
self.directory = directory
self.user_onChange = user_onChange
Frame.__init__(self, master)
row = 0
self.unmodified = True
self.allowNone = allowNone
self.file_extension = ""
if type(file_mask) != list:
file_mask = [file_mask]
if "." in file_mask[0]:
self.file_extension = file_mask[0][file_mask[0].rfind('.'):]
# read filenames
self.file_mask = file_mask
self.updateList()
# filename frame
self.list_frame = Frame(self)
self.list_frame.grid(row=row, column=0, sticky="WE")
self.list_frame.columnconfigure(0, weight=1)
# create list
self.picked_name = StringVar(self)
self.makelist()
# refresh button
self.refresh_button = Button(self.list_frame, text='<- refresh', command=self.refresh, height=1)
self.refresh_button.grid(row=0, column=1, sticky='E')
# save button
self.save_button = Button(self.list_frame, text="save", command=self.save, height=1)
self.save_button.grid(row=0, column=2, sticky="E")
# editor
row += 1
if coloring:
self.editor = SyntaxHighlightingText(self, self.onEdit, highlighter=highlighter)
else:
self.editor = ScrolledText2(self, self.onEdit)
if font != None:
self.editor.configure(font=font)
if edit_height is not None:
self.editor.configure(height=edit_height)
self.editor.grid(row=row, column=0, sticky="NEWS")
self.rowconfigure(row, weight=1)
self.columnconfigure(0, weight=1)
# option to change filename on edit
row += 1
self.options_frame = Frame(self)
self.options_frame.grid(row=row, column=0, sticky=W)
self.rename_on_edit = IntVar()
self.cb = Checkbutton(self.options_frame, text="rename on edit", variable=self.rename_on_edit)
self.cb.pack(side=LEFT)
self.cb.configure(command=self.onChangeRename)
self.rename_on_edit.set(rename_on_edit)
# filename frame
row += 1
self.filename_frame = Frame(self)
self.filename_frame.grid(row=row, column=0, sticky="WE")
self.filename_frame.columnconfigure(0, weight=1)
# save as filename
self.save_name = StringVar(self)
self.save_edit = Entry(self.filename_frame, textvariable = self.save_name)
self.save_edit.grid(row=0, column=0, sticky="WE")
self.save_name.trace("w", self.onSaveChange)
# pick default if applicableButton
self.select(default_file)
self.row = row
def setDirectory(self, directory, keep=False):
self.directory = directory
self.updateList()
self.makelist()
# menu = self.list["menu"] scrolledlist
# menu = self.list.listbox#["scrolledlist"]
# menu.delete(0, 'end')
# add the new ones
# for filename in self.files:
# menu.add_command(label=filename, command=_setit(self.picked_name, filename, None))
# if keep is true, only the files list will be updated but the content of the
# text area will not be altered/removed
if not keep: self.select("")
def refresh(self):
sel = self.get()
self.updateList()
self.select(sel, notify=False)
def reloadFile(self):
self.editor.delete("1.0", END)
filename = self.picked_name.get()
if os.path.exists(os.path.join(self.directory, filename)):
new_text = file(os.path.join(self.directory, filename)).read()
if new_text.strip() == "":
new_text = "// %s is empty\n" % filename;
new_text = new_text.replace("\r", "")
else:
new_text = ""
self.editor.insert(INSERT, new_text)
def setText(self, txt):
'''
Replaces the text in the edit field as by typing
into it.
'''
self.select("")
if txt.strip() == "":
txt = "// empty database\n";
self.editor.insert(INSERT, txt)
self.onEdit()
def onSelChange(self, name, index=0, mode=0):
self.reloadFile()
filename = self.picked_name.get()
self.save_name.set(filename)
self.save_edit.configure(state=DISABLED)
self.unmodified = True
if self.user_onChange != None:
self.user_onChange(filename)
def onSaveChange(self, name, index, mode): pass
# if self.user_onChange != None:
# self.user_onChange(self.save_name.get())
def autoRename(self):
# modify "save as" name
filename = self.picked_name.get()
if filename == "": filename = "new" + self.file_extension # if no file selected, create new filename
ext = ""
extpos = filename.rfind(".")
if extpos != -1: ext = filename[extpos:]
base = filename[:extpos]
hpos = base.rfind("-")
num = 0
if hpos != -1:
try:
num = int(base[hpos+1:])
base = base[:hpos]
except:
pass
while True:
num += 1
filename = "%s-%d%s" % (base, num, ext)
if not os.path.exists(filename):
break
self.save_name.set(filename)
# user callback
if self.user_onChange != None:
self.user_onChange(filename)
def onEdit(self):
if self.unmodified == True:
self.unmodified = False
# do auto rename if it's enabled or there is no file selected (editing new file)
if self.rename_on_edit.get() == 1 or self.picked_name.get() == "":
self.autoRename()
# enable editing of save as name
self.save_edit.configure(state=NORMAL)
def onChangeRename(self):
# called when clicking on "rename on edit" checkbox
if self.rename_on_edit.get() == 1:
if (not self.unmodified) and self.save_name.get() == self.picked_name.get():
self.autoRename()
else:
self.save_name.set(self.picked_name.get())
def updateList(self):
self.files = []
if self.allowNone:
self.files.append("")
if os.path.exists(self.directory):
for filename in os.listdir(self.directory):
for fm in self.file_mask:
if fnmatch(filename, fm):
self.files.append(filename)
self.files.sort()
if len(self.files) == 0 and not self.allowNone: self.files.append("(no %s files found)" % str(self.file_mask ))
def select(self, filename, notify=True):
''' selects the item given by filename '''
if filename in self.files:
if not havePMW:
self.picked_name.set(filename)
else:
self.list.selectitem(self.files.index(filename))
if notify: self.onSelChange(filename)
else:
self.editor.delete("1.0", END)
def makelist(self):
if havePMW:
self.list = Pmw.ComboBox(self.list_frame,
selectioncommand = self.onSelChange,
scrolledlist_items = self.files,
)
self.list.grid(row=0, column=0, padx=0, pady=0, sticky="NEWS")
self.list.component('entryfield').component('entry').configure(state = 'readonly', relief = 'raised')
self.picked_name = self.list
else:
self.list = apply(OptionMenu, (self.list_frame, self.picked_name) + tuple(self.files))
self.list.grid(row=0, column=0, sticky="NEW")
self.picked_name.trace("w", self.onSelChange)
def save(self):
self.get()
def set(self, selected_item):
self.select(selected_item)
def get(self):
''' gets the name of the currently selected file, saving it first if necessary '''
filename = self.save_name.get()
if self.unmodified == False:
self.unmodified = True
# save the file
f = file(os.path.join(self.directory, filename), "w")
f.write(self.editor.get("1.0", END).encode('utf-8'))
f.close()
# add it to the list of files
# if not filename in self.files:
# self.files.append(filename)
# self.files.sort()
# self.list.destroy()
# self.makelist()
# set it as the new pick
#if havePMW:
# self.picked_name.selectitem(self.files.index(filename), 1)
#else:
# self.picked_name.set(filename)
# self.select(filename)
self.refresh()
self.select(filename, notify=False)
self.save_edit.configure(state=DISABLED)
return filename
def get_text(self):
return self.editor.get("1.0", END)
def get_filename(self):
return self.save_name.get()
def set_enabled(self, state):
self.editor.configure(state=state)
if havePMW:
self.list.component('entryfield_entry').configure(state=state)
# self.list.component('arrowbutton').configure(state=state)
self.list.component('arrowbutton').bind('<1>', (lambda a: 'break') if state==DISABLED else self.list._postList)
else:
self.list.configure(state=state)
self.save_button.configure(state=state)
self.cb.configure(state=state)
self.save_edit.configure(state=state)
class FilePick(Frame):
def __init__(self, master, file_mask, default_file, user_onChange = None, font = None, dirs = (".", ), allowNone = False):
''' file_mask: file mask or list of file masks '''
self.master = master
self.user_onChange = user_onChange
Frame.__init__(self, master)
self.columnconfigure(0, weight=1)
self.unmodified = True
self.file_extension = ""
if "." in file_mask:
self.file_extension = file_mask[file_mask.rfind('.'):]
if type(file_mask) != list:
file_mask = [file_mask]
self.file_masks = file_mask
self.allowNone = allowNone
self.dirs = dirs
# create list of files
self.updateList()
# pick default if applicable
self.set(default_file)
def onSelChange(self, name, index=0, mode=0):
filename = self.picked_name.get()
if self.user_onChange != None:
self.user_onChange(filename)
def updateList(self):
prev_sel = self.get()
# get list of files (paths)
self.files = []
if self.allowNone:
self.files.append("")
for fm in self.file_masks:
for dir in self.dirs:
try:
for filename in os.listdir(dir):
if fnmatch(filename, fm):
if dir != ".":
path = os.path.join(dir, filename)
else:
path = filename
self.files.append(path)
except:
pass
self.files.sort()
if len(self.files) == 0: self.files.append("(no %s files found)" % self.file_masks)
# create list object
self._makelist()
# reselect
self.set(prev_sel)
def getList(self):
''' returns the current list of files '''
return self.files
def _makelist(self):
if havePMW:
self.list = Pmw.ComboBox(self,
selectioncommand = self.onSelChange,
scrolledlist_items = self.files,
)
self.list.grid(row=0, column=0, padx=0, sticky="NEWS")
self.list.component('entryfield').component('entry').configure(state = 'readonly', relief = 'raised')
self.picked_name = self.list
else:
self.picked_name = StringVar(self)
self.list = apply(OptionMenu, (self, self.picked_name) + tuple(self.files))
self.list.grid(row=0, column=0, sticky="NEW")
self.picked_name.trace("w", self.onSelChange)
def set(self, filename):
default_file = filename
if default_file in self.files:
if not havePMW:
self.picked_name.set(default_file) # default value
else:
self.list.selectitem(self.files.index(default_file))
self.onSelChange(default_file)
pass
def get(self):
if not hasattr(self, 'picked_name'):
return None
return self.picked_name.get()
class DropdownList:
def __init__(self, master, filemask='*.mln', default=None, allowNone=False, onselchange=None, directory='.'):
self.allowNone = allowNone
self.directory = directory
self.list_frame = master
self.onchange = onselchange
if type(filemask) != list:
filemask = [filemask]
self.file_mask = filemask
self.updateList()
if havePMW:
self.list = Pmw.ComboBox(master, selectioncommand = onselchange, scrolledlist_items = self.files)
self.list.component('entryfield').component('entry').configure(state = 'readonly', relief = 'raised')
self.picked_name = self.list
else:
self.picked_name = StringVar(master)
self.list = apply(OptionMenu, (master, self.picked_name) + tuple(self.files))
if onselchange is not None:
self.picked_name.trace("w", onselchange)
if default is not None:
self.select(default)
else:
self.select(self.files[0])
def __getattr__(self, name):
return getattr(self.list, name)
def get(self):
return self.picked_name.get()
def select(self, item):
if item in self.files:
if not havePMW:
self.picked_name.set(item)
else:
self.list.selectitem(item)
def updateList(self):
self.files = []
if self.allowNone:
self.files.append("")
if os.path.exists(self.directory):
for filename in os.listdir(self.directory):
for fm in self.file_mask:
if fnmatch(filename, fm):
self.files.append(filename)
self.files.sort()
if len(self.files) == 0 and not self.allowNone: self.files.append("(no %s files found)" % str(self.file_mask))
def makelist(self):
if havePMW:
self.list = Pmw.ComboBox(self.list_frame,
selectioncommand = self.onSelChange,
scrolledlist_items = self.files,
)
self.list.grid(row=0, column=0, padx=0, pady=0, sticky="NEWS")
self.list.component('entryfield').component('entry').configure(state = 'readonly', relief = 'raised')
self.picked_name = self.list
else:
self.list = apply(OptionMenu, (self.list_frame, self.picked_name) + tuple(self.files))
self.list.grid(row=0, column=0, sticky="NEW")
self.picked_name.trace("w", self.onSelChange)
self.select(self.files[0])
def setDirectory(self, directory, keep=False):
self.directory = directory
self.updateList()
self.makelist()
# if keep is true, only the files list will be updated but the content of the
# text area will not be altered/removed
if not keep: self.select("")
def onSelChange(self, name, index=0, mode=0):
filename = self.picked_name.get()
if self.onchange != None:
self.onchange(filename)
class Checkbox(Checkbutton):
def __init__(self, master, text, default=None, **args):
self.var = IntVar()
Checkbutton.__init__(self, master, text=text, variable=self.var, **args)
if default is not None:
self.var.set(default)
def get(self):
return self.var.get()
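# Illustrative usage sketch (added; not part of the original module). The Tk
# root window and the '*.mln' file mask are assumptions:
#
#   root = Tk()
#   picker = FilePick(root, "*.mln", "", dirs=(".",))
#   picker.grid(row=0, column=0, sticky="NEW")
#   verbose = Checkbox(root, "verbose", default=1)
#   verbose.grid(row=1, column=0, sticky="W")
#   root.mainloop()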
``` |
{
"source": "jpnewman/ansible-role-elk-kibana",
"score": 2
} |
#### File: files/kibana/sync_kibana_objects.py
```python
import argparse
import urllib2
import fnmatch
import codecs
import json
import sys
import os
import re
FOLDER_OBJECT_KEY_DICT = {
"dashboards": {
"index": ".kibana",
"type": "dashboard",
"include": [
"^artifactory$",
"^redis$"
]
},
"searches": {
"index": ".kibana",
"type": "search",
"include": [
"^Errors$",
"^Gerrit-.*$",
"^artifactory_.*$",
"^grokparsefailure$"
]
},
"visualizations": {
"index": ".kibana",
"type": "visualization",
"include": [
"^artifactory_.*$",
"^redis-.*$"
"^gerrit-.*$"
]
},
"templates": {
"index": "_template",
"type": "",
"command": "",
"exclude": [
"^filebeat$",
"^packetbeat$",
"^topbeat$",
"^triggered_watches$",
"^watch_history$",
"^watches$"
]
},
"mapping": {
"index": "_mapping",
"type": "",
"command": "",
"exclude": [
"^.*$"
]
}
}
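# Note added for clarity: each key above names the local folder that objects
# are synced to/from, while 'index' and 'type' select the Elasticsearch index
# and document type. The optional 'include'/'exclude' lists are regular
# expressions matched against the object ids / template names in
# should_save_data() below.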
# http://blog.mathieu-leplatre.info/colored-output-in-console-with-python.html
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def __has_colors(stream, allow_piping=False):
"""Check if Console Has Color."""
if not hasattr(stream, "isatty"):
return False
if not stream.isatty(): # not being piped or redirected
return allow_piping # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
# Has Color Init
has_colors = __has_colors(sys.stdout, True)
# Support methods
def colorText(text, color=WHITE):
"""Color Text."""
if has_colors:
return "\x1b[1;%dm" % (30 + color) + str(text) + "\x1b[0m"
return text
def print_color_text(msg, color=WHITE):
"""Print Color Text."""
print(colorText(msg, color))
def header(msg, overline_char='=', underline_char='='):
"""Print Header."""
print_color_text(overline_char * 80, CYAN)
print_color_text(msg, CYAN)
print_color_text(underline_char * 80, CYAN)
def sub_header(msg, overline_char='-', underline_char='-'):
"""Print Sub-Header."""
header(msg, overline_char, underline_char)
def get_local_files(folder, file_filter='*.json'):
"""Get local Objects."""
found_files = []
for root, dirs, files in os.walk(folder):
for filename in fnmatch.filter(files, file_filter):
found_files.append(os.path.join(root, filename))
return found_files
# Kibana API
def kibana_api_request(url, method='GET', filename=None):
"""Kibana API request."""
data = {}
if filename:
curl_url = "curl -X%s %s -T %s" % (method, url, filename)
else:
curl_url = "curl -X%s %s" % (method, url)
print_color_text(curl_url, MAGENTA)
opener = urllib2.build_opener(urllib2.HTTPHandler)
if filename:
with open(filename) as f:
file_data = f.read()
request = urllib2.Request(url,
data=file_data)
request.add_header('Content-Type', 'application/json')
else:
request = urllib2.Request(url)
request.get_method = lambda: method
try:
response = opener.open(request)
data = json.loads(response.read())
report_api_response(data)
except urllib2.HTTPError as err:
if err.code == 404:
print("WARN: File not found: %s" % url)
else:
raise
return data
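# Illustrative calls (host, index and filename are assumptions, not from the
# original source):
#   kibana_api_request('http://elk-server:9200/.kibana/dashboard/_search?size=100', 'GET')
#   kibana_api_request('http://elk-server:9200/.kibana/dashboard/redis', 'PUT', 'dashboards/redis.json')
#   kibana_api_request('http://elk-server:9200/.kibana/dashboard/redis', 'DELETE')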
def report_api_response(json_data):
"""Report API response."""
output_data_name_dict = {
"_version": "Version",
"created": "Created",
"acknowledged": "Acknowledged"
}
response_arr = []
for json_name, name in output_data_name_dict.iteritems():
if json_name in json_data:
response_arr.append("%s: %r" % (name,
json_data[json_name]))
print('\t'.join(response_arr))
if '_shards' in json_data and json_data['_shards']['failed']:
print("ERROR: Upload failed!")
# Download methods
def download_via_api(es_url_data,
elasticsearch_host,
max_size,
folder,
save_all=False):
"""Download from kibana."""
if not os.path.isdir(folder):
os.makedirs(folder)
es_url = '/'.join([elasticsearch_host,
es_url_data['index'],
es_url_data['type']])
es_url = es_url.rstrip('/')
es_command = False
if re.match('^_', es_url_data['index']):
url = es_url
es_command = True
else:
url = "%s/_search" % (es_url)
# url += '?pretty=true' # NOTE: pretty output is done by 'json.dumps' in function 'save_objects'
url += "?size=%s" % max_size
data = kibana_api_request(url, 'GET')
if es_command:
save_templates(es_url_data, data, folder, save_all)
else:
save_objects(es_url_data, data, folder, save_all)
def should_save_data(es_url_data, test_string, save_all=False):
"""Filter Data."""
if save_all:
return True
if 'include' not in es_url_data and 'exclude' not in es_url_data:
sys.stdout.write('+ ')
return True
if 'include' in es_url_data:
combined_include = "(" + ")|(".join(es_url_data['include']) + ")"
if re.match(combined_include, test_string):
sys.stdout.write('+ ')
return True
if 'exclude' in es_url_data:
combined_exclude = "(" + ")|(".join(es_url_data['exclude']) + ")"
if re.match(combined_exclude, test_string):
sys.stdout.write('- ')
return False
else:
sys.stdout.write('- ')
return False
sys.stdout.write('+ ')
return True
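# Note added for clarity (hypothetical arguments):
#   should_save_data({'include': ['^redis$']}, 'redis')       -> True
#   should_save_data({'exclude': ['^filebeat$']}, 'filebeat') -> False
# An es_url_data entry with neither list always returns True, and
# save_all=True bypasses the filtering entirely.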
def save_objects(es_url_data, data, folder, save_all=False):
"""Save Objects."""
print("Total '%s' objects found: %s" % (colorText(es_url_data['type'], WHITE),
colorText(data['hits']['total'], WHITE)))
for obj in data['hits']['hits']:
if should_save_data(es_url_data, obj['_id'], save_all):
print_color_text(obj['_id'], GREEN)
output_file_path = os.path.join(folder, obj['_id']) + '.json'
file = codecs.open(output_file_path, "w", "utf-8")
file.write(json.dumps(obj['_source'], indent=4, sort_keys=False))
file.close()
else:
print(obj['_id'])
def save_templates(es_url_data, data, folder, save_all=False):
"""Save Templates."""
print("Total templates found: %d" % len(data))
for template, template_data in data.iteritems():
if should_save_data(es_url_data, template, save_all):
print_color_text(template, GREEN)
output_file_path = os.path.join(folder, template) + '.json'
file = codecs.open(output_file_path, "w", "utf-8")
file.write(json.dumps(template_data, indent=4, sort_keys=False))
file.close()
else:
print(template)
# Upload methods
def upload_via_api(es_url_data, elasticsearch_host, folder):
"""Upload to kibana."""
sub_header("Uploading...")
files = get_local_files(folder)
for filename in files:
file_title = os.path.basename(os.path.splitext(filename)[0])
print(file_title)
es_url = '/'.join([elasticsearch_host,
es_url_data['index'],
es_url_data['type']])
es_url = es_url.rstrip('/')
url = "%s/%s" % (es_url,
file_title)
kibana_api_request(url, 'PUT', filename)
# Delete methods
def delete_via_api(es_url_data, elasticsearch_host, folder):
"""Delete found local objects from kibana."""
sub_header("Deleting...")
files = get_local_files(folder)
for filename in files:
file_title = os.path.basename(os.path.splitext(filename)[0])
print(file_title)
es_url = '/'.join([elasticsearch_host,
es_url_data['index'],
es_url_data['type']])
es_url = es_url.rstrip('/')
url = "%s/%s" % (es_url,
file_title)
kibana_api_request(url, 'DELETE')
# Main
def main():
"""Main."""
parser = argparse.ArgumentParser(description='Get Kibana Templates')
parser.add_argument('elasticsearch_host',
nargs='?',
default='http://10.10.10.10:9200',
help='Elasticsearch Host')
parser.add_argument('--download',
action='store_true',
default=True,
help='Download objects and templates from kibana')
parser.add_argument('--upload',
action='store_true',
default=False,
help='Upload objects and templates to kibana')
parser.add_argument('--delete',
action='store_true',
default=False,
help='Delete objects and templates from kibana')
parser.add_argument('--save_all',
action='store_true',
default=False,
help='Save All Data')
parser.add_argument('--max_size',
type=int,
default='1024',
help='Elasticsearch Max Hit Size')
args = parser.parse_args()
if args.upload or args.delete:
args.download = False
args.elasticsearch_host = args.elasticsearch_host.rstrip('/')
header('Sync Kibana Objects and Templates\n' +
args.elasticsearch_host)
for folder, es_url_data in FOLDER_OBJECT_KEY_DICT.iteritems():
sub_header(folder)
if args.download:
download_via_api(es_url_data,
args.elasticsearch_host,
args.max_size,
folder,
args.save_all)
elif args.upload:
upload_via_api(es_url_data,
args.elasticsearch_host,
folder)
elif args.delete:
delete_via_api(es_url_data,
args.elasticsearch_host,
folder)
if __name__ == "__main__":
main()
``` |
{
"source": "jpnewman/ansible-role-locale-timezone",
"score": 3
} |
#### File: ansible-role-locale-timezone/tests/test_default.py
```python
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
def test_python_package(host):
"""Test Python is installed."""
python = host.package('python')
assert python.is_installed
def test_timezone(host):
"""Test Timezone."""
timezone = host.command('cat /etc/timezone')
assert timezone.stdout == 'Etc/UTC'
def test_timedatectl(host):
"""Test timedatectl."""
timezone = host.command(r'timedatectl | grep -E "Time\s{0,1}zone"')
assert 'Etc/UTC' in timezone.stdout
``` |
{
"source": "jpnewman/elasticsearch-scripts",
"score": 2
} |
#### File: sync_elasticsearch_objects/commands/delete.py
```python
import os
from utils.output import *
from utils.files import get_local_files
from es_api import elasticsearch_api_request
def delete_via_api(sync_object, es_url_data, elasticsearch_host, folder, debug=False, dry_run=False):
"""Delete found local objects from Elasticsearch."""
header("Deleting ({0})...\n{1}".format(sync_object, elasticsearch_host))
sub_header(folder)
files = get_local_files(folder)
for filename in files:
file_title = os.path.basename(os.path.splitext(filename)[0])
print(file_title)
es_url = '/'.join([elasticsearch_host,
es_url_data['index'],
es_url_data['type']])
es_url = es_url.rstrip('/')
url = "%s/%s" % (es_url,
file_title)
elasticsearch_api_request(url, 'DELETE', debug=debug, dry_run=dry_run)
```
#### File: sync_elasticsearch_objects/commands/download.py
```python
import os
import re
from utils.output import *
from es_api import elasticsearch_api_request
from es_api.save import *
def download_via_api(sync_object,
es_url_data,
elasticsearch_host,
max_size,
folder,
save_all=False,
debug=False,
dry_run=False):
"""Download from Elasticsearch."""
header("Downloading ({0})...\n{1}".format(sync_object, elasticsearch_host))
sub_header(folder)
if not os.path.isdir(folder):
os.makedirs(folder)
es_url = '/'.join([elasticsearch_host,
es_url_data['index'],
es_url_data['type']])
es_url = es_url.rstrip('/')
es_command = False
if re.match('^_', es_url_data['index']):
url = es_url
es_command = True
else:
url = "%s/_search" % (es_url)
# url += '?pretty=true' # NOTE: pretty output is done by 'json.dumps' in function 'save_objects'
url += "?size=%s" % max_size
data = elasticsearch_api_request(url, 'GET', debug=debug, dry_run=dry_run)
if dry_run:
return
if es_command:
save_templates(es_url_data, data, folder, save_all)
else:
save_objects(es_url_data, data, folder, save_all)
```
#### File: sync_elasticsearch_objects/commands/reindex.py
```python
import sys
import elasticsearch
import elasticsearch.helpers
from utils.output import *
from utils.es_helpers import get_all_indices
def reindex_via_api(es, index_names, reindex_suffix, reindex_force_delete, elasticsearch_host):
"""Reindex Elasticsearch"""
header("Reindexing...\n{0}".format(elasticsearch_host))
test_new_indices(es, index_names, reindex_suffix, reindex_force_delete, elasticsearch_host)
indices = get_all_indices(es, elasticsearch_host, index_names)
if not indices:
return
for index in indices:
new_index_name = get_new_index_name(index, reindex_suffix)
print_color_text("Creating index: {0}".format(new_index_name), GREEN)
es.indices.create(new_index_name)
print_color_text("Reindexing: {0} -> {1}".format(index, new_index_name), BLUE)
elasticsearch.helpers.reindex(client=es, source_index=index, target_index=new_index_name)
print_color_text("Closing index: {0}".format(index), YELLOW)
es.indices.close(index=index)
def test_new_indices(es, index_names, reindex_suffix, reindex_force_delete, elasticsearch_host):
"""Test new indices."""
indices = get_all_indices(es, elasticsearch_host, index_names)
if not indices:
return
indices = sorted(indices)
for index in indices:
new_index_name = get_new_index_name(index, reindex_suffix)
if new_index_name in indices:
if reindex_force_delete:
print_color_text("Deleting index: {0}".format(new_index_name), MAGENTA)
es.indices.delete(new_index_name)
else:
print_color_text("ERROR: Target index already exists: {0}".format(new_index_name), RED)
sys.exit(-1)
def get_new_index_name(index_name, reindex_suffix):
"""Get New Index Name."""
new_index_name = index_name + reindex_suffix
return new_index_name.lower()
def reindex_process():
"""Reindex Process."""
pass
```
#### File: sync_elasticsearch_objects/es_api/save.py
```python
import os
import re
import sys
import json
import codecs
from utils.output import *
def should_save_data(es_url_data, test_string, save_all=False):
"""Filter Data."""
if save_all:
return True
if 'include' not in es_url_data and 'exclude' not in es_url_data:
sys.stdout.write('+ ')
return True
if 'include' in es_url_data:
combined_include = "(" + ")|(".join(es_url_data['include']) + ")"
if re.match(combined_include, test_string):
sys.stdout.write('+ ')
return True
if 'exclude' in es_url_data:
combined_exclude = "(" + ")|(".join(es_url_data['exclude']) + ")"
if re.match(combined_exclude, test_string):
sys.stdout.write('- ')
return False
else:
sys.stdout.write('- ')
return False
sys.stdout.write('+ ')
return True
def save_objects(es_url_data, data, folder, save_all=False):
"""Save Objects."""
print("Total '%s' objects found: %s" % (colorText(es_url_data['type'], WHITE),
colorText(data['hits']['total'], WHITE)))
for obj in data['hits']['hits']:
if should_save_data(es_url_data, obj['_id'], save_all):
print_color_text(obj['_id'], GREEN)
output_file_path = os.path.join(folder, obj['_id']) + '.json'
file = codecs.open(output_file_path, "w", "utf-8")
file.write(json.dumps(obj['_source'], indent=4, sort_keys=False))
file.close()
else:
print(obj['_id'])
def save_templates(es_url_data, data, folder, save_all=False):
"""Save Templates."""
print("Total templates found: %d" % len(data))
for template, template_data in data.iteritems():
if should_save_data(es_url_data, template, save_all):
print_color_text(template, GREEN)
output_file_path = os.path.join(folder, template) + '.json'
file = codecs.open(output_file_path, "w", "utf-8")
file.write(json.dumps(template_data, indent=4, sort_keys=False))
file.close()
else:
print(template)
```
#### File: sync_elasticsearch_objects/utils/args.py
```python
import os
import sys
import argparse
from argparse import ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter
from utils.output import *
# https://stackoverflow.com/questions/34544752/argparse-and-argumentdefaultshelpformatter-formatting-of-default-values-when-sy
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
if type(action.default) == type(sys.stdin):
print(action.default.name)
help += ' (default: ' + str(action.default.name) + ')'
else:
help += ' (default: %(default)s)'
return help
def parse_args():
"""Parse Args."""
args_epilog = """
e.g.
{0} http://elk-server:9200 <TASK>
""".format(os.path.basename(__file__))
parser = argparse.ArgumentParser(description='Syncs Elasticsearch and Kibana Objects',
formatter_class=CustomFormatter,
epilog=args_epilog)
parser.add_argument('elasticsearch_host',
nargs='?',
default='http://10.10.10.10:9200',
help='Elasticsearch Host')
task_args_group = parser.add_argument_group('TASKS')
task_args_group.add_argument('--download',
action='store_true',
default=False,
help='Download objects and templates from Elasticsearch')
task_args_group.add_argument('--upload',
action='store_true',
default=False,
help='Upload objects and templates to Elasticsearch')
task_args_group.add_argument('--reindex',
nargs='+',
default=[],
help='Reindex Elasticsearch indices')
task_args_group.add_argument('--delete',
action='store_true',
default=False,
help='Delete objects and templates from Elasticsearch')
download_args_group = parser.add_argument_group('Download Options')
download_args_group.add_argument('--save-all',
action='store_true',
default=False,
help='Saves All Data')
download_args_group.add_argument('--max_size',
type=int,
default='1024',
help='Elasticsearch Download Max Hit Size')
reindex_args_group = parser.add_argument_group('Reindex Options')
reindex_args_group.add_argument('--reindex_suffix',
type=str,
default='_v1',
help='Suffix for the new target index')
reindex_args_group.add_argument('--reindex_force_delete',
action='store_true',
default=False,
help='Delete new target index if it exists')
options_args_group = parser.add_argument_group('General Options')
options_args_group.add_argument('--config',
type=str,
default='sync_elasticsearch_objects.yml',
help='Config File')
options_args_group.add_argument('--sync_local_folder',
type=str,
default='_OUTPUT',
help='Sync local Folder')
options_args_group.add_argument('--debug',
action='store_true',
default=False,
help='Debug output')
options_args_group.add_argument('--dry-run',
action='store_true',
default=False,
help='Dry-run. No action taken')
args = parser.parse_args()
if not args.download \
and not args.upload \
and not args.reindex \
and not args.delete \
and not args.save_all:
parser.print_help()
sys.exit(-1)
args.elasticsearch_host = args.elasticsearch_host.rstrip('/')
if args.dry_run:
print_color_text("WARN: Executing in Dry-Run mode. No action will be taken!", RED)
return args
```
#### File: sync_elasticsearch_objects/utils/output.py
```python
import sys
# http://blog.mathieu-leplatre.info/colored-output-in-console-with-python.html
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def __has_colors(stream, allow_piping=False):
"""Check if Console Has Color."""
if not hasattr(stream, "isatty"):
return False
if not stream.isatty(): # not being piped or redirected
return allow_piping # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
# Has Color Init
has_colors = __has_colors(sys.stdout, True)
# Support methods
def colorText(text, color=WHITE):
"""Color Text."""
if has_colors:
return "\x1b[1;%dm" % (30 + color) + str(text) + "\x1b[0m"
return text
def print_color_text(msg, color=WHITE):
"""Print Color Text."""
print(colorText(msg, color))
def header(msg, overline_char='=', underline_char='='):
"""Print Header."""
print_color_text(overline_char * 80, CYAN)
print_color_text(msg, CYAN)
print_color_text(underline_char * 80, CYAN)
def sub_header(msg, overline_char='-', underline_char='-'):
"""Print Sub-Header."""
header(msg, overline_char, underline_char)
``` |
{
"source": "jpnewman/jenkins-scripts",
"score": 3
} |
#### File: jenkins-scripts/jenkins-decrypt/decrypt_secrets.py
```python
import re
import base64
import argparse
import bcrypt
from hashlib import sha256
from Crypto.Cipher import AES
from lxml import etree
from enum import Enum
MAGIC = b'::::MAGIC::::'
OUTPUT_WIDTH = 80
DEBUG = False
class DecryptType(Enum):
unknown = 1
hudson_secret_key = 2
passwordHash = 3
class Secrets:
def __init__(self,
username,
description,
encrypted_secret,
decrypted_secret=None,
decrypt_type=DecryptType.unknown):
self.username = username
self.description = description
self.encrypted_secret = encrypted_secret
self.decrypted_secret = decrypted_secret
self.decrypt_type = decrypt_type
def __str__(self):
return "{0} : {1} : {2}".format(self.username,
self.description,
self.decrypted_secret)
def __repr__(self):
return self.__str__()
def print_header(msg, header_char='='):
"""Print Header."""
header_line = header_char * OUTPUT_WIDTH
print(header_line)
print(msg)
print(header_line)
def get_hudson_secret_key(master_key_file, hudson_secret_key_file):
master_key = open(master_key_file).read()
if DEBUG: print(master_key)
hudson_secret_key = open(hudson_secret_key_file, 'rb').read()
# if DEBUG: print(hudson_secret_key)
hashed_master_key = sha256(master_key).digest()[:16]
# if DEBUG: print(hashed_master_key)
o = AES.new(hashed_master_key, AES.MODE_ECB)
x = o.decrypt(hudson_secret_key)
assert MAGIC in x
return x
def parse_file(xml_file, secrets):
try:
tree = etree.parse(xml_file)
except Exception:
print("ERROR: Parsing XML File.")
return
root = tree.getroot()
'''
username_elem: The username element name
description_elem: The description element name
secret_elem: The secret element name.
decrypt_type: Decrypt type.
'''
data_elements = (
{'username_elem': 'username',
'secret_elem': 'password',
'decrypt_type': DecryptType.hudson_secret_key
},
{'username_elem': 'bindName',
'secret_elem': 'bindPassword',
'decrypt_type': DecryptType.hudson_secret_key
},
{'secret_elem': 'privateKeyPassword',
'decrypt_type': DecryptType.unknown
},
{'username_elem': 'gerritUserName',
'secret_elem': 'gerritAuthKeyFilePassword',
'decrypt_type': DecryptType.unknown
},
{'username_elem': 'username',
'secret_elem': 'passphrase',
'decrypt_type': DecryptType.hudson_secret_key
},
{'username_elem': '../username',
'description_elem': '../description',
'secret_elem': 'privateKey',
'decrypt_type': DecryptType.hudson_secret_key
},
{'username_elem': 'id',
'description_elem': 'description',
'secret_elem': 'secret',
'decrypt_type': DecryptType.hudson_secret_key
},
{'username_elem': '../../fullName',
'secret_elem': 'passwordHash',
'decrypt_type': DecryptType.passwordHash
}
)
for data_element in data_elements:
for secret_elem in root.iter(data_element['secret_elem']):
parent_elem = secret_elem.getparent()
username = ''
if 'username_elem' in data_element and \
data_element['username_elem'] is not None:
username_elem = parent_elem.find(data_element['username_elem'])
if username_elem is not None:
username = username_elem.text
description = ''
# if data_element['description_elem'] is not None:
if 'description_elem' in data_element and \
data_element['description_elem'] is not None:
description_elem = parent_elem.find(data_element['description_elem'])
if description_elem is not None:
description = description_elem.text
secret = Secrets(username,
description,
secret_elem.text,
None,
data_element['decrypt_type'])
# print(secret)
secrets.append(secret)
def decrypt_string(hudson_secret_key, encrypted_string):
k = hudson_secret_key[:-16]
k = k[:16]
p = base64.decodestring(encrypted_string)
o = AES.new(k, AES.MODE_ECB)
x = o.decrypt(p)
assert MAGIC in x
return x.split(MAGIC)[0]
def decrypt_hudson_secret_key_data(hudson_secret_key, secret_string):
if hudson_secret_key is None:
raise ValueError('ERROR: hudson_secret_key is None')
return decrypt_string(hudson_secret_key,
secret_string)
def check_jbcrypt_hash(jbcrypt_hash, secret_to_test):
if 'jbcrypt:' not in jbcrypt_hash:
# raise ValueError('ERROR: Not BCrypt string')
return "WARN: Not BCrypt string: {0}".format(jbcrypt_hash)
jbcrypt_hash = jbcrypt_hash.replace('#jbcrypt:', '')
if DEBUG: print(jbcrypt_hash)
if bcrypt.checkpw(secret_to_test, jbcrypt_hash):
return "OK: Secret matches string: {0}".format(secret_to_test)
return "INFO: Secret does not match string: {0}".format(secret_to_test)
def decrypt_data(secrets, hudson_secret_key=None, string_to_test=None):
for secret in secrets:
if secret.decrypt_type == DecryptType.hudson_secret_key:
secret.decrypted_secret = decrypt_hudson_secret_key_data(hudson_secret_key, secret.encrypted_secret)
elif secret.decrypt_type == DecryptType.passwordHash:
if string_to_test:
secret.decrypted_secret = check_jbcrypt_hash(secret.encrypted_secret, string_to_test)
def _parse_args():
"""Parse Command Arguments."""
global DEBUG
desc = 'Decrypts Jenkins secrets'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('master_key_file',
help='Master Key File')
parser.add_argument('secrets_file',
help='Secret File')
parser.add_argument('string',
nargs='?',
help='String')
parser.add_argument('-x', '--xml-file',
help='XML File')
parser.add_argument('-s', '--string-to-test',
help='String to test against hashed secret')
parser.add_argument('--debug',
action='store_true',
default=False,
help='Debug output')
args = parser.parse_args()
DEBUG = args.debug
return args
def output_secrets(secrets):
if len(secrets) > 0:
header_username = 'Username '.ljust(OUTPUT_WIDTH / 2)
header = "{0}Secret".format(header_username)
print_header(header, '-')
for secret in secrets:
left_column_width = (OUTPUT_WIDTH / 2) - 1
if secret.username or secret.decrypted_secret:
left_column = ''
if secret.username:
left_column = "{0}".format(secret.username)
if secret.description:
left_column = "{0} ({1})".format(left_column, secret.description)
decrypted_secret = ''
if secret.decrypted_secret:
decrypted_secret = secret.decrypted_secret
if len(decrypted_secret.split('\n')) == 1:
print("{0} {1}".format(left_column.ljust(left_column_width), decrypted_secret))
else:
print(left_column.ljust(left_column_width))
print(decrypted_secret)
def main():
args = _parse_args()
hudson_secret_key = get_hudson_secret_key(args.master_key_file,
args.secrets_file)
if args.xml_file:
print_header(args.xml_file)
secrets = []
parse_file(args.xml_file, secrets)
decrypt_data(secrets, hudson_secret_key, args.string_to_test)
output_secrets(secrets)
if args.string:
if 'jbcrypt:' in args.string and args.string_to_test:
decrypted_string = check_jbcrypt_hash(args.string, args.string_to_test)
else:
decrypted_string = decrypt_string(hudson_secret_key, args.string)
print(decrypted_string)
if __name__ == '__main__':
main()
``` |
{
"source": "jpnewman/job_hunt",
"score": 2
} |
#### File: job_hunt/app/__init__.py
```python
__author__ = '<NAME>'
import os
import sys
from flask import Flask, request, render_template, send_from_directory
from flask_debugtoolbar import DebugToolbarExtension
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config.from_object('config.DebugConfiguration')
# Scss
from flask_scss import Scss
Scss(app)
# Database
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Debug Toolbar
toolbar = DebugToolbarExtension(app)
# Register module blueprint
from app.jobs.views import mod as jobsModule
app.register_blueprint(jobsModule)
from app.agencies.views import mod as agenciesModule
app.register_blueprint(agenciesModule)
from app.recruiters.views import mod as recruitersModule
app.register_blueprint(recruitersModule)
from app.emails.views import mod as emailsModule
app.register_blueprint(emailsModule)
# Controllers
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'ico/favicon.ico')
@app.route("/")
def index():
return render_template('index.html')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(400)
def key_error(e):
app.logger.warning('Invalid request resulted in KeyError', exc_info=e)
return render_template('400.html'), 400
@app.errorhandler(500)
def internal_server_error(e):
app.logger.warning('An unhandled exception is being displayed to the end user', exc_info=e)
return render_template('generic.html'), 500
@app.errorhandler(Exception)
def unhandled_exception(e):
app.logger.error('An unhandled exception is being displayed to the end user', exc_info=e)
return render_template('generic.html'), 500
@app.before_request
def log_entry():
app.logger.debug("Handling request")
@app.teardown_request
def log_exit(exc):
app.logger.debug("Finished handling request", exc_info=exc)
# Logging
import logging
class ContextualFilter(logging.Filter):
def filter(self, log_record):
log_record.url = request.path
log_record.method = request.method
log_record.ip = request.environ.get("REMOTE_ADDR")
return True
context_provider = ContextualFilter()
app.logger.addFilter(context_provider)
del app.logger.handlers[:]
handler = logging.StreamHandler()
log_format = "%(asctime)s\t%(levelname)s\t%(ip)s\t%(method)s\t%(url)s\t%(message)s"
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
app.logger.addHandler(handler)
from logging import ERROR, DEBUG
from logging.handlers import TimedRotatingFileHandler
# Only set up a file handler if we know where to put the logs
if app.config.get("ERROR_LOG_PATH"):
log_folder = os.path.dirname(app.config["ERROR_LOG_PATH"])
if not os.path.exists(log_folder):
os.makedirs(log_folder)
# Create one file for each day. Delete logs over 7 days old.
file_handler = TimedRotatingFileHandler(app.config["ERROR_LOG_PATH"], when="D", backupCount=7)
file_formatter = logging.Formatter(log_format)
# Filter out all log messages that are lower than Error.
file_handler.setLevel(DEBUG) # ERROR
file_handler.setFormatter(file_formatter)
app.logger.addHandler(file_handler)
``` |
{
"source": "jpnewman/jpnewman_ansible_elk",
"score": 3
} |
#### File: test_app/gerrit/utils.py
```python
import re
from xml.dom.minidom import parseString
from dicttoxml import dicttoxml
from urllib.parse import quote_plus
def change_joined_id(project, branch, change_id):
return "{0}~{1}~{2}".format(quote_plus(project),
branch,
change_id)
def to_json(items):
message = '{"' + '", "'.join(['"=>"'.join([key, str(val)]) for key, val in items]) + '"}'
message = message.replace('"nil"', 'nil')
return message
def to_xml(dict_obj, custom_root_str):
xml = dicttoxml(dict_obj, custom_root=custom_root_str, attr_type=False)
dom = parseString(xml)
xml_message = dom.toprettyxml(indent=' ')
xml_message = re.sub('^<\?[\w\s=\"\.]+\?>', '', xml_message)
xml_message = xml_message.strip()
return xml_message
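# Illustrative usage (values are assumptions):
#   to_xml({'project': 'demo', 'branch': 'master'}, 'change')
# returns pretty-printed XML roughly of the form
#   <change>
#     <project>demo</project>
#     <branch>master</branch>
#   </change>
# with the XML declaration stripped by the regex above.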
```
#### File: jpnewman_ansible_elk/spec/test_elk-log.py
```python
def test_disk_free_space(Command):
command = Command("df -P / | awk '/%/ {print $5}' | sed -e 's/%//'")
assert int(command.stdout.strip()) <= 95
# logstash
def test_logstash_running_and_enabled(Service):
logstash = Service("logstash")
assert logstash.is_running
assert logstash.is_enabled
# def test_logstash_config(Command):
# command = Command('/usr/share/logstash/bin/logstash -t -f /etc/logstash/conf.d --path.settings=/etc/logstash/')
# assert command.rc == 0
# # assert command.stdout.rstrip() == 'Configuration OK'
def test_logstash_indexer_lumberjack_tcp_is_listening(Socket):
assert Socket("tcp://:::5000").is_listening
def test_logstash_indexer_beats_tcp_is_listening(Socket):
assert Socket("tcp://:::5044").is_listening
def test_logstash_indexer_syslog_tcp_is_listening(Socket):
assert Socket("tcp://:::5545").is_listening
def test_logstash_indexer_syslog_udp_is_listening(Socket):
assert Socket("udp://:::5545").is_listening
# log indexer
def test_ping_to_elk_server(Command):
command = Command('ping -c 4 -q elk-server')
assert command.rc == 0
assert '0% packet loss' in command.stdout.rstrip()
def test_connection_to_redis_server(Command):
command = Command('nc -z -v -w 5 elk-server 6379')
assert command.rc == 0
assert 'succeeded!' in command.stderr.rstrip()
def test_redis_command_output(Command):
command = Command('redis-cli -h elk-server ping')
assert command.rc == 0
assert command.stdout.rstrip() == 'PONG'
def test_connection_to_elasticsearch(Command):
command = Command('nc -z -v -w 5 elk-server 9200')
assert command.rc == 0
assert 'succeeded!' in command.stderr.rstrip()
# packetbeat
# def test_packetbeat_running_and_enabled(Service):
# packetbeat = Service("packetbeat")
# assert packetbeat.is_running
# assert packetbeat.is_enabled
# topbeat
# def test_topbeat_running_and_enabled(Service):
# topbeat = Service("topbeat")
# assert topbeat.is_running
# assert topbeat.is_enabled
``` |
{
"source": "jpnewman/papers",
"score": 3
} |
#### File: api/utils/decorators.py
```python
import re
from jose import jwt
from jose.exceptions import JWTError
from functools import wraps
from flask import current_app, request, g
from flask_restful import abort
from api.models.user import User
from api.models.file import File
def _decode_jwt():
token = request.headers.get('authorization').strip()
pattern = re.compile(r'^JWT\s+', re.IGNORECASE)
token = re.sub(pattern, '', token)
payload = jwt.decode(token, current_app.config['SECRET_KEY'],
algorithms=['HS256'])
return payload
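# Note added for clarity: a request header such as 'Authorization: JWT <token>'
# has its 'JWT ' prefix stripped and the remaining token is decoded with the
# app's SECRET_KEY; the decorators below expect the payload to carry the
# user 'id'.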
def login_required(f):
'''
This decorator checks the header to ensure a valid token is set
'''
@wraps(f)
def func(*args, **kwargs):
try:
if 'authorization' not in request.headers:
abort(404, message="You need to be logged in to access this resource")
payload = _decode_jwt()
g.user_id = payload['id']
g.user = User.find(g.user_id)
if g.user is None:
abort(404, message="The user id is invalid")
return f(*args, **kwargs)
except JWTError as e:
abort(400, message="There was a problem while trying to parse your token -> {0}".format(e))
return func
def validate_user(f):
'''
This decorate ensures that the user logged in is the actually the same user we're operating on
'''
@wraps(f)
def func(*args, **kwargs):
payload = _decode_jwt()
g.user_id = payload['id']
if g.user_id != g.user['id']:
abort(404, message="You do not have permission to the resource you are trying to access")
return f(*args, **kwargs)
return func
def belongs_to_user(f):
'''
This decorator ensures that the file we're trying to access actually belongs to us
'''
@wraps(f)
def func(*args, **kwargs):
file_id = kwargs.get('file_id')
payload = _decode_jwt()
g.user_id = payload['id']
file = File.find(file_id, True)
if not file or file['creator'] != g.user_id:
abort(404, message="The file you are trying to access was not found")
g.file = file
return f(*args, **kwargs)
return func
``` |
{
"source": "jpnewman/print_photo_exif_info",
"score": 2
} |
#### File: jpnewman/print_photo_exif_info/print_exif_info.py
```python
import os
import glob
import yaml
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from ProcessImage import ProcessImage
from geopy.geocoders import Nominatim, GoogleV3
IMAGE_FOLDER = './_TestData/'
OUTPUT_FOLDER = '_Output'
CONFIG_FILE = 'config.yml'
EXTENSIONS = ['.jpg', '.jpeg']
USE_GOOGLE_API = False
MULTI_PROCESSING = True
USER_AGENT = 'Photos'
def load_config(config_file):
"""Load Config."""
if not USE_GOOGLE_API:
return {'google_api_key': ''}
if not os.path.isfile(config_file):
raise ValueError("Configuration file {0} not found.".format(config_file))
with open(config_file, 'r') as ymlfile:
return yaml.safe_load(ymlfile)
def multi_processing(images, geolocator):
pool = Pool(os.cpu_count())
pool.map(partial(ProcessImage,
geolocator=geolocator,
output_folder=OUTPUT_FOLDER,
use_google_api=USE_GOOGLE_API), images)
def single_processing(images, geolocator):
for image in images:
ProcessImage(image, geolocator, OUTPUT_FOLDER, USE_GOOGLE_API)
def main():
"""Main."""
start_time = datetime.now().replace(microsecond=0)
cfg = load_config(CONFIG_FILE)
if USE_GOOGLE_API:
geolocator = GoogleV3(user_agent=USER_AGENT, api_key=cfg['google_api_key'])
else:
geolocator = Nominatim(user_agent=USER_AGENT)
if not os.path.isdir(OUTPUT_FOLDER):
os.makedirs(OUTPUT_FOLDER)
images = []
for root, _dirs, files in os.walk(IMAGE_FOLDER):
for file in files:
if file.lower().endswith(tuple(EXTENSIONS)):
images.append(os.path.join(root, file))
if MULTI_PROCESSING:
multi_processing(images, geolocator)
else:
single_processing(images, geolocator)
end_time = datetime.now().replace(microsecond=0)
print("Processing {0} Images in {1} seconds.".format(len(images), str(end_time - start_time)))
if __name__ == '__main__':
main()
``` |
{
"source": "jpnewman/python_ocr_api",
"score": 3
} |
#### File: workers/validate_address/AddressCompare.py
```python
import json
import requests
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
GOOGLE_API_KEY = '<YOUR_GOOGLE_API_KEY>'
def get_address_scan(firstname, surname, postcode_end, mcs_data):
logger.debug(firstname)
logger.debug(surname)
logger.debug(postcode_end)
logger.debug(mcs_data)
# regions = json.loads(mcs_data)
# logger.debug(regions)
address = u''
capture = 0
for region in mcs_data['regions']:
for line in region['lines']:
for word in line['words']:
if word["text"].lower() == firstname.lower() or word[
"text"].lower() == surname.lower() and capture == 0:
capture = 1
if capture == 1:
# logic below excludes name if at beginning of address
if not (address == u'' and (
word["text"].lower() == firstname.lower() or word["text"].lower() == surname.lower())):
address = address + word["text"] + u' '
if word["text"].lower() == postcode_end.lower():
capture = 2
if capture == 2:
break
if capture == 2:
break
if capture == 2:
break
return address
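# Note added for clarity: 'capture' above is a small state machine over the OCR
# words: 0 = still scanning for the first name or surname, 1 = collecting
# address words (the name itself is skipped at the start of the address), and
# 2 = stop once the end of the postcode has been matched.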
```
#### File: workers/validate_address/validate_address.py
```python
import os
import requests
import json
from AddressCompare import get_address_scan
from celery import Celery
from celery.utils.log import get_task_logger
DEBUG_FIRSTNAME = u'John'
DEBUG_SURNAME = u'Stein'
DEBUG_POSTCODE_END = u'2RX'
logger = get_task_logger(__name__)
app = Celery('validate_address', broker='pyamqp://guest@localhost//')
@app.task(name='workers.validate_address.validate_address', queue='validate_address')
def validate_address(*args, **kwargs):
logger.info(args[0])
address = get_address_scan(DEBUG_FIRSTNAME,
DEBUG_SURNAME,
DEBUG_POSTCODE_END,
args[0][0][0]['mcs_data']) # FIX: arg get boxed with each call
# Validate address
logger.debug(address)
url_addr = "https://maps.googleapis.com/maps/api/geocode/json"
payload = {'address': address, 'key': '<KEY>'}
res = requests.get(url_addr, params=payload)
logger.debug(res.url)
out = res.json()
logger.debug(out)
google_address = ''
match = ''
partial_match = False
if len(out['results']):
google_address = out['results'][0]['formatted_address']
if out['results'][0].has_key('partial_match'):
partial_match = out['results'][0]['partial_match']
logger.info("{0} : {1}".format(google_address, partial_match))
# return google_address, partial_match
return args
```
#### File: image_processing/debug_regions/debug_regions.py
```python
import codecs
import json
import os
from PIL import Image, ImageDraw
from collections import OrderedDict
from ast import literal_eval as make_tuple
RABBIT_CONFIG = None
LOGFILE = None
DEBUG_IMAGE = '../_TestData/Microsoft_computer-vision-api/bgbill12/bgbill12.png'
DEBUG_REGIONS = '../_TestData/Microsoft_computer-vision-api/bgbill12/bgbill12.json'
DEBUG_OUTPUT_FOLDER = '_Output'
class DebugRegions(object):
def __init__(cls, rabbit_config=None, logfile=None):
# super(DebugRegions, cls).__init__(rabbit_config, logfile)
pass
def _calc_rect(cls, rect_string):
rect = make_tuple(rect_string)
rect_list = list(rect)
rect_list[2] += rect_list[0]
rect_list[3] += rect_list[1]
return tuple(rect_list)
def on_request(cls, ch, method, props, body):
img = Image.open(DEBUG_IMAGE)
draw = ImageDraw.Draw(img)
regions = json.load(codecs.open(DEBUG_REGIONS, 'r', 'utf-8-sig'),
object_pairs_hook=OrderedDict)
for region in regions['regions']:
rect = cls._calc_rect(region['boundingBox'])
draw.rectangle(rect, outline='red')
for line in region['lines']:
rect = cls._calc_rect(line['boundingBox'])
draw.rectangle(rect, outline='green')
for word in line['words']:
rect = cls._calc_rect(word['boundingBox'])
draw.rectangle(rect, outline='blue')
# Save
if not os.path.exists(DEBUG_OUTPUT_FOLDER):
os.makedirs(DEBUG_OUTPUT_FOLDER)
output_filepath = os.path.join(DEBUG_OUTPUT_FOLDER, 'debug_regions.png')
img.save(output_filepath)
# Show
img.show()
# Remove
def run(cls):
cls.on_request(None, None, None, None)
# Remove
def stop(cls):
# raise NotImplementedError()
pass
def main():
ocr_rpc_worker = DebugRegions(RABBIT_CONFIG, LOGFILE)
try:
ocr_rpc_worker.run()
except KeyboardInterrupt:
ocr_rpc_worker.stop()
if __name__ == '__main__':
main()
```
#### File: python_ocr_api/open_ocr/rpc_server_base.py
```python
import logging
import pika
LOGGER = logging.getLogger(__name__)
class RpcServer(object):
def __init__(cls, rabbit_config, logfile=None):
cls.rabbit_config = rabbit_config
cls.logfile = logfile
def _setup_logging(cls):
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
if cls.logfile:
file_hdlr = logging.FileHandler(cls.logfile)
file_hdlr.setFormatter(formatter)
LOGGER.addHandler(file_hdlr)
LOGGER.setLevel(logging.DEBUG)
console_hdlr = logging.StreamHandler()
console_hdlr.setFormatter(formatter)
LOGGER.addHandler(console_hdlr)
console_hdlr.setLevel(logging.INFO)
def _setup_rabbitmq(cls):
parameters = pika.URLParameters(cls.rabbit_config['AmqpURI'])
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.exchange_declare(exchange=cls.rabbit_config['Exchange'],
exchange_type=cls.rabbit_config['ExchangeType'],
durable=cls.rabbit_config['Reliable'],
auto_delete=False,
internal=False,
arguments=None)
channel.queue_declare(queue=cls.rabbit_config['QueueName'],
durable=True)
channel.queue_bind(queue=cls.rabbit_config['QueueName'],
exchange=cls.rabbit_config['Exchange'],
routing_key=cls.rabbit_config['RoutingKey'])
channel.basic_qos(prefetch_count=1)
channel.basic_consume(cls.on_request, queue=cls.rabbit_config['QueueName'])
LOGGER.info(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
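# Note added for clarity: _setup_rabbitmq() declares the configured exchange
# and a durable queue, binds them with the configured routing key, and
# consumes one message at a time (prefetch_count=1). Replies are published on
# the default exchange to the queue named in each request's reply_to property
# (see response_text below).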
def _request_info(cls, ch, method, props, body):
LOGGER.debug("Channel: {0!r}".format(ch))
LOGGER.debug("Method: {0!r}".format(method))
LOGGER.debug("Properties: {0!r}".format(props))
LOGGER.debug("Properties: {0!r}".format(body))
def response_text(cls, ch, method, props, response):
basic_properties = pika.BasicProperties(content_type='text/plain',
delivery_mode=props.delivery_mode,
correlation_id=props.correlation_id)
ch.basic_publish(exchange='', # Default Exchange
routing_key=props.reply_to,
properties=basic_properties,
body=str(response))
ch.basic_ack(delivery_tag = method.delivery_tag)
def run(cls):
cls._setup_logging()
cls._setup_rabbitmq()
def stop(cls):
# raise NotImplementedError()
pass
```
#### File: open_ocr/workers/decode_ocr.py
```python
import json
import sys
sys.path.append('..')
from rpc_server_base import RpcServer
from rpc_server_base import LOGGER
RABBIT_CONFIG = {
'AmqpURI': 'amqp://admin:Phaish9ohbaidei6oole@localhost:5672/',
'Exchange': 'open-ocr-exchange',
'ExchangeType': 'direct',
'QueueName': 'decode-ocr',
'RoutingKey': 'decode-ocr',
'Reliable': True
}
LOGFILE = 'decode_ocr.log'
class OcrRpcWorker(RpcServer):
def __init__(cls, rabbit_config, logfile=None):
super(OcrRpcWorker, cls).__init__(rabbit_config, logfile)
def on_request(cls, ch, method, props, body):
LOGGER.info("Handling request...")
cls._request_info(ch, method, props, body)
response = 'decode-ocr\n'
json_data = json.loads(body)
for k,v in json_data.items():
if k != 'img_bytes':
response += "{0} : {1}\n".format(k, v)
else:
response += "{0} : {1}\n".format(k, len(v))
cls.response_text(ch, method, props, response)
def main():
ocr_rpc_worker = OcrRpcWorker(RABBIT_CONFIG, LOGFILE)
try:
ocr_rpc_worker.run()
except KeyboardInterrupt:
ocr_rpc_worker.stop()
if __name__ == '__main__':
main()
``` |
{
"source": "jpnewman/rental_market",
"score": 3
} |
#### File: app/agencies/models.py
```python
from app import db
from app.mixins import CRUDMixin
class Agency(CRUDMixin, db.Model):
__tablename__ = 'agencies'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
address = db.Column(db.Text)
phone_number = db.Column(db.Text)
email = db.Column(db.Text)
# agents = db.relationship('Agent', back_populates='agency_id',
# lazy='select')
agents = db.relationship('Agent', back_populates='agency',
lazy='select')
def __init__(self,
name=None,
address=None,
phone_number=None,
email=None):
self.name = name
self.address = address
self.phone_number = phone_number
self.email = email
def __repr__(self):
return '%s' % (self.name)
``` |
{
"source": "jpnewman/zabbix-scripts",
"score": 2
} |
#### File: zabbix-scripts/sync-zabbix-config/sync-zabbix-config.py
```python
import argparse
import logging
import inspect
import json
import sys
import log
import os
import re
from zabbix.api import ZabbixAPI
from zabbix_objects import objects as zo
from collections import OrderedDict
# shorten, refer, extend
DEFAULT_OUTPUT = 'extend'
DEFAULT_QUARY_OUTPUT = 'extend'
SCRIPT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
DEBUG = False
SKIP_ERRORS = False
# Based on code from: http://stackoverflow.com/questions/27838319/python-delete-all-specific-keys-in-json
def remove_dict_item_by_keys(d, key_pattens=[]):
"""Remove items from dict by keys."""
if not key_pattens:
return d
combined_pattern = "(" + ")|(".join(key_pattens) + ")"
return remove_dict_item_by_key_patten(d, combined_pattern)
def remove_dict_item_by_key_patten(d, key_patten):
"""Returns items from dict that don't match key regex patten."""
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [remove_dict_item_by_key_patten(v, key_patten) for v in d]
return {k: remove_dict_item_by_key_patten(v, key_patten) for k, v in d.items()
if not re.match(key_patten, k)}
# http://stackoverflow.com/questions/27973988/python-how-to-remove-all-empty-fields-in-a-nested-dict/35263074
def remove_empty_dict_items(d):
"""Returns items from dict without empty values."""
if not isinstance(d, (dict, list)):
return d
if isinstance(d, list):
return [v for v in (remove_empty_dict_items(v) for v in d) if v]
return {k: v for k, v in ((k, remove_empty_dict_items(v)) for k, v in d.items()) if v}
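# Illustrative example (hypothetical data):
#   remove_dict_item_by_keys({'hostid': '1', 'name': 'web', 'items': []}, ['hostid'])
# returns {'name': 'web', 'items': []}, and remove_empty_dict_items() applied
# to that result also drops 'items' because its value is empty.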
def _parse_args():
"""Parse Command Arguments."""
global DEBUG
global SKIP_ERRORS
desc = 'Python zabbix api'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('action',
choices=['export', 'import'],
help='Sync Action')
parser.add_argument('server',
help='Zabbix Server')
parser.add_argument('-u', '--user',
default='Admin',
help='Zabbix Username')
parser.add_argument('-p', '--password',
default='<PASSWORD>',
help='Zabbix Password')
parser.add_argument('-o', '--objects',
nargs='+',
type=str,
default=[],
help='Sync Objects')
parser.add_argument('-a', '--data-path',
default=os.path.join(SCRIPT_DIR, '_data'),
help='Archive Data Path')
parser.add_argument('-q', '--exclude-queries',
action='store_true',
default=False,
help='Exclude Get Queries')
parser.add_argument('-e', '--exclude-empty-objects',
action='store_true',
default=False,
help='Exclude Empty Objects')
parser.add_argument('-l', '--log-level',
default='INFO',
help='Log Level')
parser.add_argument('-s', '--skip-errors',
action='store_true',
default=False,
help="Skip Errors")
parser.add_argument('--debug',
action='store_true',
default=False,
help='Debug output')
parser.add_argument('-d', '--dry-run',
action='store_true',
default=False,
help='Dry-run. No action taken')
args = parser.parse_args()
print("{0} Zabbix server '{1}' objects...".format(args.action.title(), args.server))
args.data_path = os.path.abspath(args.data_path)
print("Data Path: {0}".format(args.data_path))
if len(args.objects) > 0:
if args.action.lower() == 'export':
print('Exporting objects: -')
elif args.action.lower() == 'import':
print('Importing objects: -')
else:
print('Syncing objects: -')
for object_name in args.objects:
if object_name in zo.ZABBIX_OBJECTS.keys():
print(" {0}".format(object_name))
else:
log.error("'{0}' is not a known Zabbix object!".format(object_name))
raise
else:
args.objects = zo.ZABBIX_OBJECTS.keys()
DEBUG = args.debug
SKIP_ERRORS = args.skip_errors
return args
def export_object(zapi,
object_name,
output_file,
exclude_empty_objects=False,
params={
'output': DEFAULT_OUTPUT
}):
"""Export Zabbix Object."""
try:
objects = zapi.do_request(object_name + '.get', params)
except:
log.error("Get Failed!")
log.debug(sys.exc_info(),
print_data_type=False,
skip_errors=SKIP_ERRORS)
return
if not objects['result'] and exclude_empty_objects:
print("{0} (EMPTY)".format(object_name.title()))
else:
print("{0}: -".format(object_name.title()))
for object_data in objects['result']:
if 'name' in object_data:
print(" {0}".format(object_data['name']))
with open(output_file, 'w') as outfile:
json.dump(objects, outfile, indent=4, sort_keys=True)
def export_objects(zapi,
object_names,
data_path,
exclude_queries=False,
exclude_empty_objects=False,
params={
'output': DEFAULT_OUTPUT
}):
"""Export Zabbix Objects."""
for object_name in object_names:
params = {'output': DEFAULT_OUTPUT}
if not exclude_queries:
if 'get_query' in zo.ZABBIX_OBJECTS[object_name]:
for query in zo.ZABBIX_OBJECTS[object_name]['get_query']:
params[query] = DEFAULT_QUARY_OUTPUT
output_file = os.path.join(data_path, object_name + '.json')
export_object(zapi, object_name, output_file, exclude_empty_objects, params)
def create_object(zapi,
object_name,
object_data):
"""Create Single Object."""
# Remove keys with empty values
object_data = {k: v for k, v in object_data.items() if v}
# Cannot create items, so remove them.
remove_keys = []
if 'exclude_create_keys' in zo.ZABBIX_OBJECTS[object_name]:
remove_keys = zo.ZABBIX_OBJECTS[object_name]['exclude_create_keys']
objectid_key = get_object_id_key(object_name)
remove_keys.append(objectid_key)
# object_data = remove_empty_dict_items(object_data)
# object_data = remove_dict_item_by_keys(object_data, remove_keys)
# print(json.dumps(object_data))
try:
return zapi.do_request(object_name + '.create', object_data)
except:
log.error("Create Failed!")
log.debug(sys.exc_info(),
print_data_type=False,
skip_errors=SKIP_ERRORS)
return {}
def update_object(zapi,
object_name,
object_data):
"""Update Single Object."""
# Cannot update items, so remove them.
remove_keys = []
if 'exclude_update_keys' in zo.ZABBIX_OBJECTS[object_name]:
remove_keys = zo.ZABBIX_OBJECTS[object_name]['exclude_update_keys']
object_data = remove_empty_dict_items(object_data)
object_data = remove_dict_item_by_keys(object_data, remove_keys)
try:
return zapi.do_request(object_name + '.update', object_data)
except:
log.error("Update Failed!")
log.debug(sys.exc_info(),
print_data_type=False,
skip_errors=SKIP_ERRORS)
return {}
def get_object_id_key(object_name):
"""Get Object Id Key."""
object_id_key = object_name + 'id'
if 'id' in zo.ZABBIX_OBJECTS[object_name]:
object_id_key = zo.ZABBIX_OBJECTS[object_name]['id']
return object_id_key
def get_object_ids(zapi, object_name):
"""Get Object Ids"""
current_objects_ids = []
if not should_import(object_name):
return current_objects_ids
objects = zapi.do_request(object_name + '.get', {'output': 'refer'})
object_id_key = get_object_id_key(object_name)
for object_data in objects['result']:
current_objects_ids.append(int(object_data[object_id_key]))
return current_objects_ids
def should_import(object_name):
"""Should Import."""
import_obj = True
if 'import' in zo.ZABBIX_OBJECTS[object_name]:
import_obj = zo.ZABBIX_OBJECTS[object_name]['import']
return import_obj
def import_object(zapi,
object_name,
input_file,
exclude_empty_objects=False):
"""Import Zabbix Object."""
sys.stdout.write(object_name)
if not os.path.isfile(input_file):
print(' (FILE NOT FOUND)')
return
if not should_import(object_name):
print(' (SKIPPED)')
return
print('')
object_id_key = get_object_id_key(object_name)
current_object_ids = get_object_ids(zapi, object_name)
with open(input_file) as data_file:
data = json.load(data_file, object_pairs_hook=OrderedDict)
for object_data in data['result']:
if object_id_key in object_data:
if int(object_data[object_id_key]) in current_object_ids:
if 'name' in object_data:
print(" {0} ({1})".format(object_data['name'],
log.colorText('UPDATING', log.CYAN)))
update_object(zapi, object_name, object_data)
else:
if 'name' in object_data:
print(" {0} ({1})".format(object_data['name'],
log.colorText('CREATING', log.GREEN)))
create_object(zapi, object_name, object_data)
def import_objects(zapi,
object_names,
data_path,
exclude_empty_objects=False):
""""Import Zabbix Obejcts."""
for object_name in object_names:
input_file = os.path.join(data_path, object_name + '.json')
import_object(zapi, object_name, input_file, exclude_empty_objects)
def main():
"""Main."""
args = _parse_args()
if SKIP_ERRORS:
log.warn('Skipping Errors!')
# Create ZabbixAPI class instance
zapi = ZabbixAPI(url=args.server, user='Admin', password='<PASSWORD>')
print("Auth: {0}".format(zapi.auth))
# Logging
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.DEBUG)
logger = logging.getLogger('pyzabbix')
logger.addHandler(stream)
log_level = logging.getLevelName(args.log_level)
logger.setLevel(log_level)
if args.action.lower() == 'export':
if not os.path.exists(args.data_path):
os.makedirs(args.data_path)
export_objects(zapi,
args.objects,
args.data_path,
args.exclude_queries,
args.exclude_empty_objects)
elif args.action.lower() == 'import':
import_objects(zapi,
args.objects,
args.data_path,
args.exclude_empty_objects)
if __name__ == "__main__":
main()
``` |
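For context, a hedged sketch of driving the export/import helpers above programmatically rather than through the CLI. The `ZabbixAPI` call mirrors the one in `main()`; `'hostgroup'` is a hypothetical key assumed to exist in `zo.ZABBIX_OBJECTS`, and the URL, credentials and data path are placeholders.
```python
# Hedged usage sketch -- not part of the original script.
# ZabbixAPI here is the same class imported by the module above;
# 'hostgroup' is a hypothetical zo.ZABBIX_OBJECTS key.
zapi = ZabbixAPI(url='http://zabbix.example.com', user='Admin', password='secret')

os.makedirs('./zabbix-data', exist_ok=True)   # export_object() writes one JSON file per object
export_objects(zapi,
               object_names=['hostgroup'],
               data_path='./zabbix-data',
               exclude_queries=True,
               exclude_empty_objects=True)

# Replay the dumps against the same (or another) server
import_objects(zapi, ['hostgroup'], './zabbix-data')
```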
{
"source": "jpniels/Bachelor",
"score": 3
} |
#### File: jpniels/Bachelor/apriori.py
```python
import numpy as np
import pandas as pd
#Look for new combinations
def newCombinations(oldCombos):
previousStep = np.unique(oldCombos.flatten())
for oldCombi in oldCombos:
maxCombi = max(oldCombi)
for value in previousStep:
if value > maxCombi:
res = tuple(oldCombi) + (value,)
yield res
#Apriori algorithm calculating the support of the itemsets
def apriori(df, minSupport):
values = df.values
index = np.arange(values.shape[1])
support = (np.sum(values, axis=0) / float(values.shape[0]))
supportDict = {1: support[support >= minSupport]}
valuesetDict = {1: index[support >= minSupport].reshape(-1, 1)}
maxValueset = 1
while maxValueset:
newMaxValueset = maxValueset + 1
combin = newCombinations(valuesetDict[maxValueset])
frequentValues = []
frequentValuesSupport = []
for c in combin:
combined = values[:, c].all(axis=1)
support = combined.sum() / len(values)
if support >= minSupport:
frequentValues.append(c)
frequentValuesSupport.append(support)
if frequentValues:
valuesetDict[newMaxValueset] = np.array(frequentValues)
supportDict[newMaxValueset] = np.array(frequentValuesSupport)
maxValueset = newMaxValueset
else:
maxValueset = 0
resultsDataFrame = concatSets(supportDict, valuesetDict)
return resultsDataFrame
#Concat the support and valueset into dataframe
def concatSets(supportSet,valueSet):
allResults = []
for k in sorted(valueSet):
support = pd.Series(supportSet[k])
valuesets = pd.Series([i for i in valueSet[k]])
result = pd.concat((support, valuesets), axis=1)
allResults.append(result)
supportDf = pd.concat(allResults)
supportDf.columns = ['support', 'itemsets']
supportDf = supportDf.reset_index(drop=True)
return supportDf
#Calculate the confidence for all values
def allConfidence(df, min_confidence):
df2 = df.loc[df['itemsets'].str.len() > 1] #df with 2 values
df3 = df.loc[df['itemsets'].str.len() <= 1] #df with less than 2 values
#empty arrays for filling up
ante = []
conse = []
conf = []
for index, row in df2.iterrows(): #going through each element that contains 2 values
for index, row2 in df3.iterrows(): #go though each element that contains less than 2 values
if (row['itemsets']==row2['itemsets']).any() == True: #check if a value is a part of the other
confvalue = row['support']/row2['support'] #calculate confidence
if confvalue >= min_confidence: #fill arrays if confidence is above min_confidence
ante.append(row2['itemsets'])
conse.append(row['itemsets'])
conf.append(confvalue)
confDf = pd.DataFrame(list(zip(ante, conse, conf)),columns=['antecedants','consequents', 'confidence']) #create dataframe with values
return confDf
#Calculate the lift for all values
def allLift(df, min_lift):
df2 = df.loc[df['itemsets'].str.len() > 1]
df3 = df.loc[df['itemsets'].str.len() <= 1]
#empty arrays for filling up
ante = []
conse = []
lift = []
for index, row in df2.iterrows():
for index, row2 in df3.iterrows():
if (row['itemsets']==row2['itemsets']).any() == True:
for index, row3 in df3.iterrows():
testingvalue = np.append(row2['itemsets'], (row3['itemsets']))
if(np.sort(testingvalue) == row['itemsets']).all() == True:
liftvalue = row['support']/(row2['support']*row3['support'])
if liftvalue >= min_lift:
ante.append(row2['itemsets'])
conse.append(row3['itemsets'])
lift.append(liftvalue)
liftDf = pd.DataFrame(list(zip(ante, conse, lift)),columns=['antecedants','consequents', 'lift'])
return liftDf
#Calculate the conviction for all values
def allConviction(supp, conf):
conviction = []
tempConf = conf
for i in range(0,len(supp)):
for j in range(0, len(supp['itemsets'][i])):
for ii in range(0, len(conf)):
for jj in range(0, len(conf['consequents'][ii])):
if supp['itemsets'][i][j] != conf['antecedants'][ii][0] and len(supp['itemsets'][i]) <=1:
if supp['itemsets'][i][j] == conf['consequents'][ii][jj]:
conviction.append((1-supp['support'][i])/(1-conf['confidence'][ii]))
conf.drop([ii])
supp.drop([i])
tempConf['conviction'] = conviction
return tempConf
```
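A minimal usage sketch for the helpers above, not part of the original file: `apriori()` expects a one-hot encoded pandas DataFrame (one column per item, one row per transaction), and the returned `itemsets` hold column positions rather than names. The `import apriori` line assumes this file is importable under that name.
```python
import pandas as pd
import apriori  # assumes apriori.py (above) is on the path under this name

# Toy one-hot transactions: 1 = item present, 0 = absent
df = pd.DataFrame({
    'bread':  [1, 1, 0, 1],
    'butter': [1, 1, 1, 0],
    'milk':   [0, 1, 1, 1],
})

supp = apriori.apriori(df, minSupport=0.5)              # frequent itemsets as column indices
conf = apriori.allConfidence(supp, min_confidence=0.6)  # rules with confidence >= 0.6
lift = apriori.allLift(supp, min_lift=0.8)              # rules with lift >= 0.8
print(supp)
print(conf)
print(lift)
```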
#### File: jpniels/Bachelor/gui.py
```python
import sys, random
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GetFromJson
import PandasModel
#Main Window
class mainWindow(QMainWindow):
#Application Stylesheet
def mainStyle(self):
self.setStyleSheet("""
background-color: #2A3036;
color: #FFF;
""")
def __init__(self):
super().__init__(parent=None)
self.mainStyle()
self.setGeometry(20, 20, 800, 600)
self.app_widget = App()
self.setCentralWidget(self.app_widget)
self.setWindowTitle('PyQAR Project')
#Global Menu
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('File')
editMenu = mainMenu.addMenu('Edit')
viewMenu = mainMenu.addMenu('View')
toolsMenu = mainMenu.addMenu('Tools')
helpMenu = mainMenu.addMenu('Help')
#File Menu
openFileButton = QAction('Open File', self)
openFileButton.setShortcut('Ctrl+O')
openFileButton.triggered.connect(self.openFile)
fileMenu.addAction(openFileButton)
exitButton = QAction('Exit', self)
exitButton.setShortcut('Ctrl+Q')
exitButton.triggered.connect(self.close)
fileMenu.addAction(exitButton)
#Edit Menu
undoButton = QAction('Undo', self)
undoButton.setShortcut('Ctrl+Z')
editMenu.addAction(undoButton)
redoButton = QAction('Redo', self)
redoButton.setShortcut('Ctrl+Y')
editMenu.addAction(redoButton)
#View Menu
viewQAR = QAction('View Association Rules', self, checkable=True)
viewQAR.setChecked(True)
viewQAR.triggered.connect(self.toggleQAR)
viewMenu.addAction(viewQAR)
viewPlot = QAction('View Plot', self, checkable=True)
viewPlot.setChecked(True)
viewPlot.triggered.connect(self.togglePlot)
viewMenu.addAction(viewPlot)
#Tools Menu
globalSettingsButton = QAction('Global Settings', self)
toolsMenu.addAction(globalSettingsButton)
#Help Menu
documentationButton = QAction('Documentation', self )
documentationButton.triggered.connect(self.doclink)
helpMenu.addAction(documentationButton)
aboutButton = QAction('About', self)
aboutButton.triggered.connect(self.about)
helpMenu.addAction(aboutButton)
#About Function
def about(self):
QMessageBox.information(self, "About", "Version: 1.0.0.0.0.0.0.0.1 \n Program made by: \n \n <NAME> \n <NAME> \n ")
#Open File Function
def openFile(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
if fileName:
GetFromJson.read_file_path(fileName)
#Documentation Function
def doclink(self):
QDesktopServices.openUrl(QUrl('https://github.com/jpniels/Bachelor'))
#Settings Function
def globalSettings(self):
print('hej')
#Global CloseEvent function
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Quit Dialog',
"\n Are you sure to quit?", QMessageBox.Yes |
QMessageBox.Cancel, QMessageBox.Cancel)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def toggleQAR(self, state):
if state:
self.app_widget.supportbutton.show()
self.app_widget.allbutton.show()
self.app_widget.liftbutton.show()
self.app_widget.confidencebutton.show()
self.app_widget.tableWidget.show()
else:
self.app_widget.supportbutton.hide()
self.app_widget.allbutton.hide()
self.app_widget.liftbutton.hide()
self.app_widget.confidencebutton.hide()
self.app_widget.tableWidget.hide()
def togglePlot(self, state):
if state:
self.app_widget.canvas.show()
else:
self.app_widget.canvas.hide()
#Central widget within mainWindow
class App(QWidget):
#Application Stylesheet
def appStyle(self):
self.setStyleSheet("""
.QWidget {
background-color: #2A3036;
}
.QComboBox, .QLineEdit, .QSpinBox, .QDoubleSpinBox{
background-color: #434C55;
color: #fff;
height: 30px;
selection-color: #434C55;
selection-background-color: #FFB36C;
}
.QTableView {
selection-color: #434C55;
selection-background-color: #FFB36C;
border: none;
width: 100%;
}
.QRadioButton {
color: #fff;
}
.QRadioButton::indicator::unchecked{
border: 1px solid #5C656E;
background-color: #434C55;
height: 13px;
}
.QRadioButton::indicator::checked{
border: 1px solid #434C55;
background-color: #FFB36C;
height: 13px;
}
.QLabel {
color: darkgrey;
}
""")
#Global initialization
def __init__(self):
super().__init__()
self.initUI()
self.appStyle()
def initUI(self):
#Plot Styling
plt.style.use('seaborn-pastel')
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['xtick.color'] = '#96A391'
plt.rcParams['ytick.color'] = '#96A391'
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelcolor'] = 'darkgrey'
plt.rcParams['axes.labelweight'] = 'normal'
plt.rcParams['xtick.labelsize'] = 10
plt.rcParams['ytick.labelsize'] = 10
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['figure.facecolor'] = '#2A3036'
plt.rcParams['axes.edgecolor'] = '#96A391'
plt.rcParams['axes.linewidth'] = 1
plt.rcParams['axes.facecolor'] = '#2A3036'
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.color'] = '#343B43'
plt.rcParams['text.color'] = 'darkgrey'
plt.xticks(rotation=90)
#Grid/layout handling
l = QGridLayout(self)
subhorizontallayout = QHBoxLayout()
sublayout = QVBoxLayout()
sublayout2 = QVBoxLayout()
sublayout3 = QVBoxLayout()
sublayout.setAlignment(Qt.AlignTop)
sublayout2.setAlignment(Qt.AlignTop)
sublayout3.setAlignment(Qt.AlignTop)
subsublayout1 = QHBoxLayout()
subsublayout2 = QVBoxLayout()
subsublayout3 = QVBoxLayout()
subsublayout2.setAlignment(Qt.AlignTop)
subsublayout3.setAlignment(Qt.AlignTop)
self.figure = plt.figure(figsize=(5,7))
self.canvas = FigureCanvas(self.figure)
self.canvas.setMinimumWidth(800)
self.canvas.setMaximumHeight(800)
sublayout2.addWidget(self.canvas)
self.threshold = QDoubleSpinBox(self)
self.threshold.setValue(0.1)
self.threshold.valueChanged.connect(self.plot)
self.threshold.setFixedWidth(250)
self.threshold.setSuffix(' Threshold')
self.threshold.setRange(0.1, 5)
self.threshold.setSingleStep(0.1)
subsublayout2.addWidget(self.threshold)
#Support Button
self.supportbutton = QRadioButton("Calculate Support", self)
self.supportbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.supportbutton)
        #Confidence Button
self.confidencebutton = QRadioButton("Calculate Confidence", self)
self.confidencebutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.confidencebutton)
#Lift Button
self.liftbutton = QRadioButton("Calculate Lift", self)
self.liftbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.liftbutton)
#Conviction Button
self.convictionbutton = QRadioButton("Calculate Conviction", self)
self.convictionbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.convictionbutton)
        #Calculate All Button
self.allbutton = QRadioButton("Calculate All", self)
self.allbutton.toggled.connect(self.aprioritoggled)
subsublayout2.addWidget(self.allbutton)
####################################################################################################################
### Grid 1
####################################################################################################################
#Room Box 1
self.roomBoxlabel = QLabel("Select Room:")
self.roomBox = QComboBox(self)
self.roomBox.addItem('Room')
self.roomBox.model().item(0).setEnabled(False)
for element in GetFromJson.getRooms():
self.roomBox.addItem(element)
self.roomBox.currentTextChanged.connect(self.roomBoxChanged)
self.roomBox.setFixedWidth(250)
sublayout.addWidget(self.roomBoxlabel)
sublayout.addWidget(self.roomBox)
#Media Box 1
self.mediaBoxlabel = QLabel("Select Media:")
self.mediaBox = QComboBox(self)
self.mediaBox.setEnabled(False)
self.mediaBox.addItem('Media')
self.mediaBox.model().item(0).setEnabled(False)
self.mediaBox.currentTextChanged.connect(self.plot)
self.mediaBox.setFixedWidth(250)
sublayout.addWidget(self.mediaBoxlabel)
sublayout.addWidget(self.mediaBox)
#Outliers Radiobutton 1
self.outlierBtn = QRadioButton("Remove Outliers", self)
self.outlierBtn.setAutoExclusive(False)
self.outlierBtn.toggled.connect(self.outlierstoggled)
sublayout.addWidget(self.outlierBtn)
#Outliers Selection Box
self.outliermethod = QComboBox(self)
self.outliermethod.hide()
self.outliermethod.addItem('Standard Deviation')
self.outliermethod.addItem('Interquartile Range')
self.outliermethod.currentTextChanged.connect(self.plot)
self.outliermethod.setFixedWidth(250)
sublayout.addWidget(self.outliermethod)
        #Interpolate Radiobutton 1
self.interpolateBtn = QRadioButton("Interpolate data", self)
self.interpolateBtn.setAutoExclusive(False)
self.interpolateBtn.toggled.connect(self.interpolatetoggled)
sublayout.addWidget(self.interpolateBtn)
#Interpolate Selection Box
self.interpolateBox = QComboBox(self)
self.interpolateBox.hide()
self.interpolateBox.addItem('5Min')
self.interpolateBox.addItem('15Min')
self.interpolateBox.addItem('30Min')
self.interpolateBox.addItem('45Min')
self.interpolateBox.addItem('1H')
self.interpolateBox.addItem('2H')
self.interpolateBox.currentTextChanged.connect(self.plot)
self.interpolateBox.setFixedWidth(250)
sublayout.addWidget(self.interpolateBox)
#Intervals Radiobutton 1
self.intervalsBtn = QRadioButton("Use intervals", self)
self.intervalsBtn.setAutoExclusive(False)
self.intervalsBtn.toggled.connect(self.intervalstoggled)
sublayout.addWidget(self.intervalsBtn)
#Intervals spinbox 1
self.spinbox = QSpinBox(self)
self.spinbox.setValue(1)
self.spinbox.valueChanged.connect(self.plot)
self.spinbox.hide()
self.spinbox.setFixedWidth(250)
self.spinbox.setSuffix(' Intervals')
self.spinbox.setRange(1, 25)
sublayout.addWidget(self.spinbox)
#Time Frequency Radiobutton
self.freqButton = QRadioButton("Set Time Frequency", self)
self.freqButton.setAutoExclusive(False)
self.freqButton.toggled.connect(self.frequencytoggled)
sublayout.addWidget(self.freqButton)
#Time Frequency Box
self.timefreqBox = QComboBox(self)
self.timefreqBox.hide()
self.timefreqBox.addItem('30Min')
self.timefreqBox.addItem('1H')
self.timefreqBox.addItem('2H')
self.timefreqBox.addItem('12H')
self.timefreqBox.addItem('1D')
self.timefreqBox.addItem('1W')
self.timefreqBox.addItem('2W')
self.timefreqBox.addItem('1M')
self.timefreqBox.currentTextChanged.connect(self.plot)
self.timefreqBox.setFixedWidth(250)
sublayout.addWidget(self.timefreqBox)
#Calendar From Widget
self.dateTimelabel = QLabel("Select Start Date: ")
self.calendar = QCalendarWidget(self)
format = QTextCharFormat()
format.setBackground(QColor('#434C55'))
weekendformat = QTextCharFormat()
weekendformat.setForeground(QColor('#fff'))
self.calendar.setHeaderTextFormat(format)
self.calendar.setStyleSheet('selection-background-color: #FFB36C; selection-color: #434C55;')
self.calendar.setWeekdayTextFormat(Qt.Saturday, weekendformat)
self.calendar.setWeekdayTextFormat(Qt.Sunday, weekendformat)
self.calendar.setFixedWidth(250)
self.calendar.setMaximumHeight(220)
sublayout.addWidget(self.dateTimelabel)
sublayout.addWidget(self.calendar)
#Date time From widget for converting to ms - nonvisible
self.datetime = QDateTimeEdit()
self.datetime.setCalendarPopup(True)
self.datetime.setCalendarWidget(self.calendar)
self.datetime.dateTimeChanged.connect(self.plot)
self.datetime.setVisible(False)
sublayout.addStretch()
####################################################################################################################
### Grid 2
####################################################################################################################
#Room Box 2
self.roomBoxlabel2 = QLabel("Select Second Room:")
self.roomBox2 = QComboBox(self)
self.roomBox2.addItem('Room')
self.roomBox2.model().item(0).setEnabled(False)
for element in GetFromJson.getRooms():
self.roomBox2.addItem(element)
self.roomBox2.currentTextChanged.connect(self.roomBox2Changed)
self.roomBox2.setFixedWidth(250)
sublayout3.addWidget(self.roomBoxlabel2)
sublayout3.addWidget(self.roomBox2)
#Media Box 2
self.mediaBoxlabel2 = QLabel("Select Second Media:")
self.mediaBox2 = QComboBox(self)
self.mediaBox2.setEnabled(False)
self.mediaBox2.addItem('Media')
self.mediaBox2.model().item(0).setEnabled(False)
self.mediaBox2.currentTextChanged.connect(self.plot)
self.mediaBox2.setFixedWidth(250)
sublayout3.addWidget(self.mediaBoxlabel2)
sublayout3.addWidget(self.mediaBox2)
#Outliers Radiobutton 2
self.outlierBtn2 = QRadioButton("Remove Outliers", self)
self.outlierBtn2.setAutoExclusive(False)
self.outlierBtn2.toggled.connect(self.outlierstoggled2)
sublayout3.addWidget(self.outlierBtn2)
#Outliers Selection Box
self.outliermethod2 = QComboBox(self)
self.outliermethod2.hide()
self.outliermethod2.addItem('Standard Deviation', 1)
self.outliermethod2.addItem('Interquartile Range', 2)
self.outliermethod2.currentTextChanged.connect(self.plot)
self.outliermethod2.setFixedWidth(250)
sublayout3.addWidget(self.outliermethod2)
#Interpolate Radiobutton 2
self.interpolateBtn2 = QRadioButton("Interpolate data", self)
self.interpolateBtn2.setAutoExclusive(False)
self.interpolateBtn2.toggled.connect(self.interpolatetoggled2)
sublayout3.addWidget(self.interpolateBtn2)
#Interpolate Selection Box
self.interpolateBox2 = QComboBox(self)
self.interpolateBox2.hide()
self.interpolateBox2.addItem('5Min')
self.interpolateBox2.addItem('15Min')
self.interpolateBox2.addItem('30Min')
self.interpolateBox2.addItem('45Min')
self.interpolateBox2.addItem('1H')
self.interpolateBox2.addItem('2H')
self.interpolateBox2.currentTextChanged.connect(self.plot)
self.interpolateBox2.setFixedWidth(250)
sublayout3.addWidget(self.interpolateBox2)
#Intervals Radiobutton 2
self.intervalsBtn2 = QRadioButton("Use intervals", self)
self.intervalsBtn2.setAutoExclusive(False)
self.intervalsBtn2.toggled.connect(self.intervalstoggled2)
sublayout3.addWidget(self.intervalsBtn2)
#Intervals spinbox 2
self.spinbox2 = QSpinBox(self)
self.spinbox2.setValue(1)
self.spinbox2.valueChanged.connect(self.plot)
self.spinbox2.hide()
self.spinbox2.setFixedWidth(250)
self.spinbox2.setSuffix(' Intervals')
self.spinbox2.setRange(1, 25)
sublayout3.addWidget(self.spinbox2)
#Time Frequency Radiobutton
self.freqButton2 = QRadioButton("Set Time Frequency", self)
self.freqButton2.setAutoExclusive(False)
self.freqButton2.toggled.connect(self.frequencytoggled2)
sublayout3.addWidget(self.freqButton2)
#Time Frequency Box 2
self.timefreqBox2 = QComboBox(self)
self.timefreqBox2.hide()
self.timefreqBox2.addItem('30Min')
self.timefreqBox2.addItem('1H')
self.timefreqBox2.addItem('2H')
self.timefreqBox2.addItem('12H')
self.timefreqBox2.addItem('1D')
self.timefreqBox2.addItem('1W')
self.timefreqBox2.addItem('2W')
self.timefreqBox2.addItem('1M')
self.timefreqBox2.currentTextChanged.connect(self.plot)
self.timefreqBox2.setFixedWidth(250)
sublayout3.addWidget(self.timefreqBox2)
#Calendar To Widget
self.dateTimelabelto = QLabel("Select End Date: ")
self.calendarto = QCalendarWidget(self)
self.calendarto.setHeaderTextFormat(format)
self.calendarto.setStyleSheet('selection-background-color: #FFB36C; selection-color: #434C55;')
self.calendarto.setWeekdayTextFormat(Qt.Saturday, weekendformat)
self.calendarto.setWeekdayTextFormat(Qt.Sunday, weekendformat)
self.calendarto.setFixedWidth(250)
self.calendarto.setMaximumHeight(220)
sublayout3.addWidget(self.dateTimelabelto)
sublayout3.addWidget(self.calendarto)
#Date time From widget for converting to ms - nonvisible
self.datetimeto = QDateTimeEdit(QDate.currentDate())
self.datetimeto.setCalendarPopup(True)
self.datetimeto.setCalendarWidget(self.calendarto)
self.datetimeto.dateTimeChanged.connect(self.plot)
self.datetimeto.setVisible(False)
sublayout3.addStretch()
##########################################################################################################################
#Table Widget
self.tableWidget = QTableView()
self.header = self.tableWidget.horizontalHeader()
self.header.setStretchLastSection(True)
subsublayout3.addWidget(self.tableWidget)
#Add layouts to grid
subsublayout1.addLayout(subsublayout2)
subsublayout1.addLayout(subsublayout3)
sublayout2.addLayout(subsublayout1)
subhorizontallayout.addLayout(sublayout)
subhorizontallayout.addLayout(sublayout2)
subhorizontallayout.addLayout(sublayout3)
sizeable = QWidget()
sizeable.setLayout(subhorizontallayout)
l.addWidget(sizeable, 1, 1, 1, 1)
l.setAlignment(Qt.AlignCenter)
self.compute_initial_figure()
    #When a room is selected, load its available media into the media box
def roomBoxChanged(self):
self.mediaBox.setEnabled(True)
self.mediaBox.clear()
medialist = []
for k, v in GetFromJson.getMedias(self.roomBox.currentText()).items():
if v not in medialist:
medialist.append(v)
self.mediaBox.addItems(medialist)
#Same as above for room2 selected
def roomBox2Changed(self):
self.mediaBox2.setEnabled(True)
self.mediaBox2.clear()
medialist2 = []
for k, v in GetFromJson.getMedias(self.roomBox2.currentText()).items():
if v not in medialist2:
medialist2.append(v)
self.mediaBox2.addItems(medialist2)
def outlierstoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.outliermethod.show()
self.plot()
else:
self.outlierBtn.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.outliermethod.hide()
self.plot()
def interpolatetoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.interpolateBox.show()
self.plot()
else:
self.interpolateBtn.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.interpolateBox.hide()
self.plot()
def intervalstoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.spinbox.show()
self.plot()
else:
self.intervalsBtn.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.spinbox.hide()
self.plot()
def frequencytoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media':
self.timefreqBox.show()
self.plot()
else:
self.freqButton.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.timefreqBox.hide()
self.plot()
def outlierstoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.outliermethod2.show()
self.plot()
else:
self.outlierBtn2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.outliermethod2.hide()
self.plot()
def interpolatetoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.interpolateBox2.show()
self.plot()
else:
self.interpolateBtn2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.interpolateBox2.hide()
self.plot()
def intervalstoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.spinbox2.show()
self.plot()
else:
self.intervalsBtn2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.spinbox2.hide()
self.plot()
def frequencytoggled2(self, state):
if state:
if self.mediaBox2.currentText() != 'Media':
self.timefreqBox2.show()
self.plot()
else:
self.freqButton2.setChecked(False)
QMessageBox.warning(self, "Error", "You must pick a room and media before using this function.")
else:
self.timefreqBox2.hide()
self.plot()
def aprioritoggled(self, state):
if state:
if self.mediaBox.currentText() != 'Media' or self.mediaBox2.currentText() != 'Media':
self.plot()
else:
QMessageBox.warning(self, "Error", "You must pick two rooms and medias before using this function.")
else:
self.plot()
    #Initial empty plot drawn before any data has been selected
def compute_initial_figure(self):
axes=self.figure.add_subplot(111)
axes.plot(1,1)
self.canvas.draw()
#Plotting the data selected
def plot(self):
axes=self.figure.add_subplot(111)
axes.cla()
test = GetFromJson.getMediaIndex(self.mediaBox.currentText(), self.roomBox.currentText())
df = GetFromJson.getDataframe(test)
df = GetFromJson.dataframeFromTime(df, self.datetime.dateTime().toMSecsSinceEpoch(), self.datetimeto.dateTime().toMSecsSinceEpoch())
if self.outlierBtn.isChecked() == True:
if self.outliermethod.currentText() == 'Standard Deviation':
df = GetFromJson.removeOutliersSD(df)
else:
df = GetFromJson.removeOutliersIQR(df)
if self.interpolateBtn.isChecked() == True:
df = GetFromJson.createInterpolation(df, self.interpolateBox.currentText())
if self.freqButton.isChecked() == True:
df = GetFromJson.getDataframeFreq(df, self.timefreqBox.currentText())
if self.intervalsBtn.isChecked() == True:
df = GetFromJson.setReadingIntervals(df, self.spinbox.value())
df['readings'] = df['readings'].astype(str)
axes.plot(df.index.values, df['readings'], 'r-', linewidth=1, linestyle='-', color='#E9B955')
self.canvas.draw()
test2 = GetFromJson.getMediaIndex(self.mediaBox2.currentText(), self.roomBox2.currentText())
df2 = GetFromJson.getDataframe(test2)
df2 = GetFromJson.dataframeFromTime(df2, self.datetime.dateTime().toMSecsSinceEpoch(), self.datetimeto.dateTime().toMSecsSinceEpoch())
if self.outlierBtn2.isChecked() == True:
if self.outliermethod2.currentText() == 'Standard Deviation':
df2 = GetFromJson.removeOutliersSD(df2)
else:
df2 = GetFromJson.removeOutliersIQR(df2)
if self.interpolateBtn2.isChecked() == True:
df2 = GetFromJson.createInterpolation(df2, self.interpolateBox2.currentText())
if self.freqButton2.isChecked() == True:
df2 = GetFromJson.getDataframeFreq(df2, self.timefreqBox2.currentText())
if self.intervalsBtn2.isChecked() == True:
df2 = GetFromJson.setReadingIntervals(df2, self.spinbox2.value())
df2['readings'] = df2['readings'].astype(str)
#Plot the graph
axes.plot(df2.index.values, df2['readings'], 'r-', linewidth=1, linestyle='-', color='#2D4CC5')
axes.set_title(self.mediaBox.currentText() + ' & ' + self.mediaBox2.currentText() + ' in rooms ' + self.roomBox.currentText() + ', ' + self.roomBox2.currentText())
axes.set_xlabel('Time')
axes.set_ylabel('Readings')
        #Fill the results table according to the selected association-rule metric
if self.liftbutton.isChecked() == True:
df3 = GetFromJson.getBooleanAssociationRules(df, df2)
df3 = GetFromJson.ap.apriori(df3, 0.1)
df3 = GetFromJson.ap.allLift(df3, 0)
model = PandasModel.PandasModel(df3)
self.tableWidget.setModel(model)
if self.supportbutton.isChecked() == True:
df = GetFromJson.getBooleanAssociationRules(df, df2)
df = GetFromJson.ap.apriori(df, self.threshold.value())
model = PandasModel.PandasModel(df)
self.tableWidget.setModel(model)
if self.confidencebutton.isChecked() == True:
df3 = GetFromJson.getBooleanAssociationRules(df, df2)
df3 = GetFromJson.ap.apriori(df3, 0)
df3 = GetFromJson.ap.allConfidence(df3, self.threshold.value())
model = PandasModel.PandasModel(df3)
self.tableWidget.setModel(model)
if self.convictionbutton.isChecked() == True:
df3 = GetFromJson.getBooleanAssociationRules(df, df2)
supp = GetFromJson.ap.apriori(df3, 0)
conf = GetFromJson.ap.allConfidence(supp, self.threshold.value())
df3 = GetFromJson.ap.allConviction(supp, conf)
model = PandasModel.PandasModel(df3)
self.tableWidget.setModel(model)
self.canvas.draw()
class LoginWindow(QMainWindow):
#Login Stylesheet
def loginStyle(self):
self.setStyleSheet("""
.QPushButton {
background-color: #1AB186;
height: 25px;
}
.QLineEdit {
background-color: #fff;
height: 25px;
}
""")
#Login Window
def __init__(self):
super().__init__()
self.setWindowTitle('PyQAR Login')
self.mainWindow = mainWindow()
centralWidget = QWidget()
self.setFixedSize(320,200)
self.setCentralWidget(centralWidget)
gridLayout = QGridLayout()
centralWidget.setLayout(gridLayout)
self.loginStyle()
#Login Image
label = QLabel(self)
label.resize(275, 73)
pixmap = QPixmap('assets/SDU.png')
pixmap = pixmap.scaled(179, 50)
label.setPixmap(pixmap)
#Login Form
self.uName = QLineEdit(self)
self.uName.setPlaceholderText('Username')
self.pWord = QLineEdit(self)
self.pWord.setPlaceholderText('Password')
self.pWord.setEchoMode(QLineEdit.Password)
loginBtn = QPushButton('Login', self)
loginBtn.clicked.connect(self.loginHandler)
layout = QVBoxLayout()
layout.addWidget(self.uName)
layout.addWidget(self.pWord)
layout.addWidget(loginBtn)
#Add elements to Grid Layout
gridLayout.addWidget(label, 0, 0, Qt.AlignCenter)
gridLayout.addItem(layout, 1, 0, Qt.AlignCenter)
#Handle Login Button
def loginHandler(self):
if (self.uName.text() == 'foo' and
self.pWord.text() == 'bar'):
self.mainWindow.show()
self.close()
else:
QMessageBox.warning(
self, 'Error', 'Bad username or password')
if __name__ == '__main__':
app = QApplication(sys.argv)
app.setStyle(QStyleFactory.create('Fusion'))
ex = LoginWindow()
ex.show()
sys.exit(app.exec_())
```
#### File: jpniels/Bachelor/test_sample.py
```python
import pytest
from time import sleep
import gui
from PyQt5.QtCore import *
def testgui(qtbot):
window = gui.App()
qtbot.addWidget(window)
window.show()
assert window.isVisible()
```
#### File: jpniels/Bachelor/tests.py
```python
import unittest
import sys
import gui
import GetFromJson
import apriori
import pandas as pd
import numpy as np
import numpy.testing as npt
from gui import mainWindow
from time import sleep
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class TestStringMethods(unittest.TestCase):
def setUp(self):
#Create dataframes for testing
df = pd.DataFrame({'readings': [20,21, 20, 21, 22, 21, 24, 24, 22, 21], 'timestamp':[pd.to_datetime('2017-04-01'),
pd.to_datetime('2017-04-02'), pd.to_datetime('2017-04-03'),pd.to_datetime('2017-04-04'),
pd.to_datetime('2017-04-05'),pd.to_datetime('2017-04-06'),pd.to_datetime('2017-04-07'),
pd.to_datetime('2017-04-08'),pd.to_datetime('2017-04-09'),pd.to_datetime('2017-04-10')]})
df = df.set_index('timestamp')
df = GetFromJson.setReadingIntervals(df, 5)
df2 = pd.DataFrame({'readings': [250, 400, 330, 400, 290, 500, 700, 600, 300, 275], 'timestamp':[pd.to_datetime('2017-04-01'),
pd.to_datetime('2017-04-02'), pd.to_datetime('2017-04-03'),pd.to_datetime('2017-04-04'),
pd.to_datetime('2017-04-05'),pd.to_datetime('2017-04-06'),pd.to_datetime('2017-04-07'),
pd.to_datetime('2017-04-08'),pd.to_datetime('2017-04-09'),pd.to_datetime('2017-04-10')]})
df2 = df2.set_index('timestamp')
df2 = GetFromJson.setReadingIntervals(df2, 5)
self.df = GetFromJson.getBooleanAssociationRules(df, df2)
def testSupport(self):
df = apriori.apriori(self.df, 0.00001)
npt.assert_array_equal(df.iloc[8][0], [0.2])
def testConfidence(self):
df = apriori.apriori(self.df, 0.00001)
df = apriori.allConfidence(df, 0)
npt.assert_array_equal(df.iloc[0][2], [1.0])
def testLift(self):
df = apriori.apriori(self.df, 0.00001)
df = apriori.allLift(df, 0)
npt.assert_array_equal(df.iloc[0][2], [2.0])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestStringMethods)
unittest.TextTestRunner(verbosity=2).run(suite)
``` |
{
"source": "jpnm561/HAR-UP",
"score": 3
} |
#### File: HAR-UP/CameraOF_files/Decompressor.py
```python
import zipfile as zf
import os
import shutil
from createFolder import createFolder
from progressBar import progressBar
"""
----------------------------------------------------------------------------------------
Functions
----------------------------------------------------------------------------------------
"""
#A function that unzips folders from a directory into a given path
def UnzipFolders(directory, path, task):
createFolder(path)
try:
p = 0
progressBar(task,p,len(os.listdir(directory)))
for filen in os.listdir(directory):
zipf = zf.ZipFile(directory + '//' + filen)
zipf.extractall(path)
zipf.close()
p+=1
progressBar(task,p,len(os.listdir(directory)))
except:
        print('--------The following directory was not found: ' + directory)
#A function that removes temporary files created during the process
def DeleteFolder(n_dir,
n_sub=[1,17],
n_act=[1,11],
n_trl=[1,3],
n_cam=[1,2]):
    print('Deleting temporary files')
p = 0
q = (n_sub[1] + 1 - n_sub[0])*(n_act[1] + 1 - n_act[0])*(n_trl[1] + 1 - n_trl[0])*(n_cam[1] + 1 - n_cam[0])
progressBar('--Progress',p,q)
#Subjects
for i in range(n_sub[0],n_sub[1]+1):
sub = 'Subject' + str(i)
#Activities
for j in range(n_act[0],n_act[1]+1):
act = 'Activity' + str(j)
#Trials
for k in range(n_trl[0],n_trl[1]+1):
trl = 'Trial' + str(k)
gral = sub+'//'+act+'//'+trl+'//'
#Cameras
for l in range(n_cam[0],n_cam[1]+1):
path = n_dir + gral +sub+act+trl+ 'Camera' + str(l) + '_OF_temp'
try:
shutil.rmtree(path)
except:
                        print('An error occurred while deleting: ' + path)
p+=1
progressBar('--Progress',p,q)
#a function that decompresses the folders
def Decompressor(o_dir,n_dir,
n_sub=[1,17],
n_act=[1,11],
n_trl=[1,3],
n_cam=[1,2]):
#Subjects
for i in range(n_sub[0],n_sub[1]+1):
sub = 'Subject' + str(i)
print('--%s' % (sub))
#Activities
for j in range(n_act[0],n_act[1]+1):
act = 'Activity' + str(j)
print('--S%s--%s' % (str(i),act))
#Trials
for k in range(n_trl[0],n_trl[1]+1):
trl = 'Trial' + str(k)
print('--S%s--A%s--%s' % (str(i),str(j),trl))
gral = sub+'//'+act+'//'+trl+'//'
#Cameras
for l in range(n_cam[0],n_cam[1]+1):
directory = o_dir + gral
path = n_dir + gral +sub+act+trl+ 'Camera' + str(l) + '_OF_temp'
print('----Unzipping outer folders')
UnzipFolders(directory, path, '------Camera'+str(l))
directory = n_dir + gral + sub+act+trl + 'Camera' + str(l) + '_OF_temp'
path = n_dir + gral + sub+act+trl + 'Camera' + str(l) + '_OF_UZ'
print('----Unzipping optical flow files')
UnzipFolders(directory, path, '------Camera'+str(l))
DeleteFolder(n_dir, n_sub, n_act, n_trl, n_cam)
"""
----------------------------------------------------------------------------------------
End of functions
----------------------------------------------------------------------------------------
"""
def main():
original_directory = ''
new_directory = ''
Decompressor(original_directory,new_directory)
print('End of task')
if __name__=="__main__":
main()
```
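A hedged usage sketch for the decompressor above; the paths are placeholders. The string concatenation in the code implies that `original_directory` holds a `Subject<i>/Activity<j>/Trial<k>/` tree of zipped optical-flow archives and that both directory arguments end with a path separator.
```python
# Hedged usage sketch -- placeholder paths, one subject/activity/trial only.
# Input layout implied by the path strings above:
#   <original_directory>/Subject1/Activity1/Trial1/<*.zip>
# Output layout:
#   <new_directory>/Subject1/Activity1/Trial1/Subject1Activity1Trial1Camera1_OF_UZ/
Decompressor('D:/HAR-UP/zipped/', 'D:/HAR-UP/unzipped/',
             n_sub=[1, 1], n_act=[1, 1], n_trl=[1, 1], n_cam=[1, 2])
```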
#### File: HAR-UP/DataBaseDownload/Decompressor.py
```python
import zipfile as zf
import os
from createFolder import createFolder
#A function that unzips folders in a directory
def UnzipFolders(o_dir,n_dir,
n_sub=[1,17],
n_act=[1,11],
n_trl=[1,3],
n_cam=[1,2]):
#Subjects
for i in range(n_sub[0],n_sub[1]+1):
sub = 'Subject' + str(i)
print(sub + ':')
#Activities
for j in range(n_act[0],n_act[1]+1):
act = 'Activity' + str(j)
print('\t' + act + ':')
#Trials
for k in range(n_trl[0],n_trl[1]+1):
trl = 'Trial' + str(k)
print('\t\t' + trl + ':')
gral = sub+'//'+act+'//'+trl+'//'
#Cameras
for l in range(n_cam[0],n_cam[1]+1):
directory = o_dir + gral
path = n_dir + gral +sub+act+trl+ 'Camera' + str(l) + '_OF'
createFolder(path)
try:
for filen in os.listdir(directory):
zipf = zf.ZipFile(directory + '//' + filen)
zipf.extractall(path)
zipf.close()
except:
                    print('The following directory was not found: ' + directory)
print('\t\t\t Unzipped:' + sub + act + trl)
#a function that decompresses the folders
def Decompressor(o_dir,n_dir,
n_sub=[1,17],
n_act=[1,11],
n_trl=[1,3],
n_cam=[1,2]):
#Subjects
for i in range(n_sub[0],n_sub[1]+1):
sub = 'Subject' + str(i)
print(sub + ':')
#Activities
for j in range(n_act[0],n_act[1]+1):
act = 'Activity' + str(j)
print('\t' + act + ':')
#Trials
for k in range(n_trl[0],n_trl[1]+1):
trl = 'Trial' + str(k)
print('\t\t' + trl + ':')
gral = sub+'//'+act+'//'+trl+'//'+sub+act+trl
#Cameras
for l in range(n_cam[0],n_cam[1]+1):
directory = o_dir + gral + 'Camera' + str(l) + '_OF'
path = n_dir + gral + 'Camera' + str(l) + '_OF_UZ'
createFolder(path)
try:
for filen in os.listdir(directory):
zipf = zf.ZipFile(directory + '//' + filen)
zipf.extractall(path)
zipf.close()
except:
                    print('The following directory was not found: ' + directory)
print('\t\t\t Unzipped:' + sub + act + trl)
def main():
original_directory = ''
new_directory = ''
UnzipFolders(original_directory,new_directory)
Decompressor(new_directory,new_directory)
print('End of task')
if __name__=="__main__":
main()
```
#### File: HAR-UP/FeatureExtraction/createFolder.py
```python
import os
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
        print ('An error occurred while creating directory: "' + directory +'"')
```
#### File: HAR-UP/K-crossValidation/k-crossvalidation.py
```python
import pandas as pd
from sklearn.model_selection import KFold
def k_crossFiles(concept,
t_window = ['1&0.5','2&1','3&1.5'],
K=10):
for cncpt in concept:
print(cncpt)
for twnd in t_window:
print('---%s' % twnd)
path = '%s//%s//' % (cncpt, twnd)
training = pd.read_csv('%s//SelectedFTS_%s_%s.csv' % (path,twnd,cncpt))
header = []
for i in range(0,len(training.columns)):
header.append(training.columns[i])
training_set = training.values
kfold = KFold(K, True, 1)
# enumerate splits
i = 1
for train, test in kfold.split(training_set):
print('------Fold %s' % i)
wtr = open(path + 'SelectedFeatures_'+twnd+'_'+cncpt+'_train'+str(i)+'.csv', 'w')
wts = open(path + 'SelectedFeatures_'+twnd+'_'+cncpt+'_test'+str(i)+'.csv', 'w')
try:
bnd = True
for feature in header:
if bnd:
wtr.write(feature)
wts.write(feature)
bnd = False
else:
wtr.write(',' + feature)
wts.write(',' + feature)
wtr.write('\n')
wts.write('\n')
for j in range(0,training_set[train].shape[0]-1):
for k in range(0,training_set[train].shape[1]-1):
wtr.write(str(training_set[train][j][k]) + ',')
wtr.write(str(training_set[train][j][training_set[train].shape[1]-1]) + '\n')
for k in range(0,training_set[train].shape[1]-1):
wtr.write(str(training_set[train][training_set[train].shape[0]-1][k]) + ',')
wtr.write(str(training_set[train][training_set[train].shape[0]-1][training_set[train].shape[1]-1]))
for j in range(0,training_set[test].shape[0]-1):
for k in range(0,training_set[test].shape[1]-1):
wts.write(str(training_set[test][j][k]) + ',')
wts.write(str(training_set[test][j][training_set[test].shape[1]-1]) + '\n')
for k in range(0,training_set[test].shape[1]-1):
wts.write(str(training_set[test][training_set[test].shape[0]-1][k]) + ',')
wts.write(str(training_set[test][training_set[test].shape[0]-1][training_set[test].shape[1]-1]))
except Exception as e:
print('----Unexpected error ' + str(e))
wtr.close()
wts.close()
i += 1
def main():
concept = []
k_crossFiles(concept)
print('\nEnd of task')
if __name__=="__main__":
main()
```
#### File: HAR-UP/K-crossValidation/Training_function.py
```python
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier as RndFC
from sklearn.neural_network import MLPClassifier as ffp
from sklearn.neighbors import KNeighborsClassifier as KNN
import pandas as pd
from sklearn import metrics as met
def training(concept,
t_window = ['1&0.5','2&1','3&1.5'],
methods = ['RF','SVM', 'MLP', 'KNN'],
K=10):
for cncpt in concept:
print(cncpt)
for twnd in t_window:
print('--%s' % twnd)
path = '%s//%s//' % (cncpt,twnd)
#Each fold's accuracy is stored
acc_k = []
for k in range(1,K+1):
print('-----Fold %d:' % k)
#Training and testing sets are opened with pandas
training_set = pd.read_csv('%sSelectedFeatures_%s_%s_train%d.csv'%(path,twnd,cncpt,k))
testing_set = pd.read_csv('%sSelectedFeatures_%s_%s_test%d.csv'%(path,twnd,cncpt,k))
#Training data set is split into inputs (X) and outputs (Y)
training_set_X = training_set.drop(training_set.columns[-1],axis=1)
training_set_Y = training_set[training_set.columns[-1]]
#Testing data is split
testing_set_X = testing_set.drop(testing_set.columns[-1],axis=1)
expected_output = testing_set[testing_set.columns[-1]].values
#Each method's accuracy is stored
acc_method = []
for method in methods:
if method == 'RF':
classifier = RndFC(n_estimators=100)
elif method == 'SVM':
classifier = svm.SVC(gamma='auto', kernel = 'poly')
elif method == 'MLP':
classifier = ffp()
else:
classifier = KNN()
classifier.fit(training_set_X, training_set_Y)
#The classifier is tested
estimates = classifier.predict(testing_set_X)
accuracy = met.accuracy_score(expected_output,estimates)
print('-----------%s Accuracy: %f' % (method, accuracy))
acc_method.append(accuracy)
acc_k.append(acc_method)
print('---%s scores:' % twnd)
for i in range(0,len(methods)):
avg_accuracy = 0
for k in range(0,K):
avg_accuracy += acc_k[k][i]
avg_accuracy = avg_accuracy/K
print('------%s Avg. Accuracy: %f' %(methods[i],avg_accuracy))
def main():
concept = []
training(concept)
print('\nEnd of task')
if __name__=="__main__":
main()
```
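A hedged sketch of calling `training()` above. It assumes the per-fold CSVs written by k-crossvalidation.py already exist under `<concept>/<window>/`; `'IMU'` is a hypothetical concept folder name used only for illustration.
```python
# Hedged usage sketch -- 'IMU' is a hypothetical concept name; the fold files
# <concept>/<window>/SelectedFeatures_<window>_<concept>_train<k>.csv (and _test<k>.csv)
# must have been produced by k-crossvalidation.py beforehand.
training(['IMU'], t_window=['1&0.5'], methods=['RF', 'KNN'], K=10)
```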
#### File: HAR-UP/Training/MC_Training.py
```python
from sklearn.ensemble import RandomForestClassifier as RndFC
from sklearn import svm
from sklearn import metrics as met
from sklearn.neural_network import MLPClassifier as ffp
from sklearn.neighbors import KNeighborsClassifier as KNN
import numpy as np
import random as rnd
from createFolder import createFolder
import matplotlib.pyplot as plt
from scorePlots import plotScore, plot_confusion_matrix
"""
-----------------------------------------------------------------------------------------------------
Functions
-----------------------------------------------------------------------------------------------------
"""
def cleanLine(line, header = False):
line = line.replace("'",'')
line = line.replace("[",'')
line = line.replace("]",'')
if header:
line = line.replace('\ ','')
else:
line = line.replace("?",'NaN')
arr = line.split(',')
return arr
def MC_Training(concept,
t_window=['1&0.5','2&1','3&1.5'],
methods = ['RF','SVM','MLP','KNN']):
for cncpt in concept:
for twnd in t_window:
d_base = []
f_dbase = 'SelectedFeatures_'+twnd+'_'+cncpt+'.csv'
r = open(cncpt + '//' + twnd + '//' + f_dbase,'r')
txt = r.read()
r.close()
d_base = txt.split('\n')
#an array to store the features (columns in the csv, without the tag)
features = []
feat = cleanLine(d_base[0],True)
for i in range(0, len(feat) - 1):
features.append(feat[i])
for k in range(0, 10):
#70% of the data base is randomly selected
n_tr = int((len(d_base)*0.7)//1)
r_arr = []
for l in range(0, n_tr):
while(True):
n_rnd = rnd.randint(1, len(d_base) - 1)
if len(r_arr) == 0:
r_arr.append(n_rnd)
break
else:
flg = True
for num in r_arr:
if num == n_rnd:
flg = False
break
if flg == True:
r_arr.append(n_rnd)
break
#input (x) and output (y) arrays for training
x_tr = []
y_tr = []
#input (x) and output (y) arrays for validation
x_30 = []
y_30 = []
#To know if something should be ignored
ignr = -1
for i in range(1,len(d_base)):
ln = str(d_base[i])
q = cleanLine(ln)
if (q[0]!=' ')and(q[0]!=''):
p = []
for j in range(0, len(features)):
if(j < len(q)-1):
p.append(float(q[j]))
flg = False
for num in r_arr:
if num == i:
flg = True
break
if flg == True:
y_tr.append(float(q[len(features)]))
x_tr.append(p)
else:
y_30.append(float(q[len(features)]))
x_30.append(p)
else:
if (i + 1) == len(d_base):
ignr = i
else:
print('There is an empty line!!!\n \tLine: '+str(i+1)+' / '+str(len(d_base)))
X = np.array([np.array(z) for z in x_tr])
for method in methods:
if method == 'RF':
clsf = RndFC(n_estimators=10)
elif method == 'SVM':
clsf = svm.SVC(gamma='auto')
elif method == 'MLP':
clsf = ffp()
else:
clsf = KNN()
clsf.fit(X,y_tr)
Xr = np.array([np.array(z) for z in x_30])
Y = clsf.predict(Xr)
st = ""
for i in range(0, len(features)):
st += features[i] +','
st += 'Output,Expected' + '\n'
path = cncpt+'//'+twnd+'//'+method
createFolder(path)
doc = path+'//Result_'+twnd+'_'+method+'_'+str(k+1)+'.csv'
w = open(doc, 'w')
print('-----Writing...')
try:
w.write(st)
a = 0
for i in range(1, len(d_base)):
if i!= ignr:
flg = True
for nmb in r_arr:
if nmb == i:
flg = False
break
if flg == True:
q = d_base[i].split(',')
for j in range(0, len(features)):
w.write(q[j] + ',')
w.write(str(Y[a]) + ',' + str(y_30[a]) + '\n')
a += 1
except Exception as e:
print('----Unexpected error: ' + str(e))
w.close()
print('-----' + str(a) + '/' + str(len(d_base)-1))
print('----...Prediction ' + str(k+1) + ' ' + method + ' finished')
def write_score(w, score, arr,ln_flg = False):
w.write(score + '\n')
for i in range(0, len(arr)):
w.write(str(arr[i]))
if i < len(arr) - 1:
w.write(',')
w.write('\n')
#mean
mu = np.mean(arr)
#standard deviation
sig = np.std(arr)
w.write('Mean,' + str(mu) + '\n')
w.write('SD,' + str(sig))
if ln_flg:
w.write('\n')
return [mu,sig]
def writeFinal(w, score, arr, t_window, methods, ln_flg = False):
w.write(score + '\n')
for method in methods:
w.write(',' + method +',')
for i in range(0,len(t_window)):
w.write('\n' + t_window[i])
for j in range(0,len(methods)):
for k in range(0,2):
w.write(','+str(arr[i][j][k]))
if ln_flg:
w.write('\n')
def MC_Scores(concept,
t_window=['1&0.5','2&1','3&1.5'],
methods = ['RF','SVM','MLP','KNN']):
for cncpt in concept:
print(cncpt)
acc_e = []
ppv_e = []
fsc_e = []
rec_e = []
cmat_e = []
for twnd in t_window:
print('--' + twnd)
acc_arr = []
ppv_arr = []
fsc_arr = []
rec_arr = []
cmat_arr = []
for method in methods:
acc_prom = []
ppv_prom = []
fsc_prom = []
rec_prom = []
cmat_prom = []
for i in range(1,11):
tp = 0
tn = 0
pos = 0
neg = 0
y_true = []
y_pred = []
doc = cncpt+'//'+twnd+'//'+method+'//Result_'+twnd+'_'+method+'_'+str(i)+'.csv'
r = open(doc,'r')
txt = r.read()
r.close()
d_base = txt.split('\n')
for j in range(1,len(d_base)):
ln = str(d_base[j])
q = ln.split(',')
if(len(q) > 1):
pred = float(q[len(q)-2])
y_pred.append(pred)
yesp = float(q[len(q)-1])
y_true.append(yesp)
bnd = True
if yesp == 0:
neg += 1
else:
pos += 1
bnd = False
if(pred == yesp):
if bnd:
tn += 1
else:
tp += 1
if(pos == 0):
print('No falls in: ' + cncpt + '-' + twnd + '-' + str(j + 1))
acc = 100*met.accuracy_score(y_true,y_pred)
ppv = 100*met.precision_score(y_true,y_pred,average='macro')
fsc = 100*met.f1_score(y_true,y_pred,average='macro')
rec = 100*met.recall_score(y_true,y_pred,average='macro')
acc_prom.append(acc)
ppv_prom.append(ppv)
fsc_prom.append(fsc)
rec_prom.append(rec)
c_mat = met.confusion_matrix(y_true,y_pred)
c_mat = c_mat.astype('float') / c_mat.sum(axis=1)[:,np.newaxis]
cmat_prom.append(c_mat)
odoc = cncpt+'//'+twnd+'//Score_'+twnd+'_'+method+'.csv'
w = open(odoc, 'w')
try:
acc_arr.append(write_score(w,'Accuracy',acc_prom,True))
ppv_arr.append(write_score(w,'Precision',ppv_prom,True))
rec_arr.append(write_score(w,'Recall',rec_prom,True))
fsc_arr.append(write_score(w,'F1Score',fsc_prom))
print('----' + method + ' ' + twnd + ' done')
except Exception as e:
print('----ERROR: ' + str(e))
w.close()
#Graphs
temp_arr = []
for e_y in range(0,12):
tmp_ln = []
for e_x in range(0,12):
tmp_var = 0
for mat in range(0,len(cmat_prom)):
tmp_var += cmat_prom[mat][e_y][e_x]
tmp_var = tmp_var/len(cmat_prom)
tmp_ln.append(tmp_var)
temp_arr.append(tmp_ln)
cmat_arr.append(temp_arr)
title = 'Avg. Confusion Matrix '+twnd+' '+method+' '+cncpt
title_save = 'AvgConfusionMatrix_'+twnd+'_'+method+'_'+cncpt
classes = ['1','2','3','4','5','6','7','8','9','10','11','20']
cf_mat = np.array(temp_arr)
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cf_mat,classes = classes,normalize=True,title=title)
plt.savefig(cncpt+'//'+twnd+'//'+title_save+'.jpg',dpi=100)
plt.close()
acc_e.append(acc_arr)
ppv_e.append(ppv_arr)
rec_e.append(rec_arr)
fsc_e.append(fsc_arr)
cmat_e.append(cmat_arr)
w = open(cncpt + '//Score_' + cncpt +'_temp.csv','w')
try:
writeFinal(w,'Accuracy',acc_e,t_window,methods,True)
writeFinal(w,'Precision',ppv_e,t_window,methods,True)
writeFinal(w,'Recall',rec_e,t_window,methods,True)
writeFinal(w,'F1Score',fsc_e,t_window,methods)
except Exception as e:
print(e)
w.close()
#plotting the scores
plotScore([acc_e,ppv_e,rec_e,fsc_e],cncpt,t_window=t_window,methods=methods)
#Average confusion matrix
for mthd in range(0,len(methods)):
temp_arr = []
method = methods[mthd]
print('----'+methods[mthd])
for e_y in range(0,12):
tmp_ln = []
for e_x in range(0,12):
tmp_var = 0
for tw in range(0,len(t_window)):
tmp_var += cmat_e[tw][mthd][e_y][e_x]
tmp_var = tmp_var/len(t_window)
tmp_ln.append(tmp_var)
temp_arr.append(tmp_ln)
title = 'Avg. Confusion Matrix ' + method+ ' ' + cncpt
classes = ['1','2','3','4','5','6','7','8','9','10','11','20']
cf_mat = np.array(temp_arr)
np.set_printoptions(precision=2)
plt.figure()
plot_confusion_matrix(cf_mat,classes = classes,normalize=True,title=title)
plt.savefig(cncpt+'//AvgConfusionMatrix_'+method+'_'+cncpt+'.jpg',dpi=100)
plt.close()
"""
-----------------------------------------------------------------------------------------------------
End of functions
-----------------------------------------------------------------------------------------------------
"""
def main():
concept = []
MC_Training(concept)
MC_Scores(concept)
if __name__ == "__main__":
main()
``` |
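A hedged sketch of driving the two routines above: `MC_Training()` reads `<concept>/<window>/SelectedFeatures_<window>_<concept>.csv`, repeats a random 70/30 split ten times per classifier and writes `Result_*.csv` files, which `MC_Scores()` then turns into score CSVs and confusion-matrix plots (it assumes the full 12-activity HAR-UP label set). `'IMU'` is a hypothetical concept name.
```python
# Hedged usage sketch -- 'IMU' is a hypothetical concept folder containing
# 1&0.5/SelectedFeatures_1&0.5_IMU.csv; MC_Scores() assumes all 12 HAR-UP activity labels.
MC_Training(['IMU'], t_window=['1&0.5'], methods=['RF'])
MC_Scores(['IMU'], t_window=['1&0.5'], methods=['RF'])
```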
{
"source": "jpn--/omx-freepascal",
"score": 2
} |
#### File: jpn--/omx-freepascal/hdf5pas.py
```python
from __future__ import print_function
import sys
import os.path
import argparse
import networkx as nx
import datetime
import re
from collections import *
from itertools import *
parser = argparse.ArgumentParser(description = 'Generate Delphi wrapper for HDF5 library.')
parser.add_argument('srcdir', help = 'directory containing HDF5 *.h files.',
nargs = '?', default = '.')
args = parser.parse_args()
def parsedeps(header, graph):
if header.startswith('H5') and header not in graph.onodes:
graph.onodes.append(header)
for line in open(os.path.join(args.srcdir, header)):
m = re.match('#include "(H5.*public.h)".*', line)
if m:
include = m.group(1)
if header.startswith('H5'):
if include not in graph.onodes:
graph.onodes.append(include)
graph.add_edge(header, include)
parsedeps(include, graph)
defs = ''
classname = 'THDF5Dll'
types = ''
fields = ''
props = ''
init = ''
cinit = ''
template = \
'''unit hdf5dll;
// Delphi wrapper for HDF5 library.
// Auto-generated {date} by hdf5pas.py.
interface
uses
windows;
{{$ALIGN ON}}
{{$MINENUMSIZE 4}}
type
int32_t = Integer;
Pint32_t = ^int32_t;
uint32_t = Cardinal;
Puint32_t = ^uint32_t;
int64_t = Int64;
Pint64_t = ^int64_t;
uint64_t = UInt64;
Puint64_t = ^uint64_t;
time_t = NativeInt;
Ptime_t = ^time_t;
size_t = NativeUInt;
Psize_t = ^size_t;
ssize_t = NativeInt;
Pssize_t = ^ssize_t;
off_t = NativeInt;
Poff_t = ^off_t;
PFILE = Pointer;
type
hsize_t = UInt64;
Phsize_t = ^hsize_t;
hssize_t = Int64;
Phssize_t = ^hssize_t;
haddr_t = UInt64;
Phaddr_t = ^haddr_t;
const
HADDR_UNDEF = haddr_t(-1);
{defs}
type
{classname} = class
private
type
{types}
private
FHandle: THandle;
{fields}
public
constructor Create(APath: string);
destructor Destroy; override;
{props}
property Handle: THandle read FHandle;
function IsValid: Boolean;
end;
implementation
{{ {classname} }}
constructor {classname}.Create(APath: string);
function GetDllProc(AModule: THandle; AName: string): Pointer;
begin
Result := GetProcAddress(AModule, PChar(AName));
Assert(Assigned(Result));
end;
begin
inherited Create;
FHandle := LoadLibrary(PChar(APath));
{init}
H5open;
{cinit}
end;
destructor {classname}.Destroy;
begin
if FHandle <> 0 then
FreeLibrary(FHandle);
inherited;
end;
function {classname}.IsValid: Boolean;
begin
Result := (FHandle <> 0);
end;
end.
'''
def parse(header):
def smartjoin(sep, *args):
if args[0]:
return sep.join(args)
else:
return args[1]
def stripcomment(s):
return re.sub(' *(\(\*.*\*\))?$', '', s)
def strtoint(value):
value = re.sub('^\(\(.*\)\)$', r'\1', value.strip())
if value.startswith('('):
tokens = re.findall('(\((.*?)\)( *|$))|([^()]+$)', value)
value = (tokens[-1][1] or tokens[-1][3]).strip()
else:
tokens = None
value = value.rstrip('uL')
try:
result = int(value, 0)
if tokens:
for token in reversed(tokens[:-1]):
typ = token[1].strip()
(name, typ) = convnametype('', typ)
result = '{}({})'.format(typ, result)
except ValueError:
m = re.match('(.*) << (.*)', value)
if m:
result = '{} shl {}'.format(m.group(1), int(m.group(2), 0))
else:
return
return result
def strtofloat(value):
try:
value = value.rstrip('f')
return float(value)
except ValueError:
pass
def parseprocdecl(signature, istype):
signature = re.sub('\(\*[^()]*?\*\)', '', signature).replace('*', ' * ')
if istype:
(rettype, name, args) = re.match('(.*) ?\( \* ([^ ]*)\) ?\((.*)\);', signature).groups()
else:
(rettype, name, args) = re.match('(.*) ([^ ]*) ?\((.*)\);', signature).groups()
if args != 'void':
args = [s.strip() for s in args.split(',')]
else:
args = []
varargs = False
for i in range(len(args)):
arg = args[i].strip().split(' ')
if len([p for p in arg if p != '*']) < 2 and args[i] != '...':
arg.append('p')
atyp = ' '.join(arg[:-1])
aname = arg[-1]
(aname, atyp) = convnametype(aname, atyp, arraytypes = False)
if args[i] != '...':
args[i] = '{}: {}'.format(aname, atyp)
else:
args[i] = None
varargs = True
args = [s for s in args if s]
rettype = convnametype('', rettype, arraytypes = False)[-1]
return name, args, rettype, varargs
def getnametype(signature):
while ' ' in signature:
signature = signature.replace(' ', ' ')
m = re.match('([^\[\]]*)(\[(.+)\])?', signature.strip())
lexems = m.group(1).split(' ')
if lexems[0] == 'enum':
lexems = lexems[1:]
arr = m.group(2) or ''
return lexems[-1] + arr, ' '.join(lexems[:-1])
def convnametype(cname, ctype, arraytypes = True):
# Convert C-style variable/constant/field declaration to Delphi-style
def replace(where, olditems, newitem):
items = where
for item in olditems:
if item in items:
items = [s for s in items if s != item]
else:
return where
return items + [newitem]
typ = ctype.replace('*', ' * ')
while ' ' in typ:
typ = typ.replace(' ', ' ')
typ = typ.strip().split(' ')
stars = len([s for s in cname if s == '*'])
name = cname.strip('* ')
typ += ['*']*stars
if name.endswith('[]'):
name = name.rstrip('[]')
typ += ['*']
m = re.match('([^\[\]]*)(\[(.+)\])?', name)
arrsize = m.group(3)
name = m.group(1)
if name == 'type':
name = 'typ'
elif name == 'object':
name = 'obj'
elif name == 'end':
name = 'end_'
elif name == 'file':
name = 'file_'
typ = [s for s in typ if s != 'const']
typ = replace(typ, ['unsigned', 'long', 'long', 'int'], 'UInt64')
typ = replace(typ, ['unsigned', 'long', 'long'], 'UInt64')
typ = replace(typ, ['long', 'long', 'int'], 'Int64')
typ = replace(typ, ['long', 'long'], 'Int64')
typ = replace(typ, ['unsigned', 'long', 'int'], 'Cardinal')
typ = replace(typ, ['unsigned', 'long'], 'Cardinal')
typ = replace(typ, ['long', 'int'], 'Integer')
typ = replace(typ, ['long'], 'Integer')
typ = replace(typ, ['unsigned', 'short', 'int'], 'Word')
typ = replace(typ, ['unsigned', 'short'], 'Word')
typ = replace(typ, ['short', 'int'], 'ShortInt')
typ = replace(typ, ['short'], 'ShortInt')
typ = replace(typ, ['unsigned', 'int'], 'Cardinal')
typ = replace(typ, ['int'], 'Integer')
typ = replace(typ, ['unsigned', 'char'], 'Byte')
typ = replace(typ, ['char'], 'AnsiChar')
typ = replace(typ, ['unsigned'], 'Cardinal')
typ = replace(typ, ['bool'], 'Boolean')
typ = replace(typ, ['double'], 'Double')
if '*' in typ:
typ = replace(typ, ['void'], 'ointer')
stars = len([s for s in typ if s == '*'])
typ = 'P'*stars + ''.join([s for s in typ if s != '*'])
if arrsize:
if arraytypes:
if arrsize.endswith(' + 1'):
typ = 'array[0..{}] of {}'.format(arrsize[0:len(arrsize) - 4], typ)
else:
typ = 'array[0..{} - 1] of {}'.format(arrsize, typ)
else:
typ = 'P' + typ
return (name, typ)
def preprocess(lines):
'''
Parse and strip off pre-processor directives.
    Currently all #if/#ifdef/#ifndef conditions are treated as false, so their contents are dropped.
'''
print('{}: Pre-processing...'.format(header), file = sys.stderr)
ifdef = 0
result = []
for line in lines:
line = line.strip('\n').expandtabs()
if line.strip() == '':
line = ''
m = re.match('(.*)(/\*.*)', line)
if m and not re.search('\*/', m.group(2)):
if m.group(1).strip() == '':
sublines = [m.group(2)]
else:
sublines = m.groups()
else:
sublines = [line]
for line in sublines:
line = line.replace('/*', '(*').replace('*/', '*)')
hdef = '_{}_H'.format(os.path.splitext(header)[0])
if re.match('#ifndef {}'.format(hdef), line) or \
re.match('#define {}'.format(hdef), line):
pass
elif line.startswith('#if') or \
line.startswith('#ifdef') or \
line.startswith('#ifndef'):
ifdef += 1
elif line.startswith('#endif'):
ifdef -= 1
elif not ifdef:
if line.startswith('#include') or line.startswith('#undef'):
pass
else:
result.append(line)
print('{}: {} of {} lines left'.format(header, len(result), len(lines)), file = sys.stderr)
return result
lines = open(os.path.join(args.srcdir, header)).readlines()
lines = preprocess(lines)
print('{}: Parsing...'.format(header), file = sys.stderr)
def process(state, stateinfo, comment):
def procdefine(lines):
'''
Process sequence of #define's.
'''
global props
result = ''
comment = False
for line in lines.split('\n'):
m = re.match(' *?(((\(\*)|( \*)).*)', line)
if m:
comment = True
if len(result) > 0:
result += '\n' + m.group(1)
else:
m = re.match(r'#define +(.*?) +([^\\]+)$', stripcomment(line))
if m:
comment = False
mm = re.search('\(\*(.*)\*\)', line.strip())
comment = mm.group(1) if mm else None
(name, value) = m.groups()
value = re.sub('^\((.*)\)$', r'\1', value)
value = re.sub('^H5CHECK ', '', value)
if name.startswith('H5F_ACC_'):
value = re.sub('^H5OPEN ', '', value)
value = value.replace('sizeof', 'SizeOf')
comment = ' '.join(['(*', comment.strip(), '*)'] if comment else '')
if '?' in value or ',' in value:
print('WARN: {}'.format(line), file = sys.stderr)
elif value.startswith('H5OPEN'):
props += ' property {}: hid_t read F{};\n'.format(name, value.split(' ')[-1].strip('_g'))
elif 'SIZEOF' in name:
pass
elif strtoint(value) != None:
result += '\n {} = {}; {}'.format(name, strtoint(value), comment)
elif strtofloat(value) != None:
result += '\n {} = {}; {}'.format(name, strtofloat(value), comment)
elif value.startswith('"') and value.endswith('"'):
result += "\n {} = '{}'; {}".format(name, value.strip('"'), comment)
elif len(value.split('|')) > 1:
result += '\n {} = {}; {}'.format(name,
' or '.join([item.strip()
for item in value.split('|')]),
comment)
elif name.startswith('H5T_INTEL') or \
name.startswith('H5T_ALPHA') or \
name.startswith('H5T_MIPS'):
props += ' property {}: hid_t read F{};\n'.format(name, value)
else:
result += '\n {} = {}; {}'.format(name, value, comment)
elif comment:
result += '\n' + line
else:
print('WARN: {}'.format(line), file = sys.stderr)
return result
def proctypedef(lines):
'''
Process sequence of typedefs.
'''
def process(prevstate, state, stateinfo):
'''
Process one typedef.
'''
result = ''
if len(stateinfo) == 1:
if state == 'enum':
stateinfo[0] = stateinfo[0].replace('typedef enum', 'typedef')
elif state == 'struct':
stateinfo[0] = stateinfo[0].replace('typedef struct', 'typedef')
state = 'other'
if state == 'enum':
'''
Enumerated type declaration.
'''
result += '\ntype'
name = stateinfo[-1].strip('}; ') or stateinfo[0].split(' ')[2]
result += '\n P{name} = ^{name};'.format(name = name)
result += '\n {} ='.format(name)
lines = list()
Line = namedtuple('Line', ['line', 'name', 'value', 'comment'])
lastname = None
for line in stateinfo[1:len(stateinfo) - 1]:
if stripcomment(line).strip() == '{':
continue
m = re.match(' *([^ *(),]+)( *= ?([^,]+))?,?', stripcomment(line))
if m:
(name, dummy, value) = m.groups()
value = strtoint(value) if value else None
mm = re.search('\(\*(.*)\*\)', line.strip())
comment = mm.group(1) if mm else None
comment = ' '.join(['(*', comment.strip(), '*)'] if comment else '')
lines.append(Line(line = None, name = name, value = value, comment = comment))
lastname = name
elif not stripcomment(line).strip():
lines.append(Line(line = line.strip(), name = None, value = None, comment = None))
elif re.match(' *([( ]\*.*)', line):
lines.append(Line(line = re.sub(' *([( ]\*.*)', r'\1', line), name = None, value = None, comment = None))
else:
print('WARN: {}'.format(line), file = sys.stderr)
firstline = True
for line in lines:
if line.line != None:
result += '\n' + line.line
else:
result += '\n {}{}{}{} {}'.format(
'(' if firstline else ' ', line.name, ' = {}'.format(line.value) if line.value else '',
');' if line.name == lastname else ',', line.comment)
firstline = False
elif state == 'struct':
'''
Compound type (struct) declaration.
'''
result += '\ntype'
def procstruct(lines, offset, pointertypes = False):
result = ''
typename = lines[-1].strip('}; ') or lines[0].split(' ')[2]
if pointertypes:
result += '\n{}P{name} = ^{name};'.format(' '*offset, name = typename)
result += '\n{}PP{name} = ^P{name};'.format(' '*offset, name = typename)
result += '\n{}{} = record'.format(' '*offset, typename)
else:
result += '\n{}{}: record'.format(' '*offset, typename)
item = ''
nested = []
for line in lines[1:len(lines) - 1]:
if stripcomment(line).strip() in ('', '{'):
continue
item += line.strip()
if stripcomment(item).strip()[-1] not in ('{', ';'):
continue
mm = re.search('\(\*(.*)\*\)$', item.strip())
comment = ' '.join(['(*', mm.group(1).strip(), '*)'] if mm else '')
if item.startswith('struct') or item.startswith('union'):
nested += [item]
elif nested:
nested += [item]
if item.startswith('}'):
if nested[0].startswith('union'):
result += '\n {}case Integer of'.format(' '*offset)
for n, line in zip(count(1), nested[1:len(nested) - 1]):
mm = re.search('\(\*(.*)\*\)$', line.strip())
comment = ' '.join(['(*', mm.group(1).strip(), '*)'] if mm else '')
(cname, ctype) = getnametype(stripcomment(line).rstrip(';'));
(name, typ) = convnametype(cname, ctype)
result += '\n {}{}: ({}: {}); {}'.format(' '*offset, n, name, typ, comment).rstrip()
else:
result += procstruct(nested, offset + 2)
nested = []
else:
if item.endswith(');'):
name, args, rettype, varargs = parseprocdecl(item, True)
if typename == 'H5FD_class_t':
args = [arg.replace('PH5FD_t', 'Pointer {PH5FD_t}') for arg in args]
rettype = rettype.replace('PH5FD_t', 'Pointer {PH5FD_t}')
if args:
args = '({})'.format('; '.join(args))
else:
args = ''
if rettype == 'void':
result += '\n {}{}: procedure{}; cdecl; {}'.format(' '*offset, name, args, comment).rstrip()
else:
result += '\n {}{}: function{}: {}; cdecl; {}'.format(' '*offset, name, args, rettype, comment).rstrip()
else:
(cname, ctype) = getnametype(stripcomment(item).rstrip(';'));
(name, typ) = convnametype(cname, ctype)
if typename == 'H5FD_class_t':
typ = typ.replace('array[0..H5FD_MEM_NTYPES - 1]', 'array[H5FD_MEM_DEFAULT..Pred(H5FD_MEM_NTYPES)]')
result += '\n {}{}: {}; {}'.format(' '*offset, name, typ, comment).rstrip()
item = ''
result += '\n{}end;'.format(' '*offset)
return result
result += procstruct(stateinfo, 2, True)
elif state == 'other':
comments = None
for i in range(len(stateinfo)):
if stateinfo[i].startswith('(*'):
comments = stateinfo[i:]
stateinfo = stateinfo[:i]
break
if len(stateinfo) == 1 and re.match('typedef *([^(),]*) +([^(),]*);', stateinfo[0]):
'''
Type synonym.
'''
(typ, name) = re.match('typedef *([^(),]*) +([^(),]*);', stateinfo[0]).groups()
(name, typ) = convnametype(name.strip(), typ.strip())
if name != typ:
if prevstate != 'other':
result += 'type\n'
if name.endswith(']'):
result += ' P{} = P{};'.format(re.sub('\[.*', '', name), typ)
else:
result += ' {} = {};'.format(name, typ)
result += '\n P{name} = ^{name};'.format(name = name)
else:
'''
Procedural type declaration.
'''
if prevstate != 'other':
result += 'type\n'
signature = ' '.join(stateinfo)
name, args, rettype, varargs = parseprocdecl(re.match('typedef (.*;)', signature.strip()).group(1), True)
if rettype == 'void':
result += ' {} = procedure({}); cdecl;'.format(name, '; '.join(args))
else:
result += ' {} = function({}): {}; cdecl;'.format(name, '; '.join(args), rettype)
result += '\n P{name} = ^{name};'.format(name = name)
if comments:
result += '\n'.join([''] + comments)
return result
result = ''
prevstate = None
state = None
stateinfo = []
for line in lines.split('\n'):
line = re.sub('^enum', 'typedef enum', line)
line = re.sub('^struct', 'typedef struct', line)
if line.startswith('typedef enum'):
result += '\n' + process(prevstate, state, stateinfo)
prevstate = state
state = 'enum'
stateinfo = []
elif line.startswith('typedef struct'):
result += '\n' + process(prevstate, state, stateinfo)
prevstate = state
state = 'struct'
stateinfo = []
elif line.startswith('typedef '):
result += '\n' + process(prevstate, state, stateinfo)
prevstate = state
state = 'other'
stateinfo = []
if state:
stateinfo.append(line)
else:
print('WARN: {}'.format(line), file = sys.stderr)
if state:
result += '\n' + process(prevstate, state, stateinfo)
return result
def procexport(lines):
'''
Process sequence of exported symbols.
'''
global defs, types, fields, props, init, cinit
signature = None
for line in lines.split('\n'):
if line.startswith('(*') or line.startswith(' *'):
continue
line = re.sub('[(/]\*.*?\*[)/]', '', line.strip())
if line.startswith('H5_DLLVAR'):
'''
Exported variable.
'''
(dummy, ctype, cname) = line.split(' ')
cname = cname.strip('_g;')
(cname, ctype) = convnametype(cname, ctype)
fields += ' F{}: {};\n'.format(cname, ctype)
cinit += " F{cname} := P{ctype}(GetDllProc(FHandle, '{cname}_g'))^;\n".format(cname = cname, ctype = ctype)
else:
'''
Exported procedure.
'''
signature = smartjoin(' ', signature, line)
if not ')' in line:
continue
signature = signature.replace(' (', '(')
fname, args, rettype, varargs = parseprocdecl(re.match('H5_DLL (.*;)', signature.strip()).group(1), False)
if len(args) > 0:
fdef = '(' + '; '.join(args) + ')'
else:
fdef = ''
fdef = fdef + ': ' + rettype
if varargs:
types += ' // T{} = function{}; cdecl; varargs;\n'.format(fname, fdef)
fields += ' // F{}: T{};\n'.format(fname, fname)
props += ' // property {}: T{} read {};\n'.format(fname, fname, fname)
print('ERROR: Ignoring varargs procedure {}.'.format(fname), file = sys.stderr)
else:
types += ' T{} = function{}; cdecl;\n'.format(fname, fdef)
fields += ' F{}: T{};\n'.format(fname, fname)
props += ' property {}: T{} read F{};\n'.format(fname, fname, fname)
init += " @F{0} := GetDllProc(FHandle, '{0}');\n".format(fname)
signature = None
global defs, types, fields, props, init, cinit
if stateinfo:
stateinfo = stateinfo.strip('\n')
if state == 'define':
newdefs = procdefine(stateinfo).lstrip('\n')
if len(newdefs) > 0:
if comment:
defs += '\n'
defs += comment.strip('\n') + '\n'
defs += 'const\n'
defs += newdefs
defs += '\n'
elif state == 'typedef':
newdefs = proctypedef(stateinfo).lstrip('\n')
if len(newdefs) > 0:
if comment:
defs += '\n'
defs += comment.strip('\n') + '\n'
defs += newdefs
defs += '\n'
elif state == 'export':
newdefs = procexport(stateinfo)
global state, stateinfo, comment
state = None
stateinfo = None
comment = None
def setstate(newstate):
global state, stateinfo, comment
if stateinfo and stateinfo.endswith('\n'):
if state:
process(state, stateinfo, comment)
state = newstate
stateinfo = None
comment = None
elif newstate != state:
if newstate == 'comment':
if state:
return
else:
if state == 'comment':
comment = stateinfo
process(state, stateinfo, comment)
if state != 'comment':
comment = None
state = newstate
stateinfo = None
for line in lines + ['']:
if line.startswith('(*') or line.startswith(' *'):
setstate('comment')
elif not state and line.lstrip(' ').startswith('(*'):
setstate('comment')
elif line.startswith('#define'):
setstate('define')
elif line.startswith('typedef') or \
line.startswith('struct') or \
line.startswith('enum'):
setstate('typedef')
elif line.startswith('H5_DLL'):
setstate('export')
elif line and not state:
raise Exception(header, line)
if state:
stateinfo = smartjoin('\n', stateinfo, line)
setstate(None)
print(file = sys.stderr)
graph = nx.DiGraph()
graph.onodes = []
parsedeps('hdf5.h', graph)
paths = nx.all_pairs_shortest_path_length(graph)
for header in sorted(graph.onodes, key = lambda header: len(paths[header])):
parse(header)
for line in template.format(date = datetime.date.today(),
defs = defs.strip('\n'),
classname = classname,
types = types.strip('\n'),
fields = fields.strip('\n'),
props = props.strip('\n'),
init = init.strip('\n'),
cinit = cinit.strip('\n')).split('\n'):
print(line.rstrip())
``` |
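The core of the converter above is `convnametype`, which maps a C declaration onto a Delphi-style name/type pair. Below is a minimal sketch of its behavior, with expected results shown as comments; the module name is hypothetical, since the script as written also runs its top-level parsing code on import.

```python
# Hypothetical module name for the converter script above; importing it as-is
# would also execute its top-level parsing code, so this is a sketch only.
from h5_to_pascal import convnametype

print(convnametype('count', 'unsigned int'))  # ('count', 'Cardinal')
print(convnametype('*buf', 'void'))           # ('buf', 'Pointer') -- 'void' becomes 'ointer', plus one leading 'P' per '*'
print(convnametype('name[64]', 'char'))       # ('name', 'array[0..64 - 1] of AnsiChar')
```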
{
"source": "jpn--/pine",
"score": 2
} |
#### File: pine/pines/addict.py
```python
import copy
class adict(dict):
def __init__(__self, *args, **kwargs):
object.__setattr__(__self, '__parent', kwargs.pop('__parent', None))
object.__setattr__(__self, '__key', kwargs.pop('__key', None))
for arg in args:
if not arg:
continue
elif isinstance(arg, dict):
for key, val in arg.items():
__self[key] = __self._hook(val)
elif isinstance(arg, tuple) and (not isinstance(arg[0], tuple)):
__self[arg[0]] = __self._hook(arg[1])
else:
for key, val in iter(arg):
__self[key] = __self._hook(val)
for key, val in kwargs.items():
__self[key] = __self._hook(val)
def __setattr__(self, name, value):
if hasattr(self.__class__, name):
raise AttributeError("'adict' object attribute "
"'{0}' is read-only".format(name))
else:
self[name] = value
def __setitem__(self, name, value):
super().__setitem__(name, value)
try:
p = object.__getattribute__(self, '__parent')
key = object.__getattribute__(self, '__key')
except AttributeError:
p = None
key = None
if p is not None:
p[key] = self
object.__delattr__(self, '__parent')
object.__delattr__(self, '__key')
def __add__(self, other):
if not self.keys():
return other
else:
self_type = type(self).__name__
other_type = type(other).__name__
msg = "unsupported operand type(s) for +: '{}' and '{}'"
raise TypeError(msg.format(self_type, other_type))
@classmethod
def _hook(cls, item):
if isinstance(item, dict):
return cls(item)
elif isinstance(item, (list, tuple)):
return type(item)(cls._hook(elem) for elem in item)
return item
def __getattr__(self, item):
return self.__getitem__(item)
def __getitem__(self, name):
if name not in self:
return self.__class__(__parent=self, __key=name)
return super().__getitem__(name)
def __delattr__(self, name):
del self[name]
def to_dict(self):
base = {}
for key, value in self.items():
if isinstance(value, type(self)):
base[key] = value.to_dict()
elif isinstance(value, (list, tuple)):
base[key] = type(value)(
item.to_dict() if isinstance(item, type(self)) else
item for item in value)
else:
base[key] = value
return base
def copy(self):
return copy.copy(self)
def deepcopy(self):
return copy.deepcopy(self)
def __deepcopy__(self, memo):
other = self.__class__()
memo[id(self)] = other
for key, value in self.items():
other[copy.deepcopy(key, memo)] = copy.deepcopy(value, memo)
return other
def update(self, *args, **kwargs):
other = {}
if args:
if len(args) > 1:
raise TypeError()
other.update(args[0])
other.update(kwargs)
for k, v in other.items():
if ((k not in self) or
(not isinstance(self[k], dict)) or
(not isinstance(v, dict))):
self[k] = v
else:
self[k].update(v)
def __getnewargs__(self):
return tuple(self.items())
def __getstate__(self):
return self
def __setstate__(self, state):
self.update(state)
def setdefault(self, key, default=None):
if key in self:
return self[key]
else:
self[key] = default
return default
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join(['┣'+k.rjust(m) + ': ' + repr(v).replace('\n','\n┃'+' '*(m+2)) for k, v in self.items()])
else:
return self.__class__.__name__ + "()"
def __xml__(self):
from .xhtml import Elem
x = Elem('div')
t = x.put('table')
for k, v in self.items():
tr = t.put('tr')
tr.put('td', text=k, style='font-weight:bold;vertical-align:top;')
try:
v_xml = v.__xml__()
except AttributeError:
tr.put('td').put('pre', text=repr(v))
else:
tr.append(v_xml)
return x
def _repr_html_(self):
return self.__xml__().tostring()
```
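A brief usage sketch of the attribute-access dictionary defined above; the `pines.addict` import path is inferred from the file header and is an assumption.

```python
from pines.addict import adict  # module path assumed from the file header above

cfg = adict()
cfg.model.name = "work_mode"      # the intermediate 'model' adict is created on the fly
cfg.model.params = {"beta": 1.5}
assert cfg["model"]["name"] == "work_mode"
plain = cfg.to_dict()             # convert back to ordinary nested dicts
```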
#### File: pine/pines/codex.py
```python
import pickle, base64, zlib, hashlib
def squeeze(item):
return base64.standard_b64encode(zlib.compress(pickle.dumps(item)))
def inflate(squeezed):
return pickle.loads(zlib.decompress(base64.standard_b64decode(squeezed)))
def phash(x):
return hashlib.sha256(pickle.dumps(x)).hexdigest()
```
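The three helpers above chain pickle, zlib, and base64; a round-trip sketch (module path assumed from the file header):

```python
from pines.codex import squeeze, inflate, phash  # module path assumed

payload = {"zone": 12, "factors": [0.2, 0.8]}
blob = squeeze(payload)        # base64 text of the zlib-compressed pickle
assert inflate(blob) == payload
digest = phash(payload)        # sha256 hex digest of the pickled object
```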
#### File: pine/pines/configure.py
```python
import os.path
import json
from .attribute_dict import quickdot, add_to_quickdot
def add_directory(filepath):
"""
    Prefix a relative path with the '~/.pines' directory in a platform-independent way.
"""
if os.path.isabs(filepath):
return filepath
return os.path.join(os.path.expanduser('~'), '.pines', filepath)
def load(filename=None):
"""
Load configuration from a JSON file.
If filename is None, ~/.pines/configure.json will be loaded.
If filename is not an absolute path, it will be prefixed with ~/.pines/
Returns loaded config as a dictionary on success and {} on failure.
"""
filename = add_directory(filename or 'configure.json')
try:
with open(filename, "r") as f:
return quickdot(json.load(f))
except IOError:
pass
return quickdot()
_cached_values = None
def cached(filename=None):
global _cached_values
if _cached_values is None:
_cached_values = load(filename)
return _cached_values
def save(config, filename=None):
"""
Save configuration to a JSON file.
If filename is not an absolute path, it will be prefixed with ~/.pines/
"""
filename = add_directory(filename or 'configure.json')
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory, 0o700)
with open(filename, "w") as f:
json.dump(config, f, indent=2, sort_keys=True)
def add(tag, val):
q = load()
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
pass
q = add_to_quickdot(q,tag,val)
save(q)
def print_config(args=None):
import argparse
parser = argparse.ArgumentParser(prog='pines_config')
parser.add_argument('--add', nargs=2, action='append')
space = parser.parse_args()
q = load()
if space.add:
for tag,val in space.add:
print('setting',tag,'to',val)
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
pass
q = add_to_quickdot(q,tag,val)
save(q)
print(q)
def check_config(checklist, secrets, window_title="PINES CONFIG"):
global _top_cfg, _secret_cfg
_top_cfg = load()
_secret_cfg = quickdot()
from tkinter import Tk, Entry, Button, mainloop, END, Label, LEFT, BOTTOM
master = Tk()
master.wm_title(window_title)
def makeentry(parent, caption_, width=None, row=None, **options):
if row is None:
Label(parent, text=caption_).pack(side=LEFT)
else:
Label(parent, text=caption_).grid(row=row,column=0)
entry = Entry(parent, **options)
if width:
entry.config(width=width)
if row is None:
entry.pack(side=LEFT)
else:
entry.grid(row=row,column=1)
return entry
ents = []
rownum = 0
for rownum, check in enumerate(checklist):
ents.append(makeentry(master, check, width=90, row=rownum))
ents[-1].delete(0, END)
if _top_cfg[check] is None or (isinstance(_top_cfg[check], quickdot) and len(_top_cfg[check])==0):
this_str = "<None>"
else:
this_str = str(_top_cfg[check])
ents[-1].insert(0, this_str)
secret_ents = []
for rownum, secret in enumerate(secrets, start=rownum+1):
secret_ents.append(makeentry(master, secret, width=90, row=rownum, show="\u2022"))
secret_ents[-1].delete(0, END)
if secret in _top_cfg:
if _top_cfg[secret] is None or (isinstance(_top_cfg[secret], quickdot) and len(_top_cfg[secret])==0):
this_str = "<None>"
else:
this_str = str(_top_cfg[secret])
else:
this_str = ""
secret_ents[-1].insert(0, this_str)
ents[0].focus_set()
def callback_onetime():
global _top_cfg, _secret_cfg
for check, ent in zip(checklist, ents):
this_str = (ent.get())
if this_str == "<None>":
_top_cfg[check] = quickdot()
else:
try:
this_str = int(this_str)
except ValueError:
try:
this_str = float(this_str)
except ValueError:
pass
_top_cfg[check] = this_str
for check, ent in zip(secrets, secret_ents):
this_str = (ent.get())
if this_str == "<None>":
_secret_cfg[check] = quickdot()
else:
try:
this_str = int(this_str)
except ValueError:
try:
this_str = float(this_str)
except ValueError:
pass
_secret_cfg[check] = this_str
master.destroy()
def callback_save():
callback_onetime()
save(_top_cfg)
b = Button(master, text = "OK - One Time", width = 20, command = callback_onetime)
b.grid(row=rownum+1,column=0, columnspan=2)
b2 = Button(master, text = "OK - Save to Config File", width = 20, command = callback_save)
b2.grid(row=rownum+2,column=0, columnspan=2)
mainloop()
return _top_cfg + _secret_cfg
```
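A sketch of the intended round trip through `~/.pines/configure.json`; the dotted tag is split into nested keys by `add_to_quickdot`, and `add()` writes the file immediately. The module path and the address value are assumptions.

```python
from pines import configure  # module path assumed from the file header above

cfg = configure.load()                          # empty quickdot if the JSON file is missing
configure.add('cluster.scheduler', '10.0.0.5')  # placeholder value; persisted to ~/.pines/configure.json
print(configure.load().cluster.scheduler)
```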
#### File: pine/pines/counter.py
```python
from pines.attribute_dict import dicta
class Counter(dicta):
def one(self, key):
if key in self:
self[key] += 1
else:
self[key] = 1
def add(self, other_counter):
for key, val in other_counter.items():
if key in self:
self[key] += val
else:
self[key] = val
```
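A small usage sketch of the counting helper (module path assumed from the file header):

```python
from pines.counter import Counter  # module path assumed

c = Counter()
for mode in ('walk', 'drive', 'walk'):
    c.one(mode)
c.add({'bike': 2})                 # merge another mapping of counts
# c is now {'walk': 2, 'drive': 1, 'bike': 2}
```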
#### File: pine/pines/daskworker.py
```python
from . import configure
from . import egnyte as pe
from distributed import Worker, Nanny as _Nanny
import os
import logging, logging.handlers
_time_format = '%b %d %H:%M:%S'
_mess_format = '%(asctime)15s %(name)s %(levelname)s %(message)s'
_worker_local_dir = None
class Nanny(_Nanny):
def change_ncores(self, *arg, **kwarg):
new_ncores = kwarg.pop('new_ncores')
logging.getLogger('distributed').info(f"changing ncores to {new_ncores}")
if new_ncores is not None:
self.nthreads = new_ncores
if self.process:
self.process.worker_kwargs['nthreads'] = new_ncores
def __init__(self, *arg, **kwarg):
super().__init__(*arg, **kwarg)
self.handlers['change_ncores'] = self.change_ncores
async def _new_worker(scheduler=None, name=None, cfg=None, gui_loop_callback=None, resources=None, **kwargs):
global _worker_local_dir
if cfg is None:
        cfg = configure.check_config(['cluster.worker_log', 'cluster.scheduler'], secrets=[],
                                     window_title="PINES CLUSTER WORKER CONFIG")
if 'worker_log' in cfg.cluster:
handler = logging.handlers.RotatingFileHandler(cfg.cluster['worker_log'], 'a', 1000000, 10)
formatter = logging.Formatter(fmt=_mess_format, datefmt=_time_format)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('distributed').info(f"opening log for {name}")
if scheduler is None:
scheduler = cfg.cluster['scheduler']
if scheduler is None: # still...
raise ValueError('no scheduler known, set one in pines.configure .cluster')
# from tornado.ioloop import IOLoop
# from threading import Thread
if name is None:
if 'worker_name' in cfg.cluster:
name = cfg.cluster['worker_name']
else:
import socket
name = socket.getfqdn()
# loop = IOLoop.current()
# t = Thread(target=loop.start, daemon=True)
# t.start()
scheduler_location = f'tcp://{scheduler}:8786'
logging.getLogger('distributed').info(f"starting worker {name} for {scheduler_location}")
if resources:
logging.getLogger('distributed').info(f"worker {name} has resources {str(resources)}")
w = Nanny(scheduler_location, name=name, resources=resources, **kwargs)
w.cfg = cfg
_worker_local_dir = w.local_directory
# if gui_loop_callback is not None:
# gui_loop_callback(w, cfg)
await w.start() # choose randomly assigned port
await w.finished()
logging.getLogger('distributed').critical(f"ending nanny {name} for {scheduler_location}")
def new_worker(*args, **kwargs):
import asyncio
asyncio.run(_new_worker(*args, **kwargs))
def receive_tar_package(s, packagename=None):
global _worker_local_dir
from .tar import extract_targz_string
use_path = _worker_local_dir or "."
result = extract_targz_string(s, path=use_path)
mod = None
if packagename is not None:
logging.getLogger('distributed').critical(f"received package {packagename} attempting to import")
import sys, importlib
importlib.invalidate_caches()
import importlib.util
spec = importlib.util.find_spec(packagename)
print("spec", spec)
if packagename in sys.modules:
logging.getLogger('distributed').critical(f"received package {packagename} already exists, reloading")
mod = importlib.reload(sys.modules[packagename])
else:
logging.getLogger('distributed').critical(
f"received package {packagename} does not already exist, importing")
try:
mod = importlib.import_module(packagename)
except ModuleNotFoundError:
logging.getLogger('distributed').critical(f"ModuleNotFoundError on {packagename}")
mod = None
if mod is not None:
logging.getLogger('distributed').critical(f"Adding {packagename} to sys.modules")
import sys
sys.modules[packagename] = mod
importlib.invalidate_caches()
return result, mod
def send_package_to_dask_workers(directory, scheduler_ip=None, client=None):
"""
Send a package to all workers
One of client and scheduler_ip should be given.
Parameters
----------
directory : str
scheduler_ip : str
ignored if client is given
client : dask.distributed.Client
"""
from .tar import directory_to_targz_string
if client is None:
if scheduler_ip is None:
raise ValueError("must give scheduler or client")
from dask.distributed import Client
if isinstance(scheduler_ip, Client):
client = scheduler_ip
elif isinstance(scheduler_ip, str):
client = Client(f"{scheduler_ip}:8786")
else:
raise TypeError("bad scheduler")
package_name = os.path.basename(directory.rstrip("/").rstrip("\\"))
s = directory_to_targz_string(directory)
return client.run(receive_tar_package, s, package_name)
def new_worker_with_egnyte():
cfg = configure.check_config(
['cluster.worker_name', 'cluster.worker_log', 'cluster.working_dir', 'cluster.scheduler',
'cluster.ncores', 'cluster.ratelimit', 'egnyte.access_token', 'private_pip.python_packages'],
secrets=['egnyte.username', 'egnyte.password', ],
window_title="CLUSTER WORKER CONFIG")
if not cfg['egnyte.access_token']:
        token = pe.get_access_token(username=cfg.egnyte.username, password=cfg.egnyte.password, return_token=True)
cfg['egnyte.access_token'] = token
configure.add('egnyte.access_token', token)
else:
pe.set_access_token(cfg['egnyte.access_token'])
if cfg.private_pip.python_packages:
from .private_pip import pip_install
pip_install(cfg.private_pip.python_packages)
try:
ncores = int(cfg.cluster.ncores)
except:
ncores = None
new_worker(cfg=cfg, gui_loop_callback=None, ncores=ncores)
if __name__ == '__main__':
w = new_worker()
```
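A sketch of pushing a local Python package to every running worker via `send_package_to_dask_workers`; the scheduler address and package path are placeholders.

```python
from dask.distributed import Client
from pines.daskworker import send_package_to_dask_workers  # module path assumed

client = Client("10.0.0.5:8786")                  # placeholder scheduler address
send_package_to_dask_workers("/projects/mytools", client=client)
# each worker receives a tarball of /projects/mytools, unpacks it into its
# local directory, and (re)imports the "mytools" package
```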
#### File: pine/pines/__init__.py
```python
__version__ = '2.94'
import sys, os
def info():
print( f"┌── PINES TOOLKIT {__version__} " + "─"*(57-len(__version__)) )
v = '\n│'.join(sys.version.split('\n'))
print(f"│Python {v}")
print(f"│EXE ─ {sys.executable}")
print(f"│CWD ─ {os.getcwd()}", )
for p in sys.path[:1]:
print(f"│PTH ┬ {p}")
for p in sys.path[1:-1]:
print(f"│ ├ {p}")
for p in sys.path[-1:]:
print(f"│ └ {p}")
print("└───────────────────────────────────────────────────────────────────────────")
class Info:
def __init__(self, appname='Pines Toolkit', extra=True, version=None):
self.appname = appname
self.extra = extra
self.version = version or __version__
def __repr__(self):
r = (f"┌── {self.appname.upper()} {self.version} " + "─" * (57 - len(self.version)))
v = '\n│'.join(sys.version.split('\n'))
r += (f"\n│Python {v}")
r += (f"\n│EXE ─ {sys.executable}")
r += (f"\n│CWD ─ {os.getcwd()}" )
for p in sys.path[:1]:
r += (f"\n│PTH ┬ {p}")
for p in sys.path[1:-1]:
r += (f"\n│ ├ {p}")
for p in sys.path[-1:]:
r += (f"\n│ └ {p}")
r += ("\n└───────────────────────────────────────────────────────────────────────────")
return r
def _repr_html_(self):
from .xhtml import Elem
xsign = Elem("div", {'class': 'larch_head_tag'})
from .img import favicon
p = Elem('p', {'style': 'float:left;margin-top:6px'})
p << Elem('img', {
'width': "32",
'height': "32",
'src': "data:image/png;base64,{}".format(favicon),
'style': 'float:left;position:relative;top:-3px;padding-right:0.2em;'
}, tail=f" {self.appname} ")
p << Elem('span', {'class': 'larch_head_tag_ver'}, text=self.version)
xsign << p
from .img import camsyslogo_element
xsign << camsyslogo_element
if 'larch4' in sys.modules:
from .img import georgiatechlogo_element
xsign << georgiatechlogo_element
if self.extra:
v = '\n│'.join(sys.version.split('\n'))
xsign << Elem('br')
xinfo = Elem('div', {'class': 'larch_head_tag_more', 'style':'margin-top:10px; padding:7px'}, text=f'Python {v}')
xsign << xinfo
xinfo << Elem('br', tail=f"EXE - {sys.executable}")
xinfo << Elem('br', tail=f"CWD - {os.getcwd()}")
xinfo << Elem('br', tail=f"PATH - ")
ul = Elem('ul', {'style': 'margin-top:0; margin-bottom:0;'})
xinfo << ul
for p in sys.path:
ul << Elem('li', text=p)
return xsign.tostring()
def ipython_status(magic_matplotlib=True):
message_set = set()
try:
cfg = get_ipython().config
except:
message_set.add('Not IPython')
else:
import IPython
message_set.add('IPython')
# Caution: cfg is an IPython.config.loader.Config
if cfg['IPKernelApp']:
message_set.add('IPython QtConsole')
try:
if cfg['IPKernelApp']['pylab'] == 'inline':
message_set.add('pylab inline')
else:
message_set.add('pylab loaded but not inline')
except:
message_set.add('pylab not loaded')
elif cfg['TerminalIPythonApp']:
try:
if cfg['TerminalIPythonApp']['pylab'] == 'inline':
message_set.add('pylab inline')
else:
message_set.add('pylab loaded but not inline')
except:
message_set.add('pylab not loaded')
return message_set
_i = Info()
def show():
if 'IPython' in ipython_status():
from IPython.display import display
try:
if 'larch' not in sys.modules and 'larch4' not in sys.modules:
from .styles import stylesheet
stylesheet()
display(_i)
except:
if 'larch' not in sys.modules and 'larch4' not in sys.modules:
print(repr(_i))
jupyter_active = False
else:
jupyter_active = True
else:
jupyter_active = False
if 'larch' not in sys.modules and 'larch4' not in sys.modules:
print(repr(_i))
## most common items here
from .attribute_dict import fdict, quickdot
from .codex import phash
```
#### File: pine/pines/rate_limiter.py
```python
from collections.abc import Iterator  # Iterator lives in collections.abc (removed from collections in Python 3.10)
from threading import Lock
import time
import functools
class RateLimiter(Iterator):
"""Iterator that yields a value at most once every 'interval' seconds."""
def __init__(self, interval):
self.lock = Lock()
self.interval = interval
self.next_yield = 0
def __next__(self):
with self.lock:
t = time.monotonic()
if t < self.next_yield:
time.sleep(self.next_yield - t)
t = time.monotonic()
self.next_yield = t + self.interval
_global_rate_limiters = {}
def GlobalRateLimiter(tag, interval=1, wait_now=True):
global _global_rate_limiters
if tag not in _global_rate_limiters:
_global_rate_limiters[tag] = RateLimiter(interval)
if wait_now:
return next(_global_rate_limiters[tag])
class NonBlockingRateLimiter():
def __init__(self, interval):
self.lock = Lock()
self.interval = interval
self.next_greenlight = 0
def __call__(self, fn):
@functools.wraps(fn)
def decorated(*args, **kwargs):
t = time.monotonic()
if t >= self.next_greenlight:
with self.lock:
self.next_greenlight = t + self.interval
fn(*args, **kwargs)
return decorated
```
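Both limiters above can be exercised as below (module path assumed); `RateLimiter` blocks until the interval has passed, while the decorator silently drops calls that arrive too soon.

```python
from pines.rate_limiter import RateLimiter, NonBlockingRateLimiter  # module path assumed

limiter = RateLimiter(2.0)             # at most one pass every 2 seconds
for item in ('a', 'b', 'c'):
    next(limiter)                      # sleeps if the previous pass was too recent
    print('processing', item)

@NonBlockingRateLimiter(5.0)
def report(pct):
    print(f'{pct:.0%} done')

report(0.33)                           # first call passes; repeats within 5 s are dropped
```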
#### File: pine/pines/repeater.py
```python
import pandas
import itertools
import csv
import io
class call_me():
def __init__(self, func):
self._func = func
def __call__(self, *arg, **kwarg):
return self._func(*arg, **kwarg)
def create_csv_repeat_set(*loopers, filename=None, return_buffer=False):
result = []
for i in itertools.product(*loopers):
x = {}
for j in i:
x.update(j)
result.append(x)
heads = result[0].keys()
if filename is None:
f = io.StringIO()
else:
f = open(filename, 'w')
writer = csv.DictWriter(f, heads)
writer.writeheader()
writer.writerows(result)
if filename is None:
if return_buffer:
f.seek(0)
return f
return f.getvalue()
else:
f.close()
def loop_repeater(func, *loopers, **kwargs):
buffer = create_csv_repeat_set(*loopers, filename=None, return_buffer=True)
return external_repeater(func, buffer, **kwargs)
def external_repeater(func, kwarg_filename, *args, **kwargs):
"""
Use an external CSV file to iterate over keyword args passed to a function.
Parameters
----------
func : callable
This function gets called once for each row of the CSV file
kwarg_filename : str
        A csv file containing keyword args (simple data types as read by pandas)
Other Parameters
----------------
args
Positional arguments always passed to `func`
kwargs
Common keyword arguments always passed to `func`
Returns
-------
list
A list containing the return value of `func` for each row
of the csv file.
"""
result = []
df = pandas.read_csv(kwarg_filename)
direct_kw = {}
indirect_kw = {}
for k,v in kwargs.items():
if isinstance(v,call_me):
indirect_kw[k] = v
else:
direct_kw[k] = v
for row in df.iterrows():
local_kwargs = row[1].to_dict()
indirect_kwargs = {k:v() for k,v in indirect_kw.items()}
result.append(func(*args, **direct_kw, **indirect_kwargs, **local_kwargs))
return result
if __name__=='__main__':
from pprint import pprint
f = lambda *a, **k: str(a)+"|"+str(k)
iter = 1
def hh():
global iter
iter += 1
return iter
t = external_repeater(f,'test/random_csv.csv', 12,13,14,fat='FAT', hhat=call_me(hh))
for i in t:
pprint(i)
```
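`loop_repeater` builds the full cross product of the loopers, writes it to an in-memory CSV, and feeds each row to the function as keyword arguments; a minimal sketch (module path assumed):

```python
from pines.repeater import loop_repeater  # module path assumed

runs = loop_repeater(
    lambda **kw: kw,                                # stand-in for a real model runner
    [{'vot': 5}, {'vot': 10}],                      # first looper
    [{'scenario': 'base'}, {'scenario': 'build'}],  # second looper
)
# four calls, one per (vot, scenario) combination
```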
#### File: pine/pines/smartread.py
```python
import gzip, os, struct, zipfile, io
class SmartFileReader(object):
def __init__(self, file, *args, **kwargs):
if file[-3:]=='.gz':
with open(file, 'rb') as f:
f.seek(-4, 2)
self._filesize = struct.unpack('I', f.read(4))[0]
self.file = gzip.open(file, *args, **kwargs)
elif file[-4:]=='.zip':
zf = zipfile.ZipFile(file, 'r')
zf_info = zf.infolist()
if len(zf_info)!=1:
raise TypeError("zip archive files must contain a single member file for SmartFileReader")
zf_info = zf_info[0]
self.file = zf.open(zf_info.filename, 'r', *args, **kwargs)
self._filesize = zf_info.file_size
else:
self.file = open(file, 'rt', *args, **kwargs)
self._filesize = os.fstat(self.file.fileno()).st_size
def __getattr__(self, name):
return getattr(self.file, name)
def __setattr__(self, name, value):
if name in ['file', 'percentread', '_filesize']:
return object.__setattr__(self, name, value)
return setattr(self.file, name, value)
def __delattr__(self, name):
return delattr(self.file, name)
def percentread(self):
try:
return (float(self.file.tell())/float(self._filesize)*100)
except io.UnsupportedOperation:
            return (1.0 - float(self.file._left) / float(self._filesize)) * 100
def __iter__(self):
return self.file.__iter__()
def bytesread(self):
try:
b = float(self.file.tell())
except:
return "error in bytesread"
labels = ['B','KB','MB','GB','TB']
scale = 0
while scale < 4 and b > 1024:
b /= 1024
scale += 1
return "{:.2f}{}".format(b,labels[scale])
```
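A progress-reporting read loop using the wrapper above; the file name is a placeholder. For `.gz` inputs the percentage is measured against the uncompressed size read from the gzip footer.

```python
from pines.smartread import SmartFileReader  # module path assumed

f = SmartFileReader("trips.csv.gz")          # placeholder file name
for n, line in enumerate(f):
    if n % 100000 == 0:
        print(f"{f.percentread():.1f}% read ({f.bytesread()})")
```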
#### File: pine/pines/stats.py
```python
import scipy.stats
import numpy
def beta_pert( x_min, x_mode, x_max, lamb= 4, mode_as_fraction=None ):
"""
Beta-PERT
To transform a [0,1] random uniform `x` to a beta-PERT random,
use beta_pert(*arg).ppf(x)
Parameters
----------
x_min, x_mode, x_max : float
The min, mode, and max for the beta-pert distribution
lamb : float
The pert shape modifier
mode_as_fraction : float, optional
The mode is replaced with the fraction of the distance from the min to the max.
Returns
-------
rv_frozen
"""
if mode_as_fraction is not None:
x_mode = x_min + mode_as_fraction*(x_max-x_min)
if ( x_min > x_max or x_mode > x_max or x_mode < x_min ):
raise ValueError( "invalid parameters" )
x_range = x_max - x_min
if ( x_range == 0 ):
        # min == mode == max collapses to a single point; the original line
        # referenced an undefined `q` here, so fail loudly instead.
        raise ValueError("x_min == x_max: distribution is degenerate at x_min")
mu = ( x_min + x_max + lamb * x_mode ) / ( lamb + 2 )
# special case if mu == mode
if ( mu == x_mode ):
v = ( lamb / 2 ) + 1
else:
v = (( mu - x_min ) * ( 2 * x_mode - x_min - x_max )) / (( x_mode - mu ) * ( x_max - x_min ))
w = ( v * ( x_max - mu )) / ( mu - x_min )
return scipy.stats.beta( v, w, loc=x_min, scale=x_range )
def triangular( x_min, x_mode, x_max, mode_as_fraction=None ):
if mode_as_fraction is not None:
x_mode = x_min + mode_as_fraction*(x_max-x_min)
if ( x_min > x_max or x_mode > x_max or x_mode < x_min ):
raise ValueError( "invalid parameters" )
scale = x_max - x_min
if scale==0:
peak = x_mode
else:
peak = (x_mode-x_min)/scale
return scipy.stats.triang( peak, loc=x_min, scale=scale )
def uniform( x_min, x_max ):
if ( x_min > x_max ):
raise ValueError( "invalid parameters" )
scale = x_max - x_min
return scipy.stats.uniform( loc=x_min, scale=scale )
def binary( p ):
if (p < 0) or (p > 1):
raise ValueError( "invalid parameters" )
return scipy.stats.binom( n=1, p=p )
def _mod_linspace(start, stop, num=50, dtype=None):
y, step = numpy.linspace(start, stop, num=num, endpoint=False, retstep=True, dtype=dtype)
y += step/2
return y
def prod_two_dists_ppf_approx(dist1, dist2, q, draws=500):
x = _mod_linspace(0,1,draws)
x1 = dist1.ppf(x)
x2 = dist2.ppf(x)
x1x2 = numpy.outer(x1,x2).flatten()
return numpy.percentile(x1x2,q*100)
def sum_two_dists_ppf_approx(dist1, dist2, q, draws=500):
x = _mod_linspace(0,1,draws)
x1 = dist1.ppf(x)
x2 = dist2.ppf(x)
x1x2 = numpy.zeros([draws,draws])
x1x2 += x1[:,None]
x1x2 += x2[None,:]
return numpy.percentile(x1x2,q*100)
def prod_two_triangular_ppf_approx(q, x1_min, x1_mode, x1_max, x2_min, x2_mode, x2_max):
x = numpy.linspace(0,1,500)
x1 = triangular( x1_min, x1_mode, x1_max ).ppf(x)
x2 = triangular( x2_min, x2_mode, x2_max ).ppf(x)
x1x2 = numpy.outer(x1,x2).flatten()
return numpy.percentile(x1x2,q*100)
def quick_linear_regression(X, y, log=None):
import statsmodels.api as sm
import pandas
Xc = sm.add_constant(X)
m = sm.OLS(y, Xc, hasconst=True)
statsmodel_results = m.fit()
if log is not None:
log(statsmodel_results.summary())
sm_df = pandas.concat((statsmodel_results.params,
statsmodel_results.bse,
statsmodel_results.tvalues,
statsmodel_results.pvalues,
statsmodel_results.conf_int()), axis=1)
sm_df.columns = ['coef', 'std err', 't', 'P>|t|', '[0.025', '0.975]']
return sm_df
```
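Following the `beta_pert` docstring, uniform draws are mapped through the frozen distribution's `ppf`; a short sketch with made-up min/mode/max values.

```python
import numpy
from pines.stats import beta_pert  # module path assumed

dist = beta_pert(x_min=100, x_mode=140, x_max=200)  # frozen scipy.stats.beta
u = numpy.random.default_rng(0).uniform(size=5)     # uniform [0, 1) draws
draws = dist.ppf(u)                                 # values in [100, 200], peaked near 140
```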
#### File: pine/pines/timesize.py
```python
def timesize(t):
if t<60:
return f"{t:.2f}s"
elif t<3600:
return f"{t/60:.2f}m"
elif t<86400:
return f"{t/3600:.2f}h"
else:
return f"{t/86400:.2f}d"
```
#### File: pine/pines/xdrive.py
```python
import os.path
import re
import time
import hashlib
import pickle
import io, gzip
import shutil
import json
import fnmatch
import glob
import sys
from .logger import flogger
from .bytesize import bytes_scaled
from .codex import phash
from .configure import load as load_config
elog = flogger(label='XDRIVE')
## init
_updates = False
config = load_config()
if 'root_dir' in config.xdrive:
ROOT = config.xdrive.root_dir
else:
ROOT = "X:"
class Folder():
def __init__(self, path=None):
self.path = os.path.join(ROOT, path)
_, self.folders, self.files = next(os.walk(self.path, topdown=True))
self.is_folder = True
def folder(self, f):
return Folder(os.path.join(self.path, f))
def file(self, f, **kwargs):
return File(os.path.join(self.path, f))
def create(self, *args):
os.makedirs(self.path, exist_ok=True)
class File():
def __init__(self, path=None):
self.path = os.path.join(ROOT, path)
self.is_folder = False
def upload(self, in_stream):
os.makedirs(os.path.dirname(self.path),exist_ok=True)
with open(self.path, mode='wb') as f:
shutil.copyfileobj(in_stream, f)
def download(self, out_stream):
with open(self.path, mode='rb') as f:
shutil.copyfileobj(f, out_stream)
@property
def size(self):
return os.path.getsize(self.path)
@property
def checksum(self):
return _sha512_checksum(self.path)
def FileOrFolder(path):
if os.path.isdir(path):
return Folder(path)
else:
return File(path)
def _folder_to_path(f):
if isinstance(f,Folder):
return f.path
if isinstance(f,File):
return f.path
return f
def pth(*arg):
return "/".join(_folder_to_path(f) for f in arg).replace('//','/').replace('\\','/')
def create_folder(folder_path, retries=10, interval=1):
"""
Create a new folder within Egnyte.
:param folder_path:
:return: egnyte.resources.Folder
"""
if not os.path.exists(folder_path):
os.makedirs(folder_path, exist_ok=True)
return Folder(folder_path)
def create_subfolder(folder, subfoldername):
f = pth(folder,subfoldername)
os.makedirs(f, exist_ok=True)
return Folder(f)
def upload_file(local_file, xdrive_path, rename=None, add_suffix=None):
if rename is None:
basename = os.path.basename(local_file)
else:
basename = rename
if add_suffix:
basename = "{1}{0}{2}".format(add_suffix, *os.path.splitext(basename))
file_obj = File( pth(xdrive_path,basename) )
with open(local_file, "rb") as fp:
file_obj.upload(fp)
return
def upload_file_gz(local_file, egnyte_path, progress_callbacks=None):
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
basename = os.path.basename(local_file)+'.gz'
file_obj = File(pth(egnyte_path, basename))
buffer = io.BytesIO()
with open(local_file, 'rb') as f_in:
with gzip.open(buffer, 'wb') as buffer_out:
shutil.copyfileobj(f_in, buffer_out)
progress_callbacks.upload_start(local_file, file_obj, buffer.tell())
buffer.seek(0)
file_obj.upload(buffer)
progress_callbacks.upload_finish(file_obj)
def upload_dict_json(dictionary, filename, egnyte_path, progress_callbacks=None):
"""
Parameters
----------
dictionary : dict
The dictionary to convert to json and upload to egnyte
filename : str
A filename for the file that will be created in egnyte
egnyte_path : str
The (existing) folder in egnyte where the file will be created
progress_callbacks
"""
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
basename = os.path.basename(filename)
if basename[-5:] != '.json':
basename += '.json'
file_obj = File(pth(egnyte_path, basename))
buffer = io.BytesIO(json.dumps(dictionary).encode('UTF-8'))
progress_callbacks.upload_start("dictionary", file_obj, buffer.tell())
file_obj.upload(buffer)
progress_callbacks.upload_finish(file_obj)
def download_file(egnyte_file, local_path, overwrite=False, mkdir=True, progress_callbacks=None):
if not os.path.exists(local_path) and mkdir:
os.makedirs(local_path)
bulk_download([egnyte_file], local_path, overwrite=overwrite, log=(progress_callbacks is not None))
def download_file_gz(egnyte_file, local_path, overwrite=False, mkdir=True, progress_callbacks=None, retries=10, interval=1):
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
if not os.path.exists(local_path) and mkdir:
os.makedirs(local_path)
if isinstance(egnyte_file, str) and egnyte_file[-3:] != '.gz':
egnyte_file = egnyte_file+'.gz'
basename = os.path.basename(egnyte_file)[:-3]
if not overwrite and os.path.exists(os.path.join(local_path, basename)):
raise FileExistsError(os.path.join(local_path, basename))
file_obj = File(pth(egnyte_file))
buffer = io.BytesIO()
progress_callbacks.download_start(local_path, file_obj, file_obj.size)
file_obj.download(buffer)
buffer.seek(0)
with gzip.open(buffer, 'rb') as buffer_in:
with open(os.path.join(local_path, basename), 'wb') as f_out:
shutil.copyfileobj(buffer_in, f_out)
progress_callbacks.download_finish(file_obj)
from .zipdir import verify_hash_file
if os.path.exists(egnyte_file[:-3] + ".sha256.txt"):
verify_hash_file(os.path.join(local_path, basename), hash_dir=os.path.dirname(egnyte_file))
def download_dict_json(egnyte_file, progress_callbacks=None, retries=10, interval=1):
"""
Parameters
----------
egnyte_file : str
The location in egnyte for the json file to be loaded.
progress_callbacks
Returns
-------
dict
"""
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
import json, io
if isinstance(egnyte_file, str) and egnyte_file[-5:] != '.json':
egnyte_file = egnyte_file+'.json'
file_obj = File(pth(egnyte_file))
buffer = io.BytesIO()
progress_callbacks.download_start('dictionary', file_obj, file_obj.size)
file_obj.download(buffer)
buffer.seek(0)
result = json.loads(buffer.getvalue().decode('UTF-8'))
progress_callbacks.download_finish(file_obj)
return result
class ProgressCallbacks():
"""
This object is used for bulk transfers (uploads and downloads)
    Inherit from this and override any of the callbacks you'd like to handle.
"""
def getting_info(self, cloud_path):
"""Getting information about an object. Called for directories and unknown paths."""
elog("getting info on {}".format(cloud_path))
def got_info(self, cloud_obj):
"""Got information about an object."""
def creating_directory(self, cloud_folder):
"""Creating a directory."""
elog("creating directory {}".format(cloud_folder))
def download_start(self, local_path, cloud_file, size):
"""Starting to download a file."""
elog("downloading {1} ({2})".format(local_path, cloud_file.path, bytes_scaled(size)))
def download_progress(self, cloud_file, size, downloaded):
"""Some progress in file download."""
def download_finish(self, cloud_file):
"""Finished downloading a file."""
def upload_start(self, local_path, cloud_file, size):
"""Starting to upload a file."""
elog("uploading {1} ({2})".format(local_path, cloud_file.path, bytes_scaled(size)))
def upload_progress(self, cloud_file, size, uploaded):
"""Some progress in file upload."""
def upload_finish(self, cloud_file):
"""Finished uploading a file."""
def finished(self):
"""Called after all operations."""
elog("finished")
def skipped(self, cloud_obj, reason):
"""Object has been skipped because of 'reason'"""
elog("skipped {} ({})".format(cloud_obj, reason))
DEFAULT_EXCLUDES = fnmatch.translate(".*")
DEFAULT_EXCLUDES_RE = re.compile(DEFAULT_EXCLUDES).match
def make_excluded(excludes=None):
if excludes is None:
return DEFAULT_EXCLUDES_RE
patterns = [DEFAULT_EXCLUDES]
patterns.extend(fnmatch.translate(x) for x in excludes)
return re.compile("|".join(patterns)).match
def generate_paths(roots, excludes=None):
"""
Walk set of paths in local filesystem, and for each file and directory generate a tuple of
(is directory, absolute path, path relative root used to get to that file)
"""
excluded = make_excluded(excludes)
for root in roots:
base = os.path.basename(root)
if not excluded(base):
is_dir = os.path.isdir(root)
yield is_dir, root, base
if is_dir:
prefix_len = len(os.path.dirname(root))
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=True):
relpath = dirpath[prefix_len:].strip('/')
for is_dir, names in ((False, filenames), (True, dirnames)):
for name in names:
if not excluded(name):
yield is_dir, os.path.join(dirpath, name), "%s/%s" % (relpath, name)
def bulk_upload(local_dir, xdrive_path, exclude=None, progress_callbacks=None):
"""
    Transfer many local files or directories to the X-drive file system.
    * local_dir - list of local file or directory paths to upload
    * xdrive_path - path on the X drive to upload into
    * progress_callbacks - Callback object (see ProgressCallbacks)
"""
if not local_dir:
return
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks() # no-op callbacks
target_folder = Folder(xdrive_path)
progress_callbacks.creating_directory(target_folder)
target_folder.create(True)
for is_dir, local_path, cloud_path in generate_paths(local_dir, exclude):
if is_dir:
cloud_dir = target_folder.folder(cloud_path)
progress_callbacks.creating_directory(cloud_dir)
cloud_dir.create(True)
else:
size = os.path.getsize(local_path)
if size: # empty files cannot be uploaded
cloud_file = target_folder.file(cloud_path, size=size)
with open(local_path, "rb") as fp:
progress_callbacks.upload_start(local_path, cloud_file, size)
cloud_file.upload(fp)
progress_callbacks.upload_finish(cloud_file)
progress_callbacks.finished()
def _sha512_checksum(filename, block_size=65536):
sha512 = hashlib.sha512()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha512.update(block)
return sha512.hexdigest()
def _pines_bulk_download_worker(items, root_path, local_dir, overwrite, progress_callbacks):
import collections, shutil
any_updates = False
root_len = len(root_path.rstrip('/')) + 1
queue = collections.deque(items)
while True:
try:
obj = queue.popleft()
except IndexError:
break
relpath = obj.path[root_len:].strip('/')
local_path = os.path.join(local_dir, relpath.replace('/', os.sep))
dir_path = os.path.dirname(local_path)
if not os.path.isdir(dir_path):
if os.path.exists(dir_path):
if overwrite:
os.unlink(local_path)
else:
progress_callbacks.skipped(obj, "Existing file conflicts with cloud folder")
continue
os.makedirs(dir_path)
if obj.is_folder:
# schedule contents for later, files first
if obj.files is None:
progress_callbacks.getting_info(obj.path)
obj.list()
progress_callbacks.got_info(obj)
queue.extend(obj.files)
queue.extend(obj.folders)
else:
if os.path.exists(local_path):
if overwrite:
# read local checksum
if _sha512_checksum(local_path) != obj.checksum:
if os.path.isdir(local_path) and not os.path.islink(local_path):
shutil.rmtree(local_path)
else:
os.unlink(local_path)
else:
continue
else:
progress_callbacks.skipped(obj, "Existing file conflicts with cloud file")
continue
progress_callbacks.download_start(local_path, obj, obj.size)
obj.download(local_path)
any_updates = True
progress_callbacks.download_finish(obj)
return any_updates
def _pines_bulk_download( paths, local_dir, overwrite=False, progress_callbacks=None):
"""
    Download many files or directories from the X-drive file system.
    * paths - list of X-drive paths to download
    * local_dir - local directory to download into
    * progress_callbacks - Callback object (see ProgressCallbacks)
"""
any_updates = False
if progress_callbacks is None:
progress_callbacks = ProgressCallbacks()
for path in paths:
progress_callbacks.getting_info(path)
obj = FileOrFolder(path)
        root_path = path[:path.rstrip('/').rfind('/')]  # take all segments except the last one
if obj.is_folder:
items = obj.files + obj.folders
else:
items = (obj,)
        any_updates = _pines_bulk_download_worker(items, root_path, local_dir, overwrite, progress_callbacks) or any_updates
progress_callbacks.finished()
return any_updates
def bulk_download( egnyte_path, local_dir, log=True, overwrite=False, progress_callbacks=None ):
p_callbacks = progress_callbacks or (ProgressCallbacks() if log else None)
if isinstance(egnyte_path, str):
return _pines_bulk_download([egnyte_path], local_dir, overwrite=overwrite, progress_callbacks=p_callbacks)
else:
return _pines_bulk_download(egnyte_path, local_dir, overwrite=overwrite, progress_callbacks=p_callbacks)
def import_remote_python_package( egnyte_path, package_name=None, log=True ):
if package_name is None:
if egnyte_path[-1] in ('/','\\'):
package_name = os.path.basename(egnyte_path[:-1])
else:
package_name = os.path.basename(egnyte_path[:])
import sys, importlib
from .temporary import TemporaryDirectory
tempdir = TemporaryDirectory()
any_updates = bulk_download([egnyte_path], tempdir.name, overwrite=True, log=log)
if tempdir.name not in sys.path:
sys.path.insert(0, tempdir.name)
importlib.invalidate_caches()
if package_name in sys.modules:
if any_updates:
return importlib.reload(sys.modules[package_name])
else:
return sys.modules[package_name]
else:
return importlib.import_module(package_name)
# from pines.egnyte import import_remote_python_package
# import_remote_python_package('/Private/jnewman/PyAccess/werter', 'werter')
def glob_upload_gz(pattern, egnyte_path, log=True, dryrun=False):
"""
Upload a gzipped version of all files matching pattern into egynte.
Parameters
----------
pattern : str
A glob pattern
egnyte_path : str or egnyte.Folder
log : bool, default True
Log the results
dryrun : bool, default False
If true, just log what would be done, don't actually upload the files.
"""
for filename in glob.glob(pattern):
if log:
elog(f"found file for upload:{filename}")
if not dryrun:
upload_file_gz(filename, egnyte_path, progress_callbacks=ProgressCallbacks() if log else None)
def pip_install_1(xdrive_python_package_file):
import pip
pip.main(['install', xdrive_python_package_file])
def pip_install(package_names=None, xdrive_repo="X:/Share/CHI/Shared/JPN/PythonRepo/simple/"):
import pip
if package_names is None:
if len(sys.argv)>0 and (('pines_pip' in sys.argv[0]) or ('pines-pip' in sys.argv[0])):
if len(sys.argv)>1 and sys.argv[1]=='install': # ignore install command, it is implied here
package_names = " ".join(sys.argv[2:])
else:
package_names = " ".join(sys.argv[1:])
try:
pkgs = package_names.split()
except AttributeError:
print("NO PACKAGES GIVEN")
else:
for pkg in pkgs:
result = pip.main(["install", "--upgrade", f'--index-url=file:///{xdrive_repo}', pkg])
if result!=0:
# failure
raise ModuleNotFoundError(pkg)
def _pip_install_entry(args=None):
return pip_install()
def pip_rebuild(xdrive_repo="X:/Share/CHI/Shared/JPN/PythonRepo", private_repo=r"\\camtdm01\c$\Apache24\htdocs"):
import libpip2pi.commands
libpip2pi.commands.dir2pi(argv=["dir2pi",xdrive_repo, '-S'])
import shutil, os
shutil.copytree(os.path.join(xdrive_repo, 'simple'), private_repo)
```
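A round trip through the JSON helpers above; the X-drive folder is a placeholder and the module path is assumed from the file header.

```python
from pines import xdrive  # module path assumed

xdrive.upload_dict_json({"run_id": 7, "status": "done"},
                        "run_meta.json", "Share/project/logs")  # placeholder folder
meta = xdrive.download_dict_json("Share/project/logs/run_meta.json")
assert meta["run_id"] == 7
```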
#### File: pine/pines/zipdir.py
```python
import os
import zipfile
import hashlib
def _rec_split(s):
rest, tail = os.path.split(s)
if rest in ('', os.path.sep):
return tail,
return _rec_split(rest) + (tail,)
def _any_dot(s):
for i in _rec_split(s):
if len(i)>0 and i[0]=='.':
return True
return False
def _zipdir(path, ziph, skip_dots=True, extra_layer=True):
# ziph is zipfile handle
keep_dots = not skip_dots
for root, dirs, files in os.walk(path):
folder = os.path.basename(root)
if keep_dots or not _any_dot(folder):
print('zipping folder:', folder, "in", root)
for file in files:
if keep_dots or not _any_dot(file):
ziph.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.join(path, '..' if extra_layer else '.')))
else:
print('not zipping folder:', folder, "in", root)
def zipdir(source_dir, zip_file_name=None, skip_dots=True, extra_layer=False):
"""
Parameters
----------
source_dir
zip_file_name : str
If not given, uses the name of the sourcedir.
skip_dots : bool, defaults True
Ignore files and dirs that start with a dot.
Returns
-------
str
zip_file_name
"""
if zip_file_name is None:
if source_dir[-1] in ('/', '\\'):
usepath = source_dir[:-1]
else:
usepath = source_dir
zip_file_name = usepath + '.zip'
with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
_zipdir(source_dir, zipf, skip_dots=skip_dots, extra_layer=extra_layer)
return zip_file_name
def zipmod(module, zip_file_name, skip_dots=True):
"""
Create a zipfile from a module
Parameters
----------
module
zip_file_name
skip_dots
Returns
-------
"""
with zipfile.ZipFile(zip_file_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
_zipdir(module.__path__[0], zipf, skip_dots=skip_dots)
def zipmod_temp(module, skip_dots=True):
import tempfile
tempdir = tempfile.TemporaryDirectory()
zip_file_name = os.path.join(tempdir.name, module.__name__+".zip")
zipmod(module, zip_file_name, skip_dots=skip_dots)
return zip_file_name, tempdir
def make_hash_file(fname):
hash256 = hashlib.sha256()
if fname[-3:]=='.gz':
import gzip
with gzip.open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash256.update(chunk)
h = hash256.hexdigest()
with open(fname[:-3] + ".sha256.txt", "w") as fh:
fh.write(h)
else:
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash256.update(chunk)
h = hash256.hexdigest()
with open( fname+".sha256.txt" , "w") as fh:
fh.write(h)
def verify_hash_file(fname, hash_dir=None, max_retries=5):
hash256 = hashlib.sha256()
retries = 0
while retries < max_retries:
try:
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash256.update(chunk)
except PermissionError:
import time
time.sleep(5)
retries += 1
except:
raise
else:
break
h = hash256.hexdigest()
if hash_dir is None:
with open( fname+".sha256.txt" , "r") as fh:
h_x = fh.read()
else:
with open( os.path.join(hash_dir, os.path.basename(fname)+".sha256.txt" ) , "r") as fh:
h_x = fh.read()
if h != h_x:
if hash_dir:
raise ValueError(f"bad hash on {fname} with hash_dir={hash_dir}")
else:
raise ValueError(f"bad hash on {fname}")
def gzip_dir(source_dir, pattern="*.*", make_hash=True, exclude=".sha256.txt"):
"""Individually gzip every file matching pattern in source_dir."""
import gzip, glob
import shutil, os
for f in glob.glob(os.path.join(source_dir, pattern)):
if exclude in f:
continue # don't re-gzip the hash files by default
if make_hash:
make_hash_file(f)
if f[-3:]!='.gz':
with open(f, 'rb') as f_in:
with gzip.open(f + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(f)
``` |
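An archive-and-verify sketch tying `zipdir`, `make_hash_file`, and `verify_hash_file` together; the directory name is a placeholder.

```python
from pines.zipdir import zipdir, make_hash_file, verify_hash_file  # module path assumed

archive = zipdir("outputs")   # placeholder directory; produces "outputs.zip"
make_hash_file(archive)       # writes outputs.zip.sha256.txt alongside it
verify_hash_file(archive)     # raises ValueError if the digest no longer matches
```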
{
"source": "jpn--/sharrow",
"score": 3
} |
#### File: sharrow/sharrow/example_data.py
```python
import os
import numpy as np
import pandas as pd
def get_skims():
import openmatrix
from . import dataset
zfilename = os.path.join(os.path.dirname(__file__), "example_data", "skims.zarr")
if os.path.exists(zfilename):
skims = dataset.from_zarr(zfilename, consolidated=False)
else:
filename = os.path.join(os.path.dirname(__file__), "example_data", "skims.omx")
with openmatrix.open_file(filename) as f:
skims = dataset.from_omx_3d(
f,
index_names=("otaz", "dtaz", "time_period"),
indexes=None,
time_periods=["EA", "AM", "MD", "PM", "EV"],
time_period_sep="__",
max_float_precision=32,
).compute()
skims.to_zarr(zfilename)
return skims
def get_households():
filename = os.path.join(
os.path.dirname(__file__), "example_data", "households.csv.gz"
)
return pd.read_csv(filename, index_col="HHID")
def get_persons():
filename = os.path.join(os.path.dirname(__file__), "example_data", "persons.csv.gz")
return pd.read_csv(filename, index_col="PERID")
def get_land_use():
filename = os.path.join(
os.path.dirname(__file__), "example_data", "land_use.csv.gz"
)
return pd.read_csv(filename, index_col="TAZ")
def get_data():
result = {
"hhs": get_households(),
"persons": get_persons(),
"land_use": get_land_use(),
"skims": get_skims(),
}
try:
from addicty import Dict
except ImportError:
pass
else:
result = Dict(result)
result.freeze()
return result
def get_tour_mode_choice_spec(purpose="work"):
filename = os.path.join(
os.path.dirname(__file__), "example_data", "tour_mode_choice_spec.csv"
)
coeffs_filename = os.path.join(
os.path.dirname(__file__), "example_data", "tour_mode_choice_coefs.csv"
)
coeffs_template_filename = os.path.join(
os.path.dirname(__file__), "example_data", "tour_mode_choice_coef_template.csv"
)
spec = pd.read_csv(filename, comment="#")
coefs = pd.read_csv(coeffs_filename, index_col="coefficient_name", comment="#")
template = pd.read_csv(
coeffs_template_filename, index_col="coefficient_name", comment="#"
)
spec_numeric = (
spec.iloc[:, 3:]
.applymap(lambda i: template[purpose].get(i, i))
.applymap(lambda i: coefs.value.get(i, i))
.astype(np.float32)
.fillna(0)
)
return pd.concat([spec.iloc[:, :3], spec_numeric], axis=1)
```
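A minimal usage sketch for the loaders above (not part of the repository; the `sharrow.example_data` import path is assumed from the package layout, and the printed summary is only illustrative):
```python
from sharrow import example_data

data = example_data.get_data()      # dict (or addicty.Dict) with hhs, persons, land_use, skims
households = data["hhs"]            # pandas.DataFrame indexed by HHID
skims = data["skims"]               # xarray Dataset with otaz/dtaz/time_period dimensions
print(households.shape, list(skims.dims))
```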
#### File: sharrow/sharrow/relationships.py
```python
import ast
import logging
import networkx as nx
import numpy as np
import pandas as pd
import xarray as xr
from .dataset import Dataset, construct
try:
from ast import unparse
except ImportError:
from astunparse import unparse as _unparse
unparse = lambda *args: _unparse(*args).strip("\n")
logger = logging.getLogger("sharrow")
well_known_names = {
"nb",
"np",
"pd",
"xr",
"pa",
"log",
"exp",
"log1p",
"expm1",
"max",
"min",
"piece",
"hard_sigmoid",
"transpose_leading",
"clip",
}
def _require_string(x):
if not isinstance(x, str):
raise ValueError("must be string")
return x
def _iat(source, *, _names=None, _load=False, _index_name=None, **idxs):
loaders = {}
if _index_name is None:
_index_name = "index"
for k, v in idxs.items():
if v.ndim == 1:
loaders[k] = xr.DataArray(v, dims=[_index_name])
else:
loaders[k] = xr.DataArray(
v, dims=[f"{_index_name}{n}" for n in range(v.ndim)]
)
if _names:
ds = source[_names]
else:
ds = source
if _load:
ds = ds._load()
return ds.isel(**loaders)
def _at(source, *, _names=None, _load=False, _index_name=None, **idxs):
loaders = {}
if _index_name is None:
_index_name = "index"
for k, v in idxs.items():
if v.ndim == 1:
loaders[k] = xr.DataArray(v, dims=[_index_name])
else:
loaders[k] = xr.DataArray(
v, dims=[f"{_index_name}{n}" for n in range(v.ndim)]
)
if _names:
ds = source[_names]
else:
ds = source
if _load:
ds = ds._load()
return ds.sel(**loaders)
def gather(source, indexes):
"""
Extract values by label on the coordinates indicated by columns of a DataFrame.
Parameters
----------
source : xarray.DataArray or xarray.Dataset
The source of the values to extract.
indexes : Mapping[str, array-like]
The keys of `indexes` (if given as a dataframe, the column names)
should match the named dimensions of `source`. The resulting extracted
data will have a shape one row per row of `df`, and columns matching
the data variables in `source`, and each value is looked up by the labels.
Returns
-------
pd.DataFrame
"""
result = _at(source, **indexes).reset_coords(drop=True)
return result
def igather(source, positions):
"""
Extract values by position on the coordinates indicated by columns of a DataFrame.
Parameters
----------
source : xarray.DataArray or xarray.Dataset
positions : pd.DataFrame or Mapping[str, array-like]
The columns (or keys) of `df` should match the named dimensions of
this Dataset. The resulting extracted DataFrame will have one row
per row of `df`, columns matching the data variables in this dataset,
and each value is looked up by the positions.
Returns
-------
pd.DataFrame
"""
result = _iat(source, **positions).reset_coords(drop=True)
return result
def xgather(source, positions, indexes):
if len(indexes) == 0:
return igather(source, positions)
elif len(positions) == 0:
return gather(source, indexes)
else:
return gather(igather(source, positions), indexes)
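# Illustrative example (not in the original source): gather() selects by label,
# igather() by integer position, and xgather() applies positions first, then labels.
#
#     ds = xr.Dataset({"dist": ("zone", [1.1, 2.2, 3.3])}, coords={"zone": [101, 102, 103]})
#     gather(ds, {"zone": np.array([103, 101])})   # dist -> [3.3, 1.1], looked up by label
#     igather(ds, {"zone": np.array([2, 0])})      # dist -> [3.3, 1.1], looked up by position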
class Relationship:
"""
Defines a linkage between datasets in a `DataTree`.
"""
def __init__(
self,
parent_data,
parent_name,
child_data,
child_name,
indexing="label",
analog=None,
):
self.parent_data = _require_string(parent_data)
"""str: Name of the parent dataset."""
self.parent_name = _require_string(parent_name)
"""str: Variable in the parent dataset that references the child dimension."""
self.child_data = _require_string(child_data)
"""str: Name of the child dataset."""
self.child_name = _require_string(child_name)
"""str: Dimension in the child dataset that is used by this relationship."""
if indexing not in {"label", "position"}:
raise ValueError("indexing must be by label or position")
self.indexing = indexing
"""str: How the target dimension is used, either by 'label' or 'position'."""
self.analog = analog
"""str: Original variable that defined label-based relationship before digitization."""
def __eq__(self, other):
if isinstance(other, self.__class__):
return repr(self) == repr(other)
def __repr__(self):
return f"<Relationship by {self.indexing}: {self.parent_data}[{self.parent_name!r}] -> {self.child_data}[{self.child_name!r}]>"
def attrs(self):
return dict(
parent_name=self.parent_name,
child_name=self.child_name,
indexing=self.indexing,
)
@classmethod
def from_string(cls, s):
"""
Construct a `Relationship` from a string.
Parameters
----------
s : str
The relationship definition.
To create a label-based relationship, the string should look like
"ParentNode.variable_name @ ChildNode.dimension_name". To create
a position-based relationship, give
"ParentNode.variable_name -> ChildNode.dimension_name".
Returns
-------
Relationship
"""
if "->" in s:
parent, child = s.split("->", 1)
i = "position"
elif "@" in s:
parent, child = s.split("@", 1)
i = "label"
p1, p2 = parent.split(".", 1)
c1, c2 = child.split(".", 1)
p1 = p1.strip()
p2 = p2.strip()
c1 = c1.strip()
c2 = c2.strip()
return cls(
parent_data=p1,
parent_name=p2,
child_data=c1,
child_name=c2,
indexing=i,
)
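# Illustrative examples of the two accepted string forms (not in the original source;
# the dataset and variable names are hypothetical):
#
#     Relationship.from_string("households.home_zone @ land_use.TAZ")
#         -> label-based: values of households['home_zone'] are matched against land_use's TAZ labels
#     Relationship.from_string("households.zone_idx -> land_use.TAZ")
#         -> position-based: households['zone_idx'] holds integer positions along land_use's TAZ dimension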
class DataTree:
"""
A tree representing linked datasets, from which data can flow.
Parameters
----------
graph : networkx.MultiDiGraph
root_node_name : str
The name of the node at the root of the tree.
extra_funcs : Tuple[Callable]
Additional functions that can be called by Flow objects created
using this DataTree. These functions should have defined `__name__`
attributes, so they can be called in expressions.
extra_vars : Mapping[str,Any], optional
Additional named constants that can be referenced by expressions in
Flow objects created using this DataTree.
cache_dir : Path-like, optional
The default directory where Flow objects are created.
relationships : Iterable[str or Relationship]
The relationship definitions used to define this tree. All dataset
nodes named in these relationships should also be included as
keyword arguments for this constructor.
force_digitization : bool, default False
Whether to automatically digitize all relationships (converting them
from label-based to position-based). Digitization is required to
evaluate Flows, but doing so automatically on construction may be
inefficient.
dim_order : Tuple[str], optional
The order of dimensions to use in Flow outputs. Generally only needed
if there are multiple dimensions in the root dataset.
"""
DatasetType = Dataset
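# Illustrative construction (not in the original source; dataset and column names
# are hypothetical):
#
#     tree = DataTree(
#         base=households_dataset,
#         land_use=land_use_dataset,
#         relationships=("base.home_zone @ land_use.TAZ",),
#     )
#
# With no explicit root_node_name, the first dataset added effectively becomes the
# root node; the other datasets are reached from it through the declared relationships.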
def __init__(
self,
graph=None,
root_node_name=None,
extra_funcs=(),
extra_vars=None,
cache_dir=None,
relationships=(),
force_digitization=False,
dim_order=None,
**kwargs,
):
if isinstance(graph, Dataset):
raise ValueError("datasets must be given as keyword arguments")
# raw init
if graph is None:
graph = nx.MultiDiGraph()
self._graph = graph
self._root_node_name = None
self.force_digitization = force_digitization
self.dim_order = dim_order
self.dim_exclude = set()
# defined init
if root_node_name is not None and root_node_name in kwargs:
self.add_dataset(root_node_name, kwargs[root_node_name])
self.root_node_name = root_node_name
self.extra_funcs = extra_funcs
self.extra_vars = extra_vars or {}
self.cache_dir = cache_dir
self._cached_indexes = {}
for k, v in kwargs.items():
if root_node_name is not None and k == root_node_name:
continue
self.add_dataset(k, v)
for r in relationships:
self.add_relationship(r)
if force_digitization:
self.digitize_relationships(inplace=True)
# These filters are applied to incoming datasets when using `replace_datasets`.
self.replacement_filters = {}
"""Dict[Str,Callable]: Filters that are automatically applied to data on replacement.
When individual datasets are replaced in the tree, the incoming dataset is
passed through the filter with a matching name-key (if it exists). The filter
should be a function that accepts one argument (the incoming dataset) and returns
one value (the dataset to save in the tree). These filters can be used to ensure
data quality, e.g. renaming variables, ensuring particular data types, etc.
"""
self.subspace_fallbacks = {}
"""Dict[Str:List[Str]]: Allowable fallback subspace lookups.
When a named variable is not found in a given subspace, the default result is
raising a KeyError. But, if fallbacks are defined for a given subspace, the
fallbacks are searched in order for the desired variable.
"""
@property
def shape(self):
"""Tuple[int]: base shape of arrays that will be loaded when using this DataTree."""
if self.dim_order:
dim_order = self.dim_order
else:
from .flows import presorted
dim_order = presorted(self.root_dataset.dims, self.dim_order)
return tuple(
self.root_dataset.dims[i] for i in dim_order if i not in self.dim_exclude
)
def __shallow_copy_extras(self):
return dict(
extra_funcs=self.extra_funcs,
extra_vars=self.extra_vars,
cache_dir=self.cache_dir,
force_digitization=self.force_digitization,
)
def __repr__(self):
s = f"<{self.__module__}.{self.__class__.__name__}>"
if len(self._graph.nodes):
s += "\n datasets:"
if self.root_node_name:
s += f"\n - {self.root_node_name}"
for k in self._graph.nodes:
if k == self.root_node_name:
continue
s += f"\n - {k}"
else:
s += "\n datasets: none"
if len(self._graph.edges):
s += "\n relationships:"
for e in self._graph.edges:
s += f"\n - {self._get_relationship(e)!r}".replace(
"<Relationship ", ""
).rstrip(">")
else:
s += "\n relationships: none"
return s
def _hash_features(self):
h = []
if len(self._graph.nodes):
if self.root_node_name:
h.append(f"dataset:{self.root_node_name}")
for k in self._graph.nodes:
if k == self.root_node_name:
continue
h.append(f"dataset:{k}")
else:
h.append("datasets:none")
if len(self._graph.edges):
for e in self._graph.edges:
r = f"relationship:{self._get_relationship(e)!r}".replace(
"<Relationship ", ""
).rstrip(">")
h.append(r)
else:
h.append("relationships:none")
h.append(f"dim_order:{self.dim_order}")
return h
@property
def root_node_name(self):
"""str: The root node for this data tree, which is only ever a parent."""
if self._root_node_name is None:
for nodename in self._graph.nodes:
if self._graph.in_degree(nodename) == 0:
self._root_node_name = nodename
break
return self._root_node_name
@root_node_name.setter
def root_node_name(self, name):
if name is None:
self._root_node_name = None
return
if not isinstance(name, str):
raise TypeError(f"root_node_name must be str not {type(name)}")
if name not in self._graph.nodes:
raise KeyError(name)
self._root_node_name = name
def add_relationship(self, *args, **kwargs):
"""
Add a relationship to this DataTree.
The new relationship will point from a variable in one dataset
to a dimension of another dataset in this tree. Both the parent
and the child datasets should already have been added.
Parameters
----------
*args, **kwargs
All arguments are passed through to the `Relationship`
constructor, unless only a single `str` argument is provided,
in which case the `Relationship.from_string` class constructor
is used.
"""
if len(args) == 1 and isinstance(args[0], Relationship):
r = args[0]
elif len(args) == 1 and isinstance(args[0], str):
s = args[0]
if "->" in s:
parent, child = s.split("->", 1)
i = "position"
elif "@" in s:
parent, child = s.split("@", 1)
i = "label"
p1, p2 = parent.split(".", 1)
c1, c2 = child.split(".", 1)
p1 = p1.strip()
p2 = p2.strip()
c1 = c1.strip()
c2 = c2.strip()
r = Relationship(
parent_data=p1,
parent_name=p2,
child_data=c1,
child_name=c2,
indexing=i,
)
else:
r = Relationship(*args, **kwargs)
# check for existing relationships, don't duplicate
for e in self._graph.edges:
r2 = self._get_relationship(e)
if r == r2:
return
# confirm correct pointer
r.parent_data = self.finditem(r.parent_name, maybe_in=r.parent_data)
self._graph.add_edge(r.parent_data, r.child_data, **r.attrs())
if self.force_digitization:
self.digitize_relationships(inplace=True)
def get_relationship(self, parent, child):
attrs = self._graph.edges[parent, child]
return Relationship(parent_data=parent, child_data=child, **attrs)
def add_dataset(self, name, dataset, relationships=(), as_root=False):
"""
Add a new Dataset node to this DataTree.
Parameters
----------
name : str
dataset : Dataset or pandas.DataFrame
Will be coerced into a `Dataset` object if it is not already
in that format, using a no-copy process if possible.
relationships : Tuple[str or Relationship]
Also add these relationships.
as_root : bool, default False
Set this new node as the root of the tree, displacing any existing
root.
"""
self._graph.add_node(name, dataset=construct(dataset))
if self.root_node_name is None or as_root:
self.root_node_name = name
if isinstance(relationships, str):
relationships = [relationships]
for r in relationships:
# TODO validate relationships before adding.
self.add_relationship(r)
if self.force_digitization:
self.digitize_relationships(inplace=True)
def add_items(self, items):
from collections.abc import Mapping, Sequence
if isinstance(items, Sequence):
for i in items:
self.add_items(i)
elif isinstance(items, Mapping):
if "name" in items and "dataset" in items:
self.add_dataset(items["name"], items["dataset"])
preload = True
else:
preload = False
for k, v in items.items():
if preload and k in {"name", "dataset"}:
continue
if k == "relationships":
for r in v:
self.add_relationship(r)
else:
self.add_dataset(k, v)
else:
raise ValueError("add_items requires Sequence or Mapping")
@property
def root_node(self):
return self._graph.nodes[self.root_node_name]
@property
def root_dataset(self):
return self._graph.nodes[self.root_node_name]["dataset"]
@root_dataset.setter
def root_dataset(self, x):
from .dataset import Dataset
if not isinstance(x, Dataset):
x = construct(x)
if self.root_node_name in self.replacement_filters:
x = self.replacement_filters[self.root_node_name](x)
self._graph.nodes[self.root_node_name]["dataset"] = x
def _get_relationship(self, edge):
return Relationship(
parent_data=edge[0], child_data=edge[1], **self._graph.edges[edge]
)
def __getitem__(self, item):
if isinstance(item, (list, tuple)):
from .dataset import Dataset
return Dataset({k: self[k] for k in item})
try:
return self._getitem(item)
except KeyError:
return self._getitem(item, include_blank_dims=True)
def finditem(self, item, maybe_in=None):
if maybe_in is not None and maybe_in in self._graph.nodes:
dataset = self._graph.nodes[maybe_in].get("dataset", {})
if item in dataset:
return maybe_in
return self._getitem(item, just_node_name=True)
def _getitem(
self, item, include_blank_dims=False, only_dims=False, just_node_name=False
):
if isinstance(item, (list, tuple)):
from .dataset import Dataset
return Dataset({k: self[k] for k in item})
if "." in item:
item_in, item = item.split(".", 1)
else:
item_in = None
queue = [self.root_node_name]
examined = set()
while len(queue):
current_node = queue.pop(0)
if current_node in examined:
continue
dataset = self._graph.nodes[current_node].get("dataset", {})
try:
by_name = item in dataset and not only_dims
except TypeError:
by_name = False
try:
by_dims = not by_name and include_blank_dims and (item in dataset.dims)
except TypeError:
by_dims = False
if (by_name or by_dims) and (item_in is None or item_in == current_node):
if just_node_name:
return current_node
if current_node == self.root_node_name:
if by_dims:
return xr.DataArray(
pd.RangeIndex(dataset.dims[item]), dims=item
)
else:
return dataset[item]
else:
_positions = {}
_labels = {}
if by_dims:
if item in dataset.variables:
coords = {item: dataset.variables[item]}
else:
coords = None
result = xr.DataArray(
pd.RangeIndex(dataset.dims[item]),
dims=item,
coords=coords,
)
else:
result = dataset[item]
dims_in_result = set(result.dims)
for path in nx.algorithms.simple_paths.all_simple_edge_paths(
self._graph, self.root_node_name, current_node
):
path_dim = self._graph.edges[path[-1]].get("child_name")
if path_dim not in dims_in_result:
continue
# path_indexing = self._graph.edges[path[-1]].get('indexing')
t1 = None
# intermediate nodes on path
for (e, e_next) in zip(path[:-1], path[1:]):
r = self._get_relationship(e)
r_next = self._get_relationship(e_next)
if t1 is None:
t1 = self._graph.nodes[r.parent_data].get("dataset")
t2 = self._graph.nodes[r.child_data].get("dataset")[
[r_next.parent_name]
]
if r.indexing == "label":
t1 = t2.sel(
{r.child_name: t1[r.parent_name].to_numpy()}
)
else: # by position
t1 = t2.isel(
{r.child_name: t1[r.parent_name].to_numpy()}
)
# final node in path
e = path[-1]
r = Relationship(
parent_data=e[0], child_data=e[1], **self._graph.edges[e]
)
if t1 is None:
t1 = self._graph.nodes[r.parent_data].get("dataset")
if r.indexing == "label":
_labels[r.child_name] = t1[r.parent_name].to_numpy()
else: # by position
_idx = t1[r.parent_name].to_numpy()
if not np.issubdtype(_idx.dtype, np.integer):
_idx = _idx.astype(np.int64)
_positions[r.child_name] = _idx
y = xgather(result, _positions, _labels)
if len(result.dims) == 1 and len(y.dims) == 1:
y = y.rename({y.dims[0]: result.dims[0]})
elif len(dims_in_result) == len(y.dims):
y = y.rename({_i: _j for _i, _j in zip(y.dims, result.dims)})
return y
else:
examined.add(current_node)
for _, next_up in self._graph.out_edges(current_node):
if next_up not in examined:
queue.append(next_up)
raise KeyError(item)
def get_expr(self, expression, engine="sharrow"):
"""
Access or evaluate an expression.
Parameters
----------
expression : str
Returns
-------
DataArray
"""
try:
result = self[expression]
except (KeyError, IndexError):
if engine == "sharrow":
result = (
self.setup_flow({expression: expression})
.load_dataarray()
.isel(expressions=0)
)
elif engine == "numexpr":
from xarray import DataArray
result = DataArray(
pd.eval(expression, resolvers=[self], engine="numexpr"),
)
return result
@property
def subspaces(self):
"""Mapping[str,Dataset] : Direct access to node Dataset objects by name."""
spaces = {}
for k in self._graph.nodes:
s = self._graph.nodes[k].get("dataset", None)
if s is not None:
spaces[k] = s
return spaces
def subspaces_iter(self):
for k in self._graph.nodes:
s = self._graph.nodes[k].get("dataset", None)
if s is not None:
yield (k, s)
def namespace_names(self):
namespace = set()
for spacename, spacearrays in self.subspaces_iter():
for k, arr in spacearrays.coords.items():
namespace.add(f"__{spacename or 'base'}__{k}")
for k, arr in spacearrays.items():
namespace.add(f"__{spacename or 'base'}__{k}")
return namespace
@property
def dims(self):
"""
Mapping from dimension names to lengths across all dataset nodes.
"""
dims = {}
for k, v in self.subspaces_iter():
for name, length in v.dims.items():
if name in dims:
if dims[name] != length:
raise ValueError(
"inconsistent dimensions\n" + self.dims_detail()
)
else:
dims[name] = length
return xr.core.utils.Frozen(dims)
def dims_detail(self):
"""
Report on the names and sizes of dimensions in all Dataset nodes.
Returns
-------
str
"""
s = ""
for k, v in self.subspaces_iter():
s += f"\n{k}:"
for name, length in v.dims.items():
s += f"\n - {name}: {length}"
return s[1:]
def drop_dims(self, dims, inplace=False, ignore_missing_dims=True):
"""
Drop dimensions from root Dataset node.
Parameters
----------
dims : str or Iterable[str]
One or more named dimensions to drop.
inplace : bool, default False
Whether to drop dimensions in-place.
ignore_missing_dims : bool, default True
Simply ignore any dimensions that are not present.
Returns
-------
DataTree
Returns self if dropping inplace, otherwise returns a copy
with dimensions dropped.
"""
if isinstance(dims, str):
dims = [dims]
if inplace:
obj = self
else:
obj = self.copy()
if not ignore_missing_dims:
obj.root_dataset = obj.root_dataset.drop_dims(dims)
else:
for d in dims:
if d in obj.root_dataset.dims:
obj.root_dataset = obj.root_dataset.drop_dims(d)
obj.dim_order = tuple(x for x in self.dim_order if x not in dims)
return obj
def get_indexes(
self,
position_only=True,
as_dict=True,
replacements=None,
use_cache=True,
check_shapes=True,
):
if use_cache and (position_only, as_dict) in self._cached_indexes:
return self._cached_indexes[(position_only, as_dict)]
if not position_only:
raise NotImplementedError
dims = [
d
for d in self.dims
if d[-1:] != "_" or (d[-1:] == "_" and d[:-1] not in self.dims)
]
if replacements is not None:
obj = self.replace_datasets(replacements)
else:
obj = self
result = {}
result_shape = None
for k in sorted(dims):
result_k = obj._getitem(k, include_blank_dims=True, only_dims=True)
if result_shape is None:
result_shape = result_k.shape
if result_shape != result_k.shape:
if check_shapes:
raise ValueError(
f"inconsistent index shapes {result_k.shape} v {result_shape} (probably an error on {k} or {sorted(dims)[0]})"
)
result[k] = result_k
if as_dict:
result = {k: v.to_numpy() for k, v in result.items()}
else:
result = Dataset(result)
if use_cache:
self._cached_indexes[(position_only, as_dict)] = result
return result
def replace_datasets(self, other=None, validate=True, redigitize=True, **kwargs):
"""
Replace one or more datasets in the nodes of this tree.
Parameters
----------
other : Mapping[str,Dataset]
A dictionary of replacement datasets.
validate : bool, default True
Raise an error when replacing downstream datasets that
are referenced by position, unless the replacement is identically
sized. If validation is deactivated, and an incompatible dataset
is placed in this tree, flows that rely on that relationship will
give erroneous results or crash with a segfault.
redigitize : bool, default True
Automatically re-digitize relationships that are label-based and
were previously digitized.
**kwargs : Mapping[str,Dataset]
Alternative format to `other`.
Returns
-------
DataTree
A new DataTree with data replacements completed.
"""
replacements = {}
if other is not None:
replacements.update(other)
replacements.update(kwargs)
graph = self._graph.copy()
for k in replacements:
if k not in graph.nodes:
raise KeyError(k)
x = construct(replacements[k])
if validate:
if x.dims != graph.nodes[k]["dataset"].dims:
# when replacement dimensions do not match, check for
# any upstream nodes that reference this dataset by
# position... which will potentially be problematic.
for e in self._graph.edges:
if e[1] == k:
indexing = self._graph.edges[e].get("indexing")
if indexing == "position":
raise ValueError(
f"dimensions mismatch on "
f"positionally-referenced dataset {k}: "
f"receiving {x.dims} "
f"expected {graph.nodes[k]['dataset'].dims}"
)
if k in self.replacement_filters:
x = self.replacement_filters[k](x)
graph.nodes[k]["dataset"] = x
result = type(self)(graph, self.root_node_name, **self.__shallow_copy_extras())
if redigitize:
result.digitize_relationships(inplace=True)
return result
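# Illustrative use (not in the original source; the node name is hypothetical):
#
#     tree2 = tree.replace_datasets(households=new_households_dataset)
#
# Previously digitized label-based relationships are re-digitized against the new
# data, while datasets referenced by position must keep their original dimensions.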
def setup_flow(
self,
definition_spec,
*,
cache_dir=None,
name=None,
dtype="float32",
boundscheck=False,
error_model="numpy",
nopython=True,
fastmath=True,
parallel=True,
readme=None,
flow_library=None,
extra_hash_data=(),
write_hash_audit=True,
hashing_level=1,
dim_exclude=None,
):
"""
Set up a new Flow for analysis using the structure of this DataTree.
Parameters
----------
definition_spec : Dict[str,str]
Gives the names and expressions that define the variables to
create in this new `Flow`.
cache_dir : Path-like, optional
A location to write out generated python and numba code. If not
provided, a unique temporary directory is created.
name : str, optional
The name of this Flow used for writing out cached files. If not
provided, a unique name is generated. If `cache_dir` is given,
be sure to avoid name conflicts with other flows in the same
directory.
dtype : str, default "float32"
The name of the numpy dtype that will be used for the output.
boundscheck : bool, default False
If True, boundscheck enables bounds checking for array indices, and
out of bounds accesses will raise IndexError. The default is to not
do bounds checking, which is faster but can produce garbage results
or segfaults if there are problems, so try turning this on for
debugging if you are getting unexplained errors or crashes.
error_model : {'numpy', 'python'}, default 'numpy'
The error_model option controls the divide-by-zero behavior. Setting
it to ‘python’ causes divide-by-zero to raise exception like
CPython. Setting it to ‘numpy’ causes divide-by-zero to set the
result to +/-inf or nan.
nopython : bool, default True
Compile using numba's `nopython` mode. Provided for debugging only,
as there's little point in turning this off for production code, as
all the speed benefits of sharrow will be lost.
fastmath : bool, default True
If true, fastmath enables the use of "fast" floating point transforms,
which can improve performance but can result in tiny distortions in
results. See numba docs for details.
parallel : bool, default True
Enable or disable parallel computation for certain functions.
readme : str, optional
A string to inject as a comment at the top of the flow Python file.
flow_library : Mapping[str,Flow], optional
An in-memory cache of precompiled Flow objects. Using this can result
in performance improvements when repeatedly using the same definitions.
extra_hash_data : Tuple[Hashable], optional
Additional data used for generating the flow hash. Useful to prevent
conflicts when using a flow_library with multiple similar flows.
write_hash_audit : bool, default True
Writes a hash audit log into a comment in the flow Python file, for
debugging purposes.
hashing_level : int, default 1
Level of detail to write into flow hashes. Increase detail to avoid
hash conflicts for similar flows. Level 2 adds information about
names used in expressions and digital encodings to the flow hash,
which prevents conflicts but requires more pre-computation to generate
the hash.
dim_exclude : Collection[str], optional
Exclude these root dataset dimensions from this flow.
Returns
-------
Flow
"""
from .flows import Flow
return Flow(
self,
definition_spec,
cache_dir=cache_dir or self.cache_dir,
name=name,
dtype=dtype,
boundscheck=boundscheck,
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
readme=readme,
flow_library=flow_library,
extra_hash_data=extra_hash_data,
hashing_level=hashing_level,
error_model=error_model,
write_hash_audit=write_hash_audit,
dim_order=self.dim_order,
dim_exclude=dim_exclude,
)
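# Illustrative flow setup (not in the original source; expression and variable names
# are hypothetical):
#
#     flow = tree.setup_flow({
#         "income_k": "base.income / 1000",
#         "log_dist": "np.log1p(skims.DIST)",
#     })
#     arr = flow.load_dataarray()   # one value per root-dimension element and expression
#
# The keys of definition_spec become output names; the values are expressions
# evaluated against the datasets in this tree.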
def _spill(self, all_name_tokens=()):
"""
Write backup code for sharrow-lite.
Parameters
----------
all_name_tokens
Returns
-------
"""
cmds = []
return "\n".join(cmds)
def get_named_array(self, mangled_name):
if mangled_name[:2] != "__":
raise KeyError(mangled_name)
name1, name2 = mangled_name[2:].split("__", 1)
dataset = self._graph.nodes[name1].get("dataset")
return dataset[name2].to_numpy()
_BY_OFFSET = "digitizedOffset"
def digitize_relationships(self, inplace=False, redigitize=True):
"""
Convert all label-based relationships into position-based.
Parameters
----------
inplace : bool, default False
redigitize : bool, default True
Re-compute position-based relationships from labels, even
if the relationship had previously been digitized.
Returns
-------
DataTree or None
Only returns a copy if not digitizing in-place.
"""
if inplace:
obj = self
else:
obj = self.copy()
for e in obj._graph.edges:
r = obj._get_relationship(e)
if redigitize and r.analog:
p_dataset = obj._graph.nodes[r.parent_data].get("dataset", None)
if p_dataset is not None:
if r.parent_name not in p_dataset:
r.indexing = "label"
r.parent_name = r.analog
if r.indexing == "label":
p_dataset = obj._graph.nodes[r.parent_data].get("dataset", None)
c_dataset = obj._graph.nodes[r.child_data].get("dataset", None)
upstream = p_dataset[r.parent_name]
downstream = c_dataset[r.child_name]
# vectorize version
mapper = {i: j for (j, i) in enumerate(downstream.to_numpy())}
offsets = xr.apply_ufunc(np.vectorize(mapper.get), upstream)
# candidate name for write back
r_parent_name_new = (
f"{self._BY_OFFSET}{r.parent_name}_{r.child_data}_{r.child_name}"
)
# it is common to have mirrored offsets in various dimensions.
# we'd like to retain only the same data in memory once, so we'll
# check if these offsets match any existing ones and if so just
# point to that memory.
for k in p_dataset:
if isinstance(k, str) and k.startswith(self._BY_OFFSET):
if p_dataset[k].equals(offsets):
# we found a match, so we'll assign this name to
# the match's memory storage instead of replicating it.
obj._graph.nodes[r.parent_data][
"dataset"
] = p_dataset.assign({r_parent_name_new: p_dataset[k]})
# r_parent_name_new = k
break
else:
# no existing offset arrays match, make this new one
obj._graph.nodes[r.parent_data]["dataset"] = p_dataset.assign(
{r_parent_name_new: offsets}
)
obj._graph.edges[e].update(
dict(
parent_name=r_parent_name_new,
indexing="position",
analog=r.parent_name,
)
)
if not inplace:
return obj
@property
def relationships_are_digitized(self):
"""bool : Whether all relationships are digital (by position)."""
for e in self._graph.edges:
r = self._get_relationship(e)
if r.indexing != "position":
return False
return True
def _arg_tokenizer(self, spacename, spacearray, exclude_dims=None):
if spacename == self.root_node_name:
root_dataset = self.root_dataset
from .flows import presorted
root_dims = list(presorted(root_dataset.dims, self.dim_order, exclude_dims))
if isinstance(spacearray, str):
from_dims = root_dataset[spacearray].dims
else:
from_dims = spacearray.dims
return tuple(
ast.parse(f"_arg{root_dims.index(dim):02}", mode="eval").body
for dim in from_dims
)
if isinstance(spacearray, str):
spacearray_ = self._graph.nodes[spacename]["dataset"][spacearray]
else:
spacearray_ = spacearray
from_dims = spacearray_.dims
offset_source = spacearray_.attrs.get("digital_encoding", {}).get(
"offset_source", None
)
if offset_source is not None:
from_dims = self._graph.nodes[spacename]["dataset"][offset_source].dims
tokens = []
n_missing_tokens = 0
for dimname in from_dims:
found_token = False
for e in self._graph.in_edges(spacename, keys=True):
this_dim_name = self._graph.edges[e]["child_name"]
if dimname != this_dim_name:
continue
parent_name = self._graph.edges[e]["parent_name"]
parent_data = e[0]
upside_ast = self._arg_tokenizer(
parent_data, parent_name, exclude_dims=exclude_dims
)
try:
upside = ", ".join(unparse(t) for t in upside_ast)
except: # noqa: E722
for t in upside_ast:
print(f"t:{t}")
raise
tokens.append(f"__{parent_data}__{parent_name}[{upside}]")
found_token = True
break
if not found_token:
ix = self.subspaces[spacename].indexes[dimname]
ix = {i: n for n, i in enumerate(ix)}
tokens.append(ix)
n_missing_tokens += 1
if n_missing_tokens > 1:
raise ValueError("at most one missing dimension is allowed")
result = []
for t in tokens:
if isinstance(t, str):
result.append(ast.parse(t, mode="eval").body)
else:
result.append(t)
return tuple(result)
@property
def coords(self):
return self.root_dataset.coords
def get_index(self, dim):
for spacename, subspace in self.subspaces.items():
if dim in subspace.coords:
return subspace.indexes[dim]
def copy(self):
return type(self)(
self._graph.copy(), self.root_node_name, **self.__shallow_copy_extras()
)
``` |
{
"source": "jpobeid/sast_flask",
"score": 3
} |
#### File: jpobeid/sast_flask/models.py
```python
import os
import numpy as np
from flask_restful import fields
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
def init_db():
if not np.any([e.split('.')[-1] == 'db' for e in os.listdir()]):
db.create_all()
print('Initialized database')
class UserModel(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(50))
token = db.Column(db.Integer)
salt_exp = db.Column(db.Integer)
password = db.Column(db.String(100))
resource_fields = {
'id': fields.Integer,
'email': fields.String,
'token': fields.Integer,
'salt_exp': fields.Integer,
'password': fields.String,
}
def __init__(self, email, token, salt_exp, password):
self.email = email
self.token = token
self.salt_exp = salt_exp
self.password = password
``` |
{
"source": "jpocentek/ch-gallery",
"score": 2
} |
#### File: chgallery/auth/__init__.py
```python
from flask import (
Blueprint,
abort,
current_app,
g,
redirect,
render_template,
session,
url_for
)
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.security import check_password_hash, generate_password_hash
from chgallery.auth.decorators import login_required
from chgallery.auth.forms import LoginForm, RegisterForm
from chgallery.db import get_db_session
from chgallery.db.declarative import Image, User
bp = Blueprint('auth', __name__, url_prefix='/auth')
@bp.route('/')
@login_required
def dashboard():
images = (
g.db_session.query(Image)
.filter(Image.author == g.user)
.order_by(Image.creation_date.desc())
)
return render_template('auth/dashboard.html', images=images)
@bp.route('/register', methods=('GET', 'POST'))
def register():
if current_app.config.get('REGISTRATION_DISABLED'):
abort(404)
# Redirect user directly to dashboard if already authorized
if g.user:
return redirect(url_for('auth.dashboard'))
form = RegisterForm()
if form.validate_on_submit():
db_session = get_db_session()
user = User(
username=form.username.data,
email=form.email.data,
password=generate_password_hash(form.password.data),
)
db_session.add(user)
db_session.commit()
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@bp.route('/login', methods=('GET', 'POST'))
def login():
# Redirect user directly to dashboard if already authorized
if g.user:
return redirect(url_for('auth.dashboard'))
form = LoginForm()
error = None
if form.validate_on_submit():
db_session = get_db_session()
try:
user = db_session.query(User).filter(User.username == form.username.data).one()
except NoResultFound:
user = None
if user is None or not check_password_hash(user.password, form.password.data):
error = 'Invalid login credentials'
else:
session.clear()
session['user_id'] = user.id
return redirect(url_for('auth.dashboard'))
return render_template('auth/login.html', form=form, error=error)
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('auth.login'))
@bp.before_app_request
def load_logged_in_user():
user_id = session.get('user_id')
g.user = None
if user_id is not None:
try:
g.user = get_db_session().query(User).filter(User.id == user_id).one()
except NoResultFound:
session.clear()
```
#### File: chgallery/image/forms.py
```python
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired
from wtforms import StringField
from wtforms.validators import Length, StopValidation
from chgallery.image.utils import is_allowed_image_file
class ImageFileRequired(FileRequired):
def __call__(self, form, field):
super().__call__(form, field)
if not is_allowed_image_file(field.data):
raise StopValidation(
"An image of type 'jpg', 'png' or 'gif' is required"
)
class UploadForm(FlaskForm):
image = FileField('image', validators=[ImageFileRequired()])
description = StringField('description', validators=[Length(min=0, max=128)])
```
#### File: ch-gallery/chgallery/middleware.py
```python
from typing import Any, Dict, Union
class PrefixMiddleware:
"""Provides common root url prefix when application is mounted outside server root."""
def __init__(self, app: Any, prefix: str = "") -> None:
self.app = app
self.prefix = prefix
def __call__(
self, environ: Dict[str, Any], start_response: Any
) -> Union[Any, list[bytes]]:
if environ["PATH_INFO"].startswith(self.prefix):
environ["PATH_INFO"] = environ["PATH_INFO"][len(self.prefix) :]
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
start_response("404 Not Found", [("Content-Type", "text/plain")])
return ["This url does not belong to the app.".encode()]
```
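A typical way to apply this middleware (not shown in this excerpt) is to wrap the Flask application's `wsgi_app`; the `/gallery` prefix below is only an example:
```python
from flask import Flask
from chgallery.middleware import PrefixMiddleware

app = Flask(__name__)
app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix="/gallery")
# A request to /gallery/auth/login now reaches the app's /auth/login route;
# anything outside the prefix gets the middleware's plain-text 404 response.
```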
#### File: ch-gallery/tests/test_image.py
```python
import io
import os
import pytest
from PIL import Image as PILImage
from werkzeug.datastructures import FileStorage
from werkzeug.security import generate_password_hash
from chgallery.db import get_db_session
from chgallery.db.declarative import Image, User
from chgallery.image.utils import is_allowed_image_file, smart_resize
TEST_PICTURE = os.path.join(os.getcwd(), 'tests', 'assets', 'test_picture.jpg')
@pytest.fixture
def mock_txt_file():
return FileStorage(
stream=io.BytesIO(b'Hello, world!'),
filename='example.txt',
content_type='text/plain',
)
@pytest.fixture
def mock_jpg_file():
return FileStorage(
stream=open(TEST_PICTURE, 'rb'),
filename='test_picture.jpg',
content_type='image/jpeg',
)
@pytest.fixture
def image_copy():
""" Copy original picture for safety """
img = PILImage.open(TEST_PICTURE)
img_copy = img.copy()
img.close()
return img_copy
def test_image_validation_with_invalid_file(mock_txt_file):
assert not is_allowed_image_file(mock_txt_file)
def test_image_validation_with_valid_image(mock_jpg_file):
assert is_allowed_image_file(mock_jpg_file)
def test_resize_wide_image(image_copy):
image_copy = smart_resize(image_copy)
assert image_copy.width == 2000
assert image_copy.height == 1333
def test_resize_high_image(image_copy):
# Rotate image by 90 degrees and swap width and height
image_copy = image_copy.rotate(90, expand=1)
image_copy = smart_resize(image_copy)
assert image_copy.height == 2000
assert image_copy.width == 1333
class TestUploadImageClass:
def test_only_authorized_user_can_upload_file(self, client, mock_txt_file):
response = client.post('/image/upload', data={'image': mock_txt_file})
assert response.status_code == 302
assert response.headers['location'].endswith('/auth/login')
def test_upload_with_wrong_mime_type(self, client, auth, mock_txt_file):
auth.login()
response = client.post(
'/image/upload',
data={'image': mock_txt_file},
content_type='multipart/form-data',
)
assert b'An image of type' in response.data
def test_successful_upload(self, app, client, auth, mock_jpg_file):
auth.login()
data = {
'image': mock_jpg_file,
'description': 'Test Image',
}
client.post('/image/upload', data=data)
with app.app_context():
db_session = get_db_session()
image = db_session.query(Image).filter(Image.name == 'test_picture.jpg').one()
db_session.close()
assert image.description == 'Test Image'
assert image.url().endswith('/image/uploads/{}'.format(image.name))
assert image.thumbnail_url().endswith('/image/uploads/thumbs/{}'.format(image.name))
assert image.preview_url().endswith('/image/uploads/previews/{}'.format(image.name))
assert image.name in os.listdir(app.config['UPLOAD_PATH'])
assert image.name in os.listdir(os.path.join(app.config['UPLOAD_PATH'], 'thumbs'))
assert image.name in os.listdir(os.path.join(app.config['UPLOAD_PATH'], 'previews'))
response = client.get('/image/uploads/{}'.format(image.name))
assert response.status_code == 200
assert response.headers['content-type'] == 'image/jpeg'
response = client.get('/image/uploads/thumbs/{}'.format(image.name))
assert response.status_code == 200
assert response.headers['content-type'] == 'image/jpeg'
response = client.get('/image/uploads/previews/{}'.format(image.name))
assert response.status_code == 200
assert response.headers['content-type'] == 'image/jpeg'
def test_unique_name_creation(self, app, client, auth, mock_jpg_file):
auth.login()
for i in range(3):
test_image = FileStorage(
stream=open(TEST_PICTURE, 'rb'),
filename='repeated_name.jpg',
content_type='image/jpeg',
)
data = {
'image': test_image,
'description': 'Repeated name {}'.format(i),
}
client.post('/image/upload', data=data, content_type='multipart/form-data')
with app.app_context():
db_session = get_db_session()
items = (
db_session.query(Image)
.filter(Image.name.like('repeated_name%'))
.order_by(Image.id).all()
)
db_session.close()
assert len(items) == 3
assert items[0].name == 'repeated_name.jpg'
assert items[1].name == 'repeated_name(1).jpg'
assert items[2].name == 'repeated_name(2).jpg'
class TestDeleteImageClass:
def test_delete_with_non_existing_object(self, auth, client):
auth.login()
response = client.post('/image/delete/1/')
assert response.status_code == 404
def test_that_only_owner_can_delete_image(self, app, auth, client, mock_jpg_file):
# Create image on behalf of test user
auth.login()
client.post('/image/upload', data={'image': mock_jpg_file})
auth.logout()
# Check if not authorized user has no access to delete view
response = client.post('/image/delete/1')
assert response.status_code == 302
# Check if other authorized user cannot remove image
other_user = User(
username='otheruser',
email='<EMAIL>',
password=generate_password_hash('<PASSWORD>'),
)
with app.app_context():
db_session = get_db_session()
db_session.add(other_user)
db_session.commit()
auth.login(username='otheruser', password='<PASSWORD>')
response = client.post('/image/delete/1')
assert response.status_code == 403
def test_that_image_is_properly_deleted(self, app, auth, client, mock_jpg_file):
auth.login()
# Create test object
client.post('/image/upload', data={'image': mock_jpg_file}, content_type='multipart/form-data')
# Check if it exists in database
assert client.get('/image/uploads/test_picture.jpg').status_code == 200
# Now delete image and check if cleanup is working
client.post('/image/delete/1')
with app.app_context():
db_session = get_db_session()
assert not db_session.query(Image).all()
assert 'test_picture.jpg' not in os.listdir(app.config['UPLOAD_PATH'])
assert 'test_picture.jpg' not in os.listdir(os.path.join(app.config['UPLOAD_PATH'], 'thumbs'))
assert 'test_picture.jpg' not in os.listdir(os.path.join(app.config['UPLOAD_PATH'], 'previews'))
def test_that_images_are_deleted_along_with_author(self, app, auth, client, mock_jpg_file):
other_user = User(
username='otheruser',
email='<EMAIL>',
password=generate_password_hash('<PASSWORD>'),
)
with app.app_context():
db_session = get_db_session()
db_session.add(other_user)
db_session.commit()
data = {
'image': mock_jpg_file,
'description': 'Test Image',
}
auth.login(username='otheruser', password='<PASSWORD>')
client.post('/image/upload', data=data, content_type='multipart/form-data')
# Ensure that image was actually uploaded
assert db_session.query(Image).count() == 1
# Delete Other User and ensure his images are deleted as well
db_session.delete(other_user)
db_session.commit()
assert db_session.query(Image).count() == 0
class TestDashboardClass:
def test_image_display_in_dashboard(self, app, auth, client, mock_jpg_file):
# Create test image
auth.login()
client.post('/image/upload', data={'image': mock_jpg_file})
# Enter dashboard page where an image should be displayed
response = client.get('/auth/')
assert response.status_code == 200
assert b'/image/uploads/previews/test_picture.jpg' in response.data
``` |
{
"source": "jpochetedmead/Flask-Adventure-Game",
"score": 3
} |
#### File: Flask-Adventure-Game/escape_game/auth.py
```python
import functools
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.security import check_password_hash, generate_password_hash
from escape_game.db import get_db
# This creates a Blueprint named 'auth'.
# Like the application object, the blueprint needs to know where it’s defined, so __name__ is passed as the second argument.
# The url_prefix will be prepended to all the URLs associated with the blueprint.
bp = Blueprint('auth', __name__, url_prefix='/auth')
# @bp.route associates the URL /register with the register view function.
# When Flask receives a request to /auth/register, it will call the register view and use the return value as the response.
@bp.route('/register', methods=('GET', 'POST'))
def register():
if request.method == 'POST':
name = request.form['name']
username = request.form['username']
email = request.form['email']
password = request.form['password']
db = get_db()
error = None
if not username:
error = 'Username is required.'
elif not password:
error = 'Password is required.'
elif not name:
error = 'Name is required.'
elif not email:
error = 'Email is required.'
elif db.execute(
'SELECT id FROM user WHERE username = ?', (username,)
).fetchone() is not None:
error = 'User {} is already registered.'.format(username)
elif db.execute(
'SELECT id FROM user WHERE name = ?', (name,)
).fetchone() is not None:
error = 'Name {} is already registered.'.format(name)
elif db.execute(
'SELECT id FROM user WHERE email = ?', (email,)
).fetchone() is not None:
error = 'Email {} is already registered.'.format(email)
if error is None:
db.execute(
'INSERT INTO user (name, username, email, password) VALUES (?, ?, ?, ?)',
(name, username, email, generate_password_hash(password)) # generate_password_hash() is used to securely hash the password, and that hash is stored.
)
db.commit() # Since this query modifies data, db.commit() needs to be called afterwards to save the changes.
return redirect(url_for('auth.login')) # redirect() generates a redirect response to the generated URL.
# If validation fails, the error is shown to the user.
flash(error) # flash() stores messages that can be retrieved when rendering the template.
return render_template('auth/register.html') # render_template() will render a template containing the HTML.
@bp.route('/login', methods=('GET', 'POST'))
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
email = request.form['email']
db = get_db()
error = None
user = db.execute(
'SELECT * FROM user WHERE username = ?', (username,)
).fetchone()
email = db.execute(
'SELECT * FROM user WHERE email = ?', (email,)
).fetchone()
if user is None or email is None:
error = 'Incorrect Username or Email.'
# check_password_hash() hashes the submitted password in the same way as the stored hash and securely compares them. If they match, the password is valid.
elif not check_password_hash(user['password'], password):
error = 'Incorrect password. Try again.'
if error is None:
session.clear()
session['user_id'] = user['id']
return redirect(url_for('index'))
flash(error)
return render_template('auth/login.html')
# bp.before_app_request() registers a function that runs before the view function, no matter what URL is requested.
@bp.before_app_request
# load_logged_in_user checks if a user id is stored in the session and gets that user’s data from the database, storing it on g.user, which lasts for the length of the request.
def load_logged_in_user():
user_id = session.get('user_id')
# If there is no user id, or if the id doesn’t exist, g.user will be None.
if user_id is None:
g.user = None
else:
g.user = get_db().execute(
'SELECT * FROM user WHERE id = ?', (user_id,)
).fetchone()
# To log out, you need to remove the user id from the session. Then load_logged_in_user won’t load a user on subsequent requests.
@bp.route('/logout')
def logout():
session.clear()
return redirect(url_for('index'))
``` |
{
"source": "jpodeszwik/openai",
"score": 3
} |
#### File: openai/CartPole-v0/genetic_neural_network.py
```python
import gym
import time
import random
import copy
class GeneticNetworkHelper:
def crossing(net1, net2):
crossing_point = random.randint(1, 3)
new_weights = []
for i in range(crossing_point):
new_weights.append(net1.weights[i])
for i in range(crossing_point, 4):
new_weights.append(net2.weights[i])
return Network(new_weights)
def mutate(net):
mutations = random.randint(1, 3)
mutated_genes = random.sample([0, 1, 2, 3], mutations)
new_weights = copy.copy(net.weights)
for idx in mutated_genes:
new_weights[idx] = random.random()*2 - 1
return Network(new_weights)
class GeneticSearcher:
def __init__(self, pop_size):
self.pop = [Network.random_network() for i in range(pop_size)]
self.nt = NetTester()
def rate_network(self, net):
return self.nt.test_n_times_and_return_min(net, 3)
def selection(self):
population_fitness = [(net, self.rate_network(net)) for net in self.pop]
population_fitness = sorted(population_fitness, reverse=True, key=lambda x: x[1])
pop_size = len(population_fitness)
old_survivors = list(map(lambda x: x[0], population_fitness[:int(pop_size/3)]))
children = []
while len(children) < pop_size/3:
parents = random.sample(list(set(old_survivors)), 2)  # random.sample requires a sequence on Python 3.11+
children.append(GeneticNetworkHelper.crossing(parents[0], parents[1]))
new_generation = old_survivors + children
while len(new_generation) < pop_size:
new_generation.append(GeneticNetworkHelper.mutate(random.choice(old_survivors)))
self.pop = new_generation
return population_fitness[0][1]
def show_best(self):
population_fitness = [(net, self.rate_network(net)) for net in self.pop]
population_fitness = sorted(population_fitness, reverse=True, key=lambda x: x[1])
best = population_fitness[0][0]
self.nt.render(best)
class Network:
def __init__(self, weights):
self.weights = weights
def weighted_sum(self, observation):
sum = 0.0
for i in range(4):
sum += self.weights[i] * observation[i]
return sum
def output(self, observation):
if self.weighted_sum(observation) > 0:
return 1
else:
return 0
def __str__(self):
return str(self.weights)
def random_network():
return Network([random.random() * 2 - 1.0 for i in range(4)])
class NetTester:
def __init__(self):
self.env = gym.make('CartPole-v0')
def test_n_times_and_return_min(self, net, n):
results = [self.test(net) for i in range(n)]
return min(results)
def test(self, net):
observation = self.env.reset()
action = 0
for t in range(10000):
observation, reward, done, info = self.env.step(action)
action = net.output(observation)
if done:
break
return t+1
def test_with_render(self, net):
observation = self.env.reset()
action = 0
for t in range(100000):
self.env.render()
observation, reward, done, info = self.env.step(action)
action = net.output(observation)
if done:
break
return t+1
def render(self, net):
val = self.test_with_render(net)
print('result: {}'.format(val))
gs = GeneticSearcher(20)
for i in range(20):
print('generation {}'.format(i))
best = gs.selection()
print('best: {}'.format(best))
gs.show_best()
if best == 10000:
break
```
#### File: openai/Pendulum-v0/genetic_neural_network.py
```python
from concurrent.futures import ProcessPoolExecutor
import copy
import gym
import numpy
import random
class GeneticSearcher:
def __init__(self, pop_size, problem):
self.problem = problem
self.pop = [Network.random_network() for i in range(pop_size)]
self.fitness_cache = {}
self.best = None
self.nt = NetTester(problem)
self.pp = ProcessPoolExecutor(max_workers=4)
self.ntf = NetworkTesterFactory(problem)
self.pop_size = pop_size
def recalculate_fitness(self):
nets_to_rate = [net for net in self.pop if net not in self.fitness_cache]
for net, res in self.pp.map(self.ntf.rate_network, nets_to_rate):
self.fitness_cache[net] = res
def selection(self):
population_fitness = [(net, self.fitness_cache[net]) for net in self.pop]
population_fitness = sorted(population_fitness, reverse=True, key=lambda x: x[1])
self.best = population_fitness[0]
return list(map(lambda x: x[0], population_fitness[:int(self.pop_size / 3)]))
def crossing(self, parents):
children = []
while len(children) < self.pop_size / 3:
parents = random.sample(list(set(parents)), 2)  # random.sample requires a sequence on Python 3.11+
children.append(self.problem.crossing(parents[0], parents[1]))
return children
def mutation(self, population):
mutants = []
while len(mutants) < 0.3 * self.pop_size:
mutants.append(self.problem.mutate(random.choice(population)))
return mutants
def iteration(self):
self.recalculate_fitness()
old_survivors = self.selection()
children = self.crossing(old_survivors)
mutants = self.mutation(old_survivors)
new_generation = old_survivors + children + mutants
while len(new_generation) < self.pop_size:
new_generation.append(Network.random_network())
self.pop = new_generation
return self.best[1]
def show_best(self):
self.nt.test(self.best[0], render=True)
class Network:
def __init__(self, weights):
self.weights = weights
def __hash__(self):
return hash(frozenset(self.weights))
def __eq__(self, other):
return self.weights.__eq__(other.weights)
def weighted_sum(self, observation):
s = 0.0
for i in range(3):
s += self.weights[i] * observation[i]
return s + self.weights[3]
def output(self, observation):
val = self.weighted_sum(observation) / 2
if val > 2:
return 2
elif val < -2:
return -2
return val
def __str__(self):
return str(self.weights)
@staticmethod
def random_network():
return Network([random.random() * 2 - 1 for i in range(4)])
class NetTester:
def __init__(self, problem):
self.problem = problem
self.env = problem.make_env()
def test_n_times_and_return_min(self, net, n):
results = [self.test(net) for _ in range(n)]
return min(results)
def test(self, net, render=False):
observation = self.env.reset()
res = 0.0
for t in range(1000):
if render:
self.env.render()
self.problem.scale_observation(self.env, observation)
action = numpy.array([net.output(observation)])
observation, reward, done, info = self.env.step(action)
res += reward
if done:
break
return res
class NetworkTesterFactory:
def __init__(self, problem):
self.problem = problem
def rate_network(self, net):
nt = NetTester(self.problem)
return net, nt.test_n_times_and_return_min(net, 10)
class PendulumV0:
@staticmethod
def crossing(net1, net2):
crossing_point = random.randint(1, 3)
new_weights = []
for i in range(crossing_point):
new_weights.append(net1.weights[i])
for i in range(crossing_point, 4):
new_weights.append(net2.weights[i])
return Network(new_weights)
@staticmethod
def mutate(net):
mutations = random.randint(1, 3)
mutated_genes = random.sample([0, 1, 2, 3], mutations)
new_weights = copy.copy(net.weights)
for idx in mutated_genes:
new_weights[idx] = random.random() * 2 - 1
return Network(new_weights)
@staticmethod
def make_env():
return gym.make('Pendulum-v0')
@staticmethod
def scale_observation(env, observation):
for i in range(3):
observation[i] /= env.observation_space.high[i]
def main():
gs = GeneticSearcher(100, PendulumV0)
for i in range(20):
print('generation {}'.format(i))
best = gs.iteration()
print('best: {}'.format(best))
gs.show_best()
if __name__ == '__main__':
main()
``` |
{
"source": "jpodivin/facesnap",
"score": 3
} |
#### File: jpodivin/facesnap/facesnap.py
```python
import pydbus
from gi.repository import GLib
import cv2
import os
import time
def take_pic():
"""Take a picture with default camera and store it.
"""
camera = cv2.VideoCapture(0)
for _ in range(3): # snap 3 times in a row
ret_val, frame = camera.read()
pic_path = os.path.join(
os.path.expanduser("~/facesnaps"),
"capture_{}.png".format(time.time()))
cv2.imwrite(pic_path, frame)
camera.release()
def _event_handler(*args):
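# Descriptive note (added): pydbus delivers the PropertiesChanged signal as
# (interface_name, changed_properties, invalidated_properties), so args[1] is the
# dict of changed UPower properties. A picture is taken only when LidIsClosed is
# present and False, i.e. when the lid has just been opened.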
if isinstance(args[1], dict) and not args[1].get('LidIsClosed', True):
time.sleep(1) # wait 1 sec before snapping
take_pic()
def main():
loop = GLib.MainLoop()
bus = pydbus.SystemBus()
lid_proxy = bus.get("org.freedesktop.UPower", "/org/freedesktop/UPower")
lid_proxy.onPropertiesChanged = _event_handler
loop.run()
if __name__ == '__main__':
main()
```
#### File: jpodivin/facesnap/setup.py
```python
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
PIC_DIR = os.path.expanduser('~/facesnaps')
def _ensure_pic_path():
if not os.path.exists(PIC_DIR):
os.mkdir(PIC_DIR)
setuptools.setup(
name="facesnap",
version="0.1.0",
author="<NAME>",
author_email="<EMAIL>",
description="Open lid, take a pic",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jpodivin/facesnap",
scripts=["./facesnap.py"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=[
"pydbus",
"opencv-python",
"pycairo",
"gobject",
"PyGObject"
]
)
_ensure_pic_path()
``` |
{
"source": "jpodivin/KohoCarpet",
"score": 3
} |
#### File: KohoCarpet/KohoCarpet/KohoCarpet.py
```python
import numpy as np
import matplotlib.pyplot as pyplot
from numpy.random import default_rng
import os
def data_init(map_depth,
dist_params,
class_size = 10):
generator = default_rng()
dataset = np.array([[generator.normal(loc = mean,
scale = sigma,
size = (map_depth)) for i in range(class_size)]
for mean, sigma in dist_params])
return dataset.reshape((class_size*len(dist_params), -1))
def weights_init(size,
map_depth,
dist_sigma = 1.0):
generator = default_rng()
weights = generator.normal(scale = dist_sigma, size = (size, size, map_depth))
return weights
def update_area(time,
max_size):
if time == 1:
return max_size
else:
return int(max_size//np.log(time))
def kohonen_rule(weights,
input_vector,
position,
update_scale,
learning_rate = 0.1):
xa, ya = np.subtract(position, update_scale//2)
xb, yb = np.add(position, update_scale//2)%weights.shape[0]
if xa > xb:
xb *= -1
if ya > yb:
yb *= -1
ty = np.arange(np.min((ya,yb)), np.max((ya,yb)))
tx = np.arange(np.min((xa,xb)), np.max((xa,xb)))
for i in ty:
for j in tx:
weights[i,j,:] += learning_rate * np.subtract(input_vector, weights[i,j,:])
return weights
def distance(input_vector,
weights):
return -np.sum(np.abs(input_vector - weights))
def normalize(array):
return (array - np.min(array))/(np.max(array)-np.min(array))
def print_activations(mesh_coords,
activations,
time,
size,
path = './out/activations'):
if path.split('/')[1] not in os.listdir('.'):
os.mkdir(path.split('/')[1])
if path.split('/')[2] not in os.listdir('./'+path.split('/')[1]):
os.mkdir(path)
figure, axis = pyplot.subplots()
mesh = axis.pcolormesh(mesh_coords[0], mesh_coords[1], activations.reshape((size, size)))
figure.colorbar(mesh)
figure.savefig(path + '/snapshot' + str(time) + '.png', format='png')
pyplot.close(figure)
def print_map(weights,
dataset,
time,
size,
path = './out/clusters'):
if path.split('/')[1] not in os.listdir('.'):
os.mkdir(path.split('/')[1])
if path.split('/')[2] not in os.listdir('./'+path.split('/')[1]):
os.mkdir(path)
figure, axis = pyplot.subplots()
axis.scatter(weights[:, 0], weights[:, 1])
axis.scatter(dataset[:, 0], dataset[:, 1],
color = 'red',
marker='x')
figure.savefig(path+ '/snapshot'+str(time) + '.png',
format='png')
pyplot.close(figure)
def main():
size = 100
map_depth = 2
dataset = normalize(data_init(map_depth, [(-10, 3), (20,3)], 10))
weights = normalize(weights_init(size, map_depth))
activations = np.zeros((size, size))
mesh_coords = np.meshgrid([i for i in range(0,size)], [i for i in range(0,size)])
print('*'*80)
print('\n')
for time in range(1, size):
new_weights = weights
for example in np.random.default_rng().choice(dataset, 1):
activations = np.fromiter([distance(example, weight)
for weight in weights.reshape(-1, 2)], float)
activations = normalize(activations)
avg_activation = np.average(activations)
new_weights = kohonen_rule(weights,
example,
(np.argmax(activations)//size, np.argmax(activations)%size),
update_area(time, weights.shape[0]),
0.1)
weights = new_weights
print(avg_activation)
print('\n')
print_activations(mesh_coords, activations, time, size)
print_map(weights, dataset, time, size)
if __name__ == "__main__":
main()
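# Illustrative usage sketch (added for clarity, not part of the original
# module): runs a single Kohonen update step with the helpers above.
# The map size, depth, and sample vector are made-up values.
def _usage_sketch():
    size, depth = 10, 2
    weights = normalize(weights_init(size, depth))
    sample = np.array([0.3, 0.7])
    # distance() returns a negative L1 norm, so argmax picks the best match.
    scores = np.fromiter(
        (distance(sample, w) for w in weights.reshape(-1, depth)), float)
    bmu = (int(np.argmax(scores)) // size, int(np.argmax(scores)) % size)
    return kohonen_rule(weights, sample, bmu, update_scale=4, learning_rate=0.1)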
``` |
{
"source": "jpodivin/pystrand",
"score": 3
} |
#### File: pystrand/pystrand/genotypes.py
```python
import numpy as np
class Genotype(np.ndarray):
"""
Genotype class, inherits from numpy ndarray and, in many ways,
behaves like it.
The code follows guidelines in: https://numpy.org/doc/stable/user/basics.subclassing.html
"""
def __new__(
cls,
shape,
random_init=False,
gene_vals=None,
seed=0,
default_genome=None,
protected=False,
**kwargs):
"""
Sets up the instance of the Genotype.
Much of the functionality defined here is duplicated in the __array_finalize__
method, as there are several ways the ndarray can be instantiated.
Parameters
----------
cls : type
shape : tuple
random_init : bool
gene_vals : list
seed : integer
default_genome : ndarray
protected : bool
Returns
-------
Genotype
New Genotype instance.
"""
if gene_vals is None:
gene_vals = [0, 1]
if random_init:
random_generator = np.random.default_rng(seed=seed)
genome = random_generator.choice(gene_vals, shape)
elif default_genome is not None:
genome = default_genome
else:
genome = np.zeros(shape)
genome = genome.view(cls)
genome._gene_vals = gene_vals
genome._protected = protected
return genome
def __array_finalize__(self, obj):
"""
https://numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_finalize__
"""
if obj is None:
return
self._genotype_fitness = getattr(obj, 'genotype_fitness', 0.0)
self._gene_vals = getattr(obj, '_gene_vals', [0, 1])
self._protected = getattr(obj, '_protected', False)
def __reduce__(self):
"""
Prepare object for pickling.
https://numpy.org/doc/stable/reference/generated/numpy.ndarray.__reduce__.html
"""
pickled_genotype = super(Genotype, self).__reduce__()
genotype_state = pickled_genotype[2] + (self._gene_vals, self._protected)
return (pickled_genotype[0], pickled_genotype[1], genotype_state)
def __setstate__(self, state):
"""
Set value of attributes _gene_vals and _protected from state.
"""
self._gene_vals = state[-2]
self._protected = state[-1]
super(Genotype, self).__setstate__(state[:-2])
def mutate(self, mutation_op):
"""
Applies the given mutation operator to the genotype in place.
Parameters
----------
mutation_op : Mutation
Mutation operator, subtype of BaseMutation
"""
mutation_op(self)
def crossover(self, partner_genotype, mask=None):
"""
Parameters
----------
partner_genotype : Genotype
mask : np.ndarray
determines which genes (symbols) are selected from parents.
If left as 'None', a random mask is generated on each call, which impacts performance.
"""
if mask is None:
# Random boolean mask is used if none is defined. Note that calling
# np.ndarray() directly would return uninitialized memory, so the mask
# is drawn explicitly here.
mask = np.random.default_rng().random(self.shape) < 0.5
descendant_genome = self.copy()
descendant_genome[mask] = partner_genotype[mask]
return descendant_genome
@property
def gene_vals(self):
"""Return _gene_vals.
"""
return self._gene_vals
@property
def fitness(self):
"""Return _genotype_fitness attribute.
"""
return self._genotype_fitness
@property
def protected(self):
"""Return _protected attribute.
"""
return self._protected
@protected.setter
def protected(self, new_value):
"""Set _protected attribute/flag of the genotype.
If _protected is `True` the genotype will not be altered by operators.
"""
self._protected = new_value
def set_fitness(self, new_fitness):
"""Set fitness of the genotype directly.
Raises
------
TypeError:
If 'new_fitness' isn't of type 'float'
"""
if not isinstance(new_fitness, float):
raise TypeError()
self._genotype_fitness = new_fitness
```
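A minimal usage sketch of the `Genotype` class above, assuming `pystrand.genotypes` is importable as named in the file header; the shape, seeds, mask, and fitness value are illustrative only:
```python
import numpy as np
from pystrand.genotypes import Genotype

# Two random binary parents of length 8 (seeds chosen arbitrarily).
parent_a = Genotype((8,), random_init=True, gene_vals=[0, 1], seed=1)
parent_b = Genotype((8,), random_init=True, gene_vals=[0, 1], seed=2)

# Explicit crossover mask: take the second half of the genome from parent_b.
mask = np.zeros((8,), dtype=bool)
mask[4:] = True
child = parent_a.crossover(parent_b, mask=mask)

child.set_fitness(0.5)   # must be a float, otherwise TypeError is raised
print(child, child.fitness, child.gene_vals)
```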
#### File: pystrand/loggers/details.py
```python
import json
import os
from pystrand.loggers.base import BaseLogger
class RunDetails(BaseLogger):
# pylint: disable=protected-access
def save_run_details(self, optimizer):
def _get_mutation_details(mutation_op):
details = {
'op_name': mutation_op.__class__.__name__
}
if mutation_op._mutation_probability:
details['mutation_probability'] = mutation_op._mutation_probability
return details
def _get_selection_details(selection_op):
details = {
'op_name': selection_op.__class__.__name__
}
if '_selected_population_fraction' in dir(selection_op):
details['population_fraction'] = selection_op._selected_population_fraction
if '_selection_prob' in dir(selection_op):
details['selection_prob'] = selection_op._selection_prob
return details
run_details = {
'id': optimizer.optimizer_uuid,
'max_iterations': optimizer._max_iterations,
'mutation_ops': [_get_mutation_details(op) for op in optimizer._mutation_ops],
'selection_ops': [_get_selection_details(op) for op in optimizer._selection_methods],
'best_individual': "{}".format(optimizer.population.retrieve_best()[0])
}
details_path = optimizer.optimizer_uuid + ".json"
details_path = os.path.join(self.log_path, details_path)
with open(details_path, 'w') as file:
json.dump(run_details, file)
```
#### File: pystrand/models/base_models.py
```python
from pystrand.populations import BasePopulation
from pystrand.optimizers import BaseOptimizer
class BaseModel:
"""Basic genetic algorithm model.
Defines the API for derived model classes, but isn't intended for actual
use, as the key methods are to be implemented in the subclasses.
"""
def __init__(self, gene_domain, population_size=None, **kwargs):
inferred_parameters = self._infer_pop_params(gene_domain)
if population_size is None:
population_size = inferred_parameters['pop_size']
genome_shapes = kwargs.get('genome_shapes', inferred_parameters['genome_shapes'])
gene_vals = kwargs.get('gene_vals', inferred_parameters['gene_vals'])
kwargs['parallelize'] = kwargs.get('parallelize', True)
max_iterations = kwargs.pop('max_iterations', 0)
population = BasePopulation(
population_size,
genome_shapes=genome_shapes,
gene_vals=gene_vals)
self._optimizer = BaseOptimizer(
population,
max_iterations=max_iterations,
**kwargs)
self._fitness_fn = None
def _infer_pop_params(self, domain):
"""Guess general model parameters using heuristic
Parameters
----------
domain : np.ndarray
Returns
-------
dict
Dictionary of inferred model parameters.
"""
params = {
'pop_size': 100,
'genome_shapes': (min(len(domain), 10),),
'gene_vals': domain
}
return params
def fit(self, X, y=None, **kwargs):
"""Fit genetic algorithm model
Parameters
----------
X : np.ndarray
y : np.ndarray
"""
return self._optimizer.fit(self._fitness_fn, kwargs.get('verbose', 1))
def predict(self, x):
"""Evaluate vector 'x'
Parameters
----------
x : np.ndarray
"""
raise NotImplementedError
@property
def optimizer(self):
"""Return model optimizer.
"""
return self._optimizer
@property
def solution(self):
"""Return best performing candidate solution.
"""
return self._optimizer.population.retrieve_best()[0]
```
#### File: pystrand/models/polymodels.py
```python
import numpy as np
from pystrand.models.base_models import BaseModel
from pystrand import fitnessfunctions as fn
class PowerPolyModel(BaseModel):
"""Model as a power series polynomial with coeficients equivalent to genes.
"""
def __init__(self, gene_domain, population_size=None,
inverted_fitness=True, **kwargs):
super().__init__(gene_domain, population_size=population_size, **kwargs)
self._fitness_fn = fn.DataFitnessFn(inverted=inverted_fitness)
def fit(self, X, y, **kwargs):
"""Fit polynomial genetic algorithm model
Parameters
----------
X : np.ndarray
y : np.ndarray
Returns
-------
dict
Dictionary of recorded model statistics over time.
"""
self._fitness_fn.data = X
self._fitness_fn.labels = y
history = self._optimizer.fit(
self._fitness_fn, kwargs.get('verbose', 1))
return history
def predict(self, x):
"""Evaluate vector 'x'
Parameters
----------
x : np.ndarray
Returns
-------
float
Evaluation of the modelled polynomial.
"""
genotype = self.optimizer.population.retrieve_best()[0]['genotype']
pol = np.polynomial.Polynomial(genotype)
val = pol(x)
return val
```
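A hedged end-to-end sketch of `PowerPolyModel`: it fits coefficients to samples of a quadratic and then evaluates the fitted polynomial. The data, gene domain, population size, and iteration count are illustrative assumptions, and `parallelize=False` is passed only to keep the sketch single-process:
```python
import numpy as np
from pystrand.models.polymodels import PowerPolyModel

X = np.linspace(-1.0, 1.0, 50)
y = 0.5 + 2.0 * X - 1.0 * X ** 2      # samples of the target polynomial

model = PowerPolyModel(
    gene_domain=np.linspace(-2.0, 2.0, 41),   # candidate coefficient values
    population_size=50,
    max_iterations=100,
    parallelize=False)

history = model.fit(X, y, verbose=0)
print(history['max_fitness'][-1], model.predict(0.5))
```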
#### File: pystrand/pystrand/optimizers.py
```python
import multiprocessing as mp
import uuid
from pystrand.populations import BasePopulation
from pystrand.selections import RouletteSelection, ElitismSelection, BaseSelection
from pystrand.mutations import BaseMutation, PointMutation
from pystrand.loggers.csv_logger import CsvLogger
from pystrand.loggers.details import RunDetails
class BaseOptimizer:
"""Base optimizer class.
Parameters
----------
fitness_function : BaseFunction
provides mapping from genotype to a fitness value, [0, 1]
max_iterations : int
0 by default
population : Population
Seed population, can include known sub-optimal solutions.
mutation_prob : float
0.001 by default
mutation_ops :
Mutation operator to use on genotypes.
Uses supplied mutation_prob. If None, defaults to PointMutation.
None by default.
crossover_prob : float
0.0 by default, no crossover will take place
selection_ops : str, BaseSelection, or list of either
Selection operator(s) or algorithm names ('roulette' or 'elitism'),
applied in the given order. 'roulette' by default.
selected_fraction : float
Fraction of the population kept by the built-in selection operators.
0.1 by default.
log_path : str
Directory used by the CSV logger (and the optional run-details logger
when 'save_details' is passed). Logging is disabled if None.
parallelize : bool
Use multiprocessing to evaluate genomes in parallel?
Raises
------
TypeError
If supplied wrong selection method type.
If supplied mutation_op not subclassing BaseMutation.
"""
def __init__(self,
population,
max_iterations=0,
fitness_function=None,
mutation_prob=0.001,
mutation_ops=None,
crossover_prob=0.0,
selection_ops='roulette',
selected_fraction=0.1,
log_path=None,
parallelize=False,
**kwargs):
"""For each element in list of selection methods we check the type.
Only Selection and string are accepted, other types raise TypeError.
The strings must be reckognized as names of algorithm,
any other string will result in ValueError.
"""
self._optimizer_uuid = str(uuid.uuid1())
self._fitness_function = fitness_function
if mutation_ops:
if isinstance(mutation_ops, list):
self._mutation_ops = mutation_ops
elif issubclass(type(mutation_ops), BaseMutation):
self._mutation_ops = [mutation_ops]
else:
raise TypeError(
'Invalid mutation operator.',
type(mutation_ops))
else:
self._mutation_ops = [PointMutation(mutation_prob)]
if log_path:
self.logger = CsvLogger(log_path=log_path)
if kwargs.get('save_details'):
self.details_logger = RunDetails(log_path=log_path)
else:
self.details_logger = None
else:
self.logger = None
self.details_logger = None
self._crossover_probability = crossover_prob
self._selection_methods = []
self._parallelize = parallelize
self._population = population
self._max_iterations = max_iterations
#First we turn selection_methods into list, in case it isn't.
if not isinstance(selection_ops, list):
selection_ops = [selection_ops]
for selection_method in selection_ops:
if isinstance(selection_method, str):
if selection_method == 'roulette':
self._selection_methods += [RouletteSelection(selected_fraction)]
elif selection_method == 'elitism':
self._selection_methods += [ElitismSelection(selected_fraction)]
else:
raise ValueError(
'Unknown selection algorithm name.',
selection_method)
elif isinstance(selection_method, BaseSelection):
self._selection_methods += [selection_method]
else:
raise TypeError(
'Invalid selection type.',
type(selection_method))
def evaluate_individual(self, individual):
"""Return fitness value of the given individual.
"""
return self._fitness_function(individual)
def evaluate_population(self):
"""Apply set fitness function to every individual in _population
in either sequential or parallel manner depending on value of
the _paralelize attribute. And store result in the 'fitness' field.
"""
evaluated_individuals = self._population.individuals
if self._parallelize:
with mp.Pool() as worker_pool:
result = worker_pool.map_async(
self._fitness_function,
evaluated_individuals['genotype']).get(5)
evaluated_individuals['fitness'] = result
else:
evaluated_individuals['fitness'] = [
self._fitness_function(individual)
for individual
in evaluated_individuals['genotype']]
self._population.replace_individuals(evaluated_individuals)
def select_genomes(self):
"""Create new population by sequentially applying selection operators
in the order they were given to __init__.
Expand the new population to match the original one.
"""
new_population = BasePopulation(
0,
self._population.genome_shapes,
self._population.gene_values)
for selection_method in self._selection_methods:
new_population.append_individuals(
selection_method.select(self._population))
new_population.expand_population(
self._population.population_size)
self._population = new_population
def fit(self, fitness_function=None, verbose=1):
"""Main training loop.
Return statistics of the run as a dictionary of lists.
Parameters
----------
fitness_function: BaseFunction
verbose : int
If not '0' outputs statistics using print every generation.
Default is 1.
"""
if fitness_function:
self._fitness_function = fitness_function
elif not self._fitness_function:
raise RuntimeError("No fitness function supplied")
run_id = uuid.uuid1()
history = {
"iteration" : [],
"max_fitness" : [],
"min_fitness" : [],
"fitness_avg" : [],
"fitness_std" : []}
iteration = 0
while iteration < self._max_iterations:
try:
self.evaluate_population()
except mp.TimeoutError as timeoutException:
print(
"Population evaluation timed out, with exception {}.".format(
timeoutException))
break
history["iteration"].append(iteration)
history["max_fitness"].append(self._population.max_fitness)
history["min_fitness"].append(self._population.min_fitness)
history["fitness_avg"].append(self._population.avg_fitness)
history["fitness_std"].append(self._population.fitness_std)
if verbose > 0:
print(" // ".join(
[key + ": " + str(record[-1]) for key, record in history.items()]
))
if self._population.max_fitness == 1.0:
break
self.select_genomes()
self._population.mutate_genotypes(mutation_ops=self._mutation_ops)
if self._crossover_probability > 0.0:
self._population.cross_genomes(
crossover_prob=self._crossover_probability)
iteration += 1
if self.logger:
self.logger.save_history(history, run_id=run_id)
if self.details_logger:
self.details_logger.save_run_details(self)
return history
@property
def population(self):
"""Return optimized population.
"""
return self._population
@property
def optimizer_uuid(self):
"""Return uuid of the optimizer.
"""
return self._optimizer_uuid
```
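For reference, a minimal sketch of driving `BaseOptimizer` directly with the logging hooks wired in through `log_path` and `save_details` as handled in `__init__` above. The fitness callable, sizes, and the `./logs` directory are assumptions for illustration (whether the loggers create the directory themselves is not shown here):
```python
import numpy as np
from pystrand.populations import BasePopulation
from pystrand.optimizers import BaseOptimizer

def ones_fraction(genotype):
    # Fraction of ones in a binary genome; already lies in [0, 1].
    return float(np.mean(genotype))

population = BasePopulation(
    pop_size=50, genome_shapes=(16,), gene_vals=[0, 1], random_init=True)

optimizer = BaseOptimizer(
    population,
    max_iterations=20,
    selection_ops=['elitism', 'roulette'],
    log_path='./logs',        # assumed to exist; enables the CSV logger
    save_details=True)        # also writes a <uuid>.json run summary

history = optimizer.fit(ones_fraction, verbose=0)
print(max(history['max_fitness']))
```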
#### File: pystrand/tests/test_optimizer.py
```python
from pystrand.optimizers import BaseOptimizer
from pystrand.genotypes import Genotype
from pystrand.populations import BasePopulation
import unittest
import numpy as np
target_genotypes_small = [
np.zeros((10),),
np.array([i%2 for i in range(10)]),
np.array([i+1%2 for i in range(10)]),
np.array([i%3 for i in range(10)])]
target_genotypes_large = [
np.resize(array, (100,)) for array in target_genotypes_small
]
class FitnessFn:
"""Simple fitness function.
The elements of the genotype array serve as coefficients
for a 1d polynomial Pg, evaluated at x=1.1.
Our goal is to find coefficients such that Pg(x) = Pt(x), where Pt is the target polynomial.
For the sake of simplicity, and since fitness is defined in the range [0.0, 1.0],
the absolute difference is clipped and mapped to a fitness value.
"""
def __init__(self, target_genotype):
self.x = 1.1
self.target_polynomial_val = np.poly1d(target_genotype)(self.x)
def __call__(self, individual):
polynomial = np.poly1d(individual)
result = abs(polynomial(self.x)-self.target_polynomial_val)
if result > 1.0:
return 0.0
else:
return 1.0 - result
class Optimizer_init_test(unittest.TestCase):
def test_optimizer_init_small(self):
"""
Optimizer init test. Only checks genotype preservation and instantiation
"""
for target_genotype in target_genotypes_small:
fitness_fn = FitnessFn(target_genotype)
target_genotype = Genotype(
target_genotype.shape,
gene_vals=np.unique(target_genotype),
default_genome=target_genotype)
population = BasePopulation(
pop_size = np.sum(target_genotype.shape)*10,
genome_shapes = target_genotype.shape,
gene_vals = target_genotype.gene_vals,
random_init = True)
new_optimizer = BaseOptimizer(
population,
fitness_function=fitness_fn,
mutation_prob = 0.1,
crossover_prob = 0.5)
self.assertIsInstance(new_optimizer, BaseOptimizer)
self.assertEqual(new_optimizer._fitness_function, fitness_fn)
def test_optimizer_init_large(self):
"""
Optimizer init test. Only checks genotype preservation and instantiation
"""
for target_genotype in target_genotypes_large:
fitness_fn = FitnessFn(target_genotype)
target_genotype = Genotype(
target_genotype.shape,
gene_vals=np.unique(target_genotype),
default_genome=target_genotype)
population = BasePopulation(
pop_size = np.sum(target_genotype.shape)*10,
genome_shapes = target_genotype.shape,
gene_vals = target_genotype.gene_vals,
random_init = True)
new_optimizer = BaseOptimizer(
population,
fitness_function=fitness_fn,
mutation_prob = 0.1,
crossover_prob = 0.5)
self.assertIsInstance(new_optimizer, BaseOptimizer)
self.assertEqual(new_optimizer._fitness_function, fitness_fn)
class Optimizer_Run_test_sequential(unittest.TestCase):
test_runtime_short = 10
test_runtime_long = 100
history_dict_keys = [
'iteration',
'max_fitness',
'min_fitness',
'fitness_avg',
'fitness_std']
def test_optimizer_run_small(self):
"""
Short run of basic optimizer with default params and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_small:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = np.sum(target_genotype.shape)*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5)
history = new_optimizer.fit(fitness_fn, verbose=0)
self.assertIsInstance(history, dict)
self.assertTrue(
set(self.history_dict_keys).issubset(history.keys())
and set(history.keys()).issubset(self.history_dict_keys)
)
self.assertLessEqual(max(history['iteration']), self.test_runtime_short)
def test_optimizer_run_large(self):
"""
Long run of basic optimizer with default params and binary genome.
10000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_large:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = np.sum(target_genotype.shape)*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
population = population,
max_iterations = self.test_runtime_long,
mutation_prob = 0.1,
crossover_prob = 0.5)
history = new_optimizer.fit(fitness_fn, verbose=0)
self.assertIsInstance(history, dict)
self.assertTrue(
set(self.history_dict_keys).issubset(history.keys())
and set(history.keys()).issubset(self.history_dict_keys))
self.assertLessEqual(max(history['iteration']), self.test_runtime_long)
def test_optimizer_elitism_small(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_small[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = 'elitism',
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s" %(target_genotype, history['max_fitness']))
def test_optimizer_elitism_large(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_large[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = 'elitism',
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s" %(target_genotype, history['max_fitness']))
def test_optimizer_combined_small(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_small[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = ['elitism', 'roulette'],
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s " %(target_genotype, history['max_fitness']))
def test_optimizer_combined_large(self):
"""Long run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_large[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = ['elitism', 'roulette'],
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s" %(target_genotype, history['max_fitness']))
class Optimizer_Run_test_parallel(unittest.TestCase):
test_runtime_short = 10
test_runtime_long = 100
history_dict_keys = [
'iteration',
'max_fitness',
'min_fitness',
'fitness_avg',
'fitness_std'
]
def test_optimizer_run_small(self):
"""
Short run of basic optimizer with default params and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_small:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = np.sum(target_genotype.shape)*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5,
parallelize = True)
history = new_optimizer.fit(fitness_fn, verbose=0)
self.assertIsInstance(history, dict)
self.assertTrue(
set(self.history_dict_keys).issubset(history.keys())
and set(history.keys()).issubset(self.history_dict_keys)
)
self.assertLessEqual(max(history['iteration']), self.test_runtime_short)
def test_optimizer_run_large(self):
"""
Long run of basic optimizer with default params and binary genome.
10000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_large:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = np.sum(target_genotype.shape)*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5,
parallelize = True)
history = new_optimizer.fit(fitness_fn, verbose=0)
self.assertIsInstance(history, dict)
self.assertTrue(
set(self.history_dict_keys).issubset(history.keys())
and set(history.keys()).issubset(self.history_dict_keys)
)
self.assertLessEqual(max(history['iteration']), self.test_runtime_long)
def test_optimizer_elitism_small(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_small[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = 'elitism',
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5,
parallelize = True)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s" %(target_genotype, history['max_fitness'])
)
def test_optimizer_elitism_large(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_large[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = 'elitism',
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5,
parallelize = True)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s" %(target_genotype, history['max_fitness'])
)
def test_optimizer_combined_small(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_small[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True
)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = ['elitism', 'roulette'],
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5,
parallelize = True)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s " %(target_genotype, history['max_fitness'])
)
def test_optimizer_combined_large(self):
"""
Short run of basic optimizer with elitism and binary genome.
1000 generations should be enough to reach an optimal match.
However, this is still a stochastic process, so the test will check:
- ticks of algorithm
- consistency of genotypes
- returned history of training
"""
for target_genotype in target_genotypes_large[1:]:
fitness_fn = FitnessFn(target_genotype)
population = BasePopulation(
pop_size = target_genotype.size*10,
genome_shapes = target_genotype.shape,
gene_vals = np.unique(target_genotype),
random_init = True)
new_optimizer = BaseOptimizer(
max_iterations = self.test_runtime_short,
selection_ops = ['elitism', 'roulette'],
population = population,
mutation_prob = 0.1,
crossover_prob = 0.5,
parallelize = True)
history = new_optimizer.fit(fitness_fn, verbose=0)
if len(history['max_fitness']) > 1:
self.assertLessEqual(
0,
np.diff(history['max_fitness']).min(),
msg="\nTarget genotype: %s \nMax_fitness: %s" %(target_genotype, history['max_fitness'])
)
``` |
{
"source": "jpodivin/testfuncpy",
"score": 3
} |
#### File: testfuncpy/landscapy/base_functions.py
```python
import numpy as np
class BaseFunction:
"""Base class of test functions.
"""
def __init__(self, inverted=False):
self._evaluated = 0
self.inverted = inverted
def __call__(self, values):
"""Evaluate function and increment evaluation counter.
"""
evaluation = self.__evaluate__(values)
self._evaluated += 1
if self.inverted:
evaluation = 1 / (1 + evaluation)
return evaluation
def __evaluate__(self, values):
"""Evaluate the function at a given point.
Return results.
:param values: function input
:type values: numpy array
:rtype: float
"""
return 0.0
def _optima(self, values):
"""Checks that provided values are among the known optimal points
(minima or maxima depending on the function and task).
Implementation of this method depends on properties
of the tested function.
:rtype: bool
"""
return False
@property
def evaluated(self):
"""Return how many times was the function evaluated.
"""
return self._evaluated
def optimum_reached(self, values):
"""Check if provided values represent one of the
known optimal values.
:param values: function input
:type values: numpy array
:rtype: bool
"""
return self._optima(values)
class SquashedDimsFunction(BaseFunction):
"""
"""
def __init__(self, inverted, final_dimension, strategy='splitsum'):
self._final_dimension = final_dimension
self._squash_strategy = strategy
super().__init__(inverted=inverted)
def __call__(self, values):
if self._squash_strategy == 'splitsum':
values = np.reshape(values, (self._final_dimension, -1))
values = np.sum(values, 1)
return super().__call__(values)
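# Illustrative sketch (added for clarity, not part of the original module):
# a concrete subclass showing how __evaluate__ and _optima are meant to be
# overridden. The class name and the sphere function are assumptions.
class SphereFunction(BaseFunction):
    def __evaluate__(self, values):
        # Sum of squares; global minimum of 0.0 at the origin.
        return float(np.sum(np.square(values)))

    def _optima(self, values):
        return bool(np.allclose(values, 0.0))

# With inverted=True the call returns 1 / (1 + f(x)), so evaluating at the
# origin yields 1.0, the evaluation counter increments, and
# optimum_reached(np.zeros(3)) returns True.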
``` |
{
"source": "j-po/django-brambling",
"score": 2
} |
#### File: brambling/admin/admin.py
```python
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.db.models import Count
from brambling.models import (Person, Event, DanceStyle,
EnvironmentalFactor, DietaryRestriction,
HousingCategory, CustomForm, CustomFormField,
Organization)
from brambling.admin.forms import PersonChangeForm, PersonCreationForm
class PersonAdmin(UserAdmin):
"https://docs.djangoproject.com/en/1.7/topics/auth/customizing/#a-full-example"
form = PersonChangeForm
add_form = PersonCreationForm
list_display = ('get_full_name', 'email', 'email_confirmed', 'is_active', 'created')
list_filter = ('is_active',)
add_fieldsets = (
(None, {
'fields': (('email', 'password1', '<PASSWORD>'), tuple(Person.REQUIRED_FIELDS))
}),
)
fieldsets = (
(None, {'fields': (('email', 'confirmed_email',), 'password')}),
('Personal', {'fields': (
('given_name', 'surname',), ('middle_name', 'name_order'),
('phone',)
)}),
('Permissions', {'fields': (
('is_active', 'is_superuser',), 'user_permissions', 'groups'
)}),
('Registration Settings', {'fields': (
'dietary_restrictions',
('ef_cause', 'ef_avoid'),
'person_prefer',
'person_avoid',
'housing_prefer',
'other_needs',
'dance_styles',
)}),
('Financial Transactions', {'fields': (
('stripe_customer_id', 'stripe_test_customer_id'),
"dwolla_user_id",
("dwolla_access_token", "dwolla_access_token_expires"),
("dwolla_refresh_token", "dwolla_refresh_token_expires"),
"dwolla_test_user_id",
("dwolla_test_access_token", "dwolla_test_access_token_expires"),
("dwolla_test_refresh_token", "dwolla_test_refresh_token_expires"),
)})
)
search_fields = ('email', 'given_name', 'middle_name', 'surname')
ordering = ('-created',)
def email_confirmed(self, obj):
return obj.email == obj.confirmed_email
email_confirmed.boolean = True
class OrganizationAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('name', 'slug'),
}),
("Details", {
'classes': ('grp-collapse grp-closed',),
'fields': (
'description',
'website_url',
'facebook_url',
'banner_image',
('city', 'state_or_province'),
'country',
'dance_styles',
),
}),
("Permissions", {
'classes': ('grp-collapse grp-closed',),
'fields': ("owner", "editors"),
}),
("Event defaults", {
'classes': ('grp-collapse grp-closed',),
'fields': (
'default_event_city',
'default_event_state_or_province',
'default_event_country',
'default_event_dance_styles',
'default_event_timezone',
'default_event_currency',
),
}),
("Stripe info", {
'classes': ('grp-collapse grp-closed',),
'fields': (
("stripe_user_id", "stripe_access_token"),
("stripe_refresh_token", "stripe_publishable_key"),
("stripe_test_user_id", "stripe_test_access_token"),
("stripe_test_refresh_token", "stripe_test_publishable_key"),
)
}),
("Dwolla info", {
'classes': ('grp-collapse grp-closed',),
'fields': (
"dwolla_user_id",
("dwolla_access_token", "dwolla_access_token_expires"),
("dwolla_refresh_token", "dwolla_refresh_token_expires"),
"dwolla_test_user_id",
("dwolla_test_access_token", "dwolla_test_access_token_expires"),
("dwolla_test_refresh_token", "dwolla_test_refresh_token_expires"),
)
}),
("Check info", {
'classes': ('grp-collapse grp-closed',),
'fields': (
"check_payment_allowed",
"check_payable_to",
"check_recipient",
"check_address", "check_address_2",
("check_city", "check_state_or_province"),
("check_zip", "check_country"),
)
}),
("Dancerfly Internal", {
'classes': ('grp-collapse grp-closed',),
'fields': ('default_application_fee_percent',)
}),
)
raw_id_fields = ('owner',)
filter_horizontal = ("dance_styles", "default_event_dance_styles", "editors")
list_display = ('name', 'created', 'default_application_fee_percent', 'published_events', 'all_events')
ordering = ('-created',)
def get_queryset(self, request):
qs = super(OrganizationAdmin, self).get_queryset(request)
return qs.annotate(all_events=Count('event')).extra(select={
'published_events': """
SELECT COUNT(*) FROM brambling_event WHERE
brambling_event.organization_id = brambling_organization.id AND
brambling_event.is_published = 1
"""
})
def all_events(self, obj):
return obj.all_events
def published_events(self, obj):
return obj.published_events
class EventAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('name', 'slug', 'api_type'),
}),
("Details", {
'classes': ('grp-collapse grp-closed',),
'fields': (
('has_dances', 'has_classes'),
'privacy', 'is_published',
('website_url', 'banner_image'),
'description',
('city', 'state_or_province'),
'country',
'dance_styles',
'currency',
'check_postmark_cutoff',
),
}),
("Dates", {
'classes': ('grp-collapse grp-closed',),
'fields': (
'timezone',
('start_date', 'end_date'),
('start_time', 'end_time'),
)
}),
("Checkout", {
'classes': ('grp-collapse grp-closed',),
'fields': (
('collect_housing_data', 'collect_survey_data'),
'liability_waiver', 'cart_timeout',
)
}),
("Permissions", {
'classes': ('grp-collapse grp-closed',),
'fields': ("organization", "additional_editors"),
}),
("Dancerfly Internal", {
'classes': ('grp-collapse grp-closed',),
'fields': ('is_frozen', 'application_fee_percent')
}),
)
raw_id_fields = ('organization',)
filter_horizontal = ("dance_styles", "additional_editors")
radio_fields = {'api_type': admin.HORIZONTAL}
list_display = ('name', 'organization', 'is_published', 'is_frozen', 'created', 'application_fee_percent')
list_filter = ('organization', 'is_published', 'is_frozen')
ordering = ('-created',)
def get_queryset(self, request):
qs = super(EventAdmin, self).get_queryset(request)
return qs.select_related('organization')
class CustomFormFieldInline(admin.TabularInline):
model = CustomFormField
extra = 0
min_num = 1
sortable_field_name = "index"
class CustomFormAdmin(admin.ModelAdmin):
inlines = [CustomFormFieldInline]
radio_fields = {'form_type': admin.HORIZONTAL}
raw_id_fields = ('event',)
fields = ('name', 'form_type', 'event', 'index')
autocomplete_lookup_fields = {
'fk': ('event',),
}
admin.site.register(Person, PersonAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(CustomForm, CustomFormAdmin)
admin.site.register(DanceStyle)
admin.site.register(EnvironmentalFactor)
admin.site.register(DietaryRestriction)
admin.site.register(HousingCategory)
```
#### File: brambling/forms/user.py
```python
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import (AuthenticationForm, PasswordResetForm,
SetPasswordForm)
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
import floppyforms.__future__ as forms
from brambling.mail import ConfirmationMailer
from brambling.models import Person, Home, DanceStyle
from brambling.utils.international import clean_postal_code
from brambling.utils.payment import LIVE
class FloppyAuthenticationForm(AuthenticationForm):
username = forms.CharField(max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
class FloppyPasswordResetForm(PasswordResetForm):
email = forms.EmailField(label=_("Email"), max_length=254)
class FloppySetPasswordForm(SetPasswordForm):
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("Confirm password"),
widget=forms.PasswordInput)
class BasePersonForm(forms.ModelForm):
def __init__(self, request, *args, **kwargs):
self.request = request
super(BasePersonForm, self).__init__(*args, **kwargs)
def email_confirmation(self):
if 'email' in self.changed_data:
ConfirmationMailer(
person=self.instance,
site=get_current_site(self.request),
secure=self.request.is_secure()
).send([self.instance.email])
class SignUpForm(BasePersonForm):
error_messages = {
'duplicate_email': _("A user with that email already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above,"
" for verification."))
class Meta:
model = Person
fields = (
'email',
'given_name',
'middle_name',
'surname',
'name_order'
)
def clean_email(self):
# Since Person.email is unique, this check is redundant,
# but it sets a nicer error message.
email = self.cleaned_data["email"]
q = models.Q(email=email) | models.Q(confirmed_email=email)
if Person._default_manager.filter(q).exists():
raise ValidationError(
self.error_messages['duplicate_email'],
code='duplicate_email',
)
return email
def clean_password2(self):
password1 = self.cleaned_data.get("<PASSWORD>")
password2 = self.cleaned_data.get("<PASSWORD>")
if password1 and password2 and password1 != <PASSWORD>:
raise ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self):
person = super(SignUpForm, self).save(commit=False)
person.set_password(self.cleaned_data["<PASSWORD>"])
person.save()
person.dance_styles = DanceStyle.objects.all()
self.email_confirmation()
user = authenticate(email=self.cleaned_data['email'],
password=self.cleaned_data['<PASSWORD>'])
login(self.request, user)
return person
class PersonForm(BasePersonForm):
error_messages = {
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
'password_mismatch': _("The two password fields didn't match."),
}
disconnect_dwolla = forms.BooleanField(required=False)
old_password = forms.CharField(label=_("Old password"),
widget=forms.PasswordInput,
required=False)
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput,
required=False)
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput,
required=False)
class Meta:
model = Person
fields = ('email', 'given_name', 'middle_name', 'surname', 'name_order',
'phone', 'dance_styles', 'dietary_restrictions', 'ef_cause',
'ef_avoid', 'person_prefer', 'person_avoid',
'housing_prefer', 'other_needs')
def __init__(self, *args, **kwargs):
super(PersonForm, self).__init__(*args, **kwargs)
if not self.instance.dwolla_user_id:
del self.fields['disconnect_dwolla']
def clean_old_password(self):
"""
Validates that the old_password field is correct (if provided).
"""
old_password = self.cleaned_data["old_password"]
if old_password and not self.instance.check_password(old_password):
raise ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
def clean_new_password2(self):
"""
Validates that the passwords are the same and that the old_password
field was also provided.
"""
password1 = self.cleaned_data.get('<PASSWORD>')
password2 = self.cleaned_data.get('new_<PASSWORD>')
if password1 and password2:
if not self.cleaned_data["old_password"]:
self.add_error(
'old_password',
ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
)
if password1 != password2:
raise ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
self.instance.modified_directly = True
if self.cleaned_data.get('disconnect_dwolla'):
self.instance.clear_dwolla_data(LIVE)
if self.cleaned_data.get('new_password1'):
self.instance.set_password(self.cleaned_data['new_<PASSWORD>'])
person = super(PersonForm, self).save(commit)
if commit:
self.email_confirmation()
return person
class HomeForm(forms.ModelForm):
class Meta:
model = Home
exclude = ()
widgets = {
'country': forms.Select
}
def __init__(self, request, *args, **kwargs):
self.request = request
super(HomeForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(HomeForm, self).clean()
if 'country' in cleaned_data and 'zip_code' in cleaned_data:
country = cleaned_data['country']
code = cleaned_data['zip_code']
try:
cleaned_data['zip_code'] = clean_postal_code(country, code)
except ValidationError as e:
del cleaned_data['zip_code']
self.add_error('zip_code', e)
return cleaned_data
def save(self, commit=True):
instance = super(HomeForm, self).save(commit)
if self.request.user.home_id is None:
self.request.user.home = instance
self.request.user.save()
return instance
```
#### File: management/commands/update_tokens.py
```python
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from brambling.utils.payment import dwolla_update_tokens
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'--days',
action='store',
dest='days',
default=15,
help='Number of days ahead of time to update refresh tokens.'),
)
def handle(self, *args, **options):
try:
days = int(options['days'])
except ValueError:
raise CommandError("Days must be an integer value.")
self.stdout.write("Updating dwolla tokens...")
self.stdout.flush()
count, test_count = dwolla_update_tokens(days)
self.stdout.write("Live tokens updated: {}".format(count))
self.stdout.write("Test tokens updated: {}".format(test_count))
self.stdout.flush()
```
#### File: tests/functional/test_order_model.py
```python
from django.test import TestCase
from brambling.models import Order
from brambling.tests.factories import (EventFactory, OrderFactory, DiscountFactory, ItemFactory, ItemOptionFactory)
class OrderModelTestCase(TestCase):
def test_summary_data__base(self):
"""
Test that get_summary_data returns correct values for savings and
total cost.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
discount = DiscountFactory(amount=20, event=event, item_options=[item_option])
order.add_to_cart(item_option)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], 0)
self.assertEqual(summary_data['net_cost'], 100)
order.add_discount(discount)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
def test_summary_data__itemoption_changed(self):
"""
Test that get_summary_data returns correct values for savings and
total cost even if an itemoption was changed.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
discount = DiscountFactory(amount=20, event=event, item_options=[item_option])
order.add_to_cart(item_option)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], 0)
self.assertEqual(summary_data['net_cost'], 100)
order.add_discount(discount)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
item_option.price = 200
item_option.save()
# Make sure that the value isn't cached.
order = Order.objects.get(pk=order.pk)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
def test_summary_data__itemoption_deleted(self):
"""
Test that get_summary_data returns correct values for savings and
total cost even if an itemoption was deleted.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
discount = DiscountFactory(amount=20, event=event, item_options=[item_option])
order.add_to_cart(item_option)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], 0)
self.assertEqual(summary_data['net_cost'], 100)
order.add_discount(discount)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
item_option.delete()
# Make sure that the value isn't cached.
order = Order.objects.get(pk=order.pk)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
def test_summary_data__discount_changed(self):
"""
Test that get_summary_data returns correct values for savings and
total cost even if a discount was changed.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
discount = DiscountFactory(amount=20, event=event, item_options=[item_option])
order.add_to_cart(item_option)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], 0)
self.assertEqual(summary_data['net_cost'], 100)
order.add_discount(discount)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
discount.amount = 100
discount.save()
# Make sure that the value isn't cached.
order = Order.objects.get(pk=order.pk)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
def test_summary_data__discount_deleted(self):
"""
Test that get_summary_data returns correct values for savings and
total cost even if a discount was deleted.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
discount = DiscountFactory(amount=20, event=event, item_options=[item_option])
order.add_to_cart(item_option)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], 0)
self.assertEqual(summary_data['net_cost'], 100)
order.add_discount(discount)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
discount.delete()
# Make sure that the value isn't cached.
order = Order.objects.get(pk=order.pk)
summary_data = order.get_summary_data()
self.assertEqual(summary_data['gross_cost'], 100)
self.assertEqual(summary_data['total_savings'], -20)
self.assertEqual(summary_data['net_cost'], 80)
def test_cart_boughtitem_caching(self):
"""
Test that the cached boughtitem information is correct.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
order.add_to_cart(item_option)
item.delete()
self.assertTrue(order.bought_items.exists())
boughtitem = order.bought_items.all()[0]
self.assertTrue(boughtitem.item_option_id is None)
self.assertEqual(boughtitem.item_name, item.name)
self.assertEqual(boughtitem.item_description, item.description)
self.assertEqual(boughtitem.item_option_name, item_option.name)
self.assertEqual(boughtitem.price, item_option.price)
self.assertTrue(boughtitem.item_option_id is None)
def test_cart_discount_caching(self):
"""
Test that the cached discount information is correct.
"""
event = EventFactory()
order = OrderFactory(event=event)
item = ItemFactory(event=event)
item_option = ItemOptionFactory(price=100, item=item)
discount = DiscountFactory(amount=20, event=event, item_options=[item_option])
order.add_to_cart(item_option)
order.add_discount(discount)
discount.delete()
self.assertTrue(order.bought_items.exists())
boughtitem = order.bought_items.all()[0]
self.assertTrue(boughtitem.discounts.exists())
boughtitemdiscount = boughtitem.discounts.all()[0]
self.assertTrue(boughtitemdiscount.discount_id is None)
self.assertEqual(boughtitemdiscount.name, discount.name)
self.assertEqual(boughtitemdiscount.code, discount.code)
self.assertEqual(boughtitemdiscount.discount_type, discount.discount_type)
self.assertEqual(boughtitemdiscount.amount, discount.amount)
self.assertEqual(boughtitemdiscount.savings(), 20)
```
#### File: tests/functional/test_organizer_forms.py
```python
from django.test import TestCase
from brambling.forms.organizer import ManualPaymentForm
from brambling.models import Transaction
from brambling.tests.factories import OrderFactory, PersonFactory, CardFactory
class ManualPaymentFormTestCase(TestCase):
def test_creation(self):
order = OrderFactory()
user = PersonFactory()
form = ManualPaymentForm(order=order, user=user, data={'amount': 10, 'method': Transaction.FAKE})
self.assertFalse(form.errors)
self.assertTrue(form.is_bound)
txn = form.save()
self.assertEqual(txn.amount, 10)
self.assertEqual(txn.order, order)
self.assertEqual(txn.event, order.event)
self.assertEqual(txn.transaction_type, Transaction.PURCHASE)
self.assertEqual(txn.method, Transaction.FAKE)
self.assertEqual(txn.created_by, user)
self.assertEqual(txn.is_confirmed, True)
self.assertEqual(txn.api_type, order.event.api_type)
```
#### File: tests/integration/test_dwolla.py
```python
from decimal import Decimal
from django.conf import settings
from django.test import TestCase
from dwolla import transactions
from brambling.models import Event, Transaction
from brambling.tests.factories import EventFactory, PersonFactory, OrderFactory
from brambling.utils.payment import dwolla_prep, dwolla_charge, dwolla_refund, dwolla_get_token
CHARGE_DATA = {
u'Amount': 42.15,
u'ClearingDate': u'',
u'Date': u'2015-01-31T02:41:38Z',
u'Destination': {u'Id': u'812-158-1368',
u'Image': u'http://uat.dwolla.com/avatars/812-158-1368',
u'Name': u'Blah blah blah',
u'Type': u'Dwolla'},
u'DestinationId': u'812-158-1368',
u'DestinationName': u'Blah blah blah',
u'Fees': [{u'Amount': 0.25, u'Id': 827529, u'Type': u'Dwolla Fee'},
{u'Amount': 0.01, u'Id': 827530, u'Type': u'Facilitator Fee'}],
u'Id': 827527,
u'Metadata': None,
u'Notes': u'',
u'OriginalTransactionId': None,
u'Source': {u'Id': u'812-743-0925',
u'Image': u'http://uat.dwolla.com/avatars/812-743-0925',
u'Name': u'<NAME>',
u'Type': u'Dwolla'},
u'SourceId': u'812-743-0925',
u'SourceName': u'<NAME>',
u'Status': u'processed',
u'Type': u'money_received',
u'UserType': u'Dwolla'
}
class DwollaChargeTestCase(TestCase):
def test_dwolla_charge__user(self):
event = EventFactory(api_type=Event.TEST,
application_fee_percent=Decimal('2.5'))
self.assertTrue(event.dwolla_connected())
dwolla_prep(Event.TEST)
person = PersonFactory()
order = OrderFactory(person=person, event=event)
charge = dwolla_charge(
sender=person,
amount=42.15,
order=order,
event=event,
pin=settings.DWOLLA_TEST_USER_PIN,
source='Balance',
)
self.assertIsInstance(charge, dict)
self.assertEqual(charge["Type"], "money_received")
self.assertEqual(len(charge['Fees']), 2)
self.assertEqual(charge["Notes"], "Order {} for {}".format(order.code, event.name))
txn = Transaction.from_dwolla_charge(charge, event=event)
# 42.15 * 0.025 = 1.05
self.assertEqual(Decimal(txn.application_fee), Decimal('1.05'))
# 0.25
self.assertEqual(Decimal(txn.processing_fee), Decimal('0.25'))
refund = dwolla_refund(
order=order,
event=event,
payment_id=txn.remote_id,
amount=txn.amount,
pin=settings.DWOLLA_TEST_ORGANIZATION_PIN
)
self.assertIsInstance(refund, dict)
self.assertEqual(refund["Amount"], txn.amount)
refund_info = transactions.info(
tid=str(refund['TransactionId']),
alternate_token=dwolla_get_token(event.organization, event.api_type)
)
self.assertEqual(refund_info["Notes"], "Order {} for {}".format(order.code, event.name))
refund_txn = Transaction.from_dwolla_refund(refund, txn, event=event)
self.assertEqual(refund_txn.amount, -1 * txn.amount)
self.assertEqual(refund_txn.application_fee, 0)
self.assertEqual(refund_txn.processing_fee, 0)
``` |
{
"source": "jpoehnelt/boltons",
"score": 3
} |
#### File: boltons/boltons/iterutils.py
```python
import os
import math
import time
import codecs
import random
import socket
import hashlib
import itertools
try:
from collections.abc import Mapping, Sequence, Set, ItemsView, Iterable
except ImportError:
from collections import Mapping, Sequence, Set, ItemsView, Iterable
try:
from typeutils import make_sentinel
_UNSET = make_sentinel('_UNSET')
_REMAP_EXIT = make_sentinel('_REMAP_EXIT')
except ImportError:
_REMAP_EXIT = object()
_UNSET = object()
try:
from future_builtins import filter
from itertools import izip
_IS_PY3 = False
except ImportError:
# Python 3 compat
_IS_PY3 = True
basestring = (str, bytes)
izip, xrange = zip, range
def is_iterable(obj):
"""Similar in nature to :func:`callable`, ``is_iterable`` returns
``True`` if an object is `iterable`_, ``False`` if not.
>>> is_iterable([])
True
>>> is_iterable(object())
False
.. _iterable: https://docs.python.org/2/glossary.html#term-iterable
"""
try:
iter(obj)
except TypeError:
return False
return True
def is_scalar(obj):
"""A near-mirror of :func:`is_iterable`. Returns ``False`` if an
object is an iterable container type. Strings are considered
scalar as well, because strings are more often treated as whole
values as opposed to iterables of 1-character substrings.
>>> is_scalar(object())
True
>>> is_scalar(range(10))
False
>>> is_scalar('hello')
True
"""
return not is_iterable(obj) or isinstance(obj, basestring)
def is_collection(obj):
"""The opposite of :func:`is_scalar`. Returns ``True`` if an object
is an iterable other than a string.
>>> is_collection(object())
False
>>> is_collection(range(10))
True
>>> is_collection('hello')
False
"""
return is_iterable(obj) and not isinstance(obj, basestring)
def split(src, sep=None, maxsplit=None):
"""Splits an iterable based on a separator. Like :meth:`str.split`,
but for all iterables. Returns a list of lists.
>>> split(['hi', 'hello', None, None, 'sup', None, 'soap', None])
[['hi', 'hello'], ['sup'], ['soap']]
See :func:`split_iter` docs for more info.
"""
return list(split_iter(src, sep, maxsplit))
def split_iter(src, sep=None, maxsplit=None):
"""Splits an iterable based on a separator, *sep*, a max of
*maxsplit* times (no max by default). *sep* can be:
* a single value
* an iterable of separators
* a single-argument callable that returns True when a separator is
encountered
``split_iter()`` yields lists of non-separator values. A separator will
never appear in the output.
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None, 'soap', None]))
[['hi', 'hello'], ['sup'], ['soap']]
Note that ``split_iter`` is based on :func:`str.split`, so if
*sep* is ``None``, ``split()`` **groups** separators. If empty lists
are desired between two contiguous ``None`` values, simply use
``sep=[None]``:
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None]))
[['hi', 'hello'], ['sup']]
>>> list(split_iter(['hi', 'hello', None, None, 'sup', None], sep=[None]))
[['hi', 'hello'], [], ['sup'], []]
Using a callable separator:
>>> falsy_sep = lambda x: not x
>>> list(split_iter(['hi', 'hello', None, '', 'sup', False], falsy_sep))
[['hi', 'hello'], [], ['sup'], []]
See :func:`split` for a list-returning version.
"""
if not is_iterable(src):
raise TypeError('expected an iterable')
if maxsplit is not None:
maxsplit = int(maxsplit)
if maxsplit == 0:
yield [src]
return
if callable(sep):
sep_func = sep
elif not is_scalar(sep):
sep = frozenset(sep)
sep_func = lambda x: x in sep
else:
sep_func = lambda x: x == sep
cur_group = []
split_count = 0
for s in src:
if maxsplit is not None and split_count >= maxsplit:
sep_func = lambda x: False
if sep_func(s):
if sep is None and not cur_group:
# If sep is none, str.split() "groups" separators
# check the str.split() docs for more info
continue
split_count += 1
yield cur_group
cur_group = []
else:
cur_group.append(s)
if cur_group or sep is not None:
yield cur_group
return
def chunked(src, size, count=None, **kw):
"""Returns a list of *count* chunks, each with *size* elements,
generated from iterable *src*. If *src* is not evenly divisible by
*size*, the final chunk will have fewer than *size* elements.
Provide the *fill* keyword argument to provide a pad value and
enable padding, otherwise no padding will take place.
>>> chunked(range(10), 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> chunked(range(10), 3, fill=None)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
>>> chunked(range(10), 3, count=2)
[[0, 1, 2], [3, 4, 5]]
See :func:`chunked_iter` for more info.
"""
chunk_iter = chunked_iter(src, size, **kw)
if count is None:
return list(chunk_iter)
else:
return list(itertools.islice(chunk_iter, count))
def chunked_iter(src, size, **kw):
"""Generates *size*-sized chunks from *src* iterable. Unless the
optional *fill* keyword argument is provided, iterables not even
divisible by *size* will have a final chunk that is smaller than
*size*.
>>> list(chunked_iter(range(10), 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(chunked_iter(range(10), 3, fill=None))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
Note that ``fill=None`` in fact uses ``None`` as the fill value.
"""
# TODO: add count kwarg?
if not is_iterable(src):
raise TypeError('expected an iterable')
size = int(size)
if size <= 0:
raise ValueError('expected a positive integer chunk size')
do_fill = True
try:
fill_val = kw.pop('fill')
except KeyError:
do_fill = False
fill_val = None
if kw:
raise ValueError('got unexpected keyword arguments: %r' % kw.keys())
if not src:
return
postprocess = lambda chk: chk
if isinstance(src, basestring):
postprocess = lambda chk, _sep=type(src)(): _sep.join(chk)
src_iter = iter(src)
while True:
cur_chunk = list(itertools.islice(src_iter, size))
if not cur_chunk:
break
lc = len(cur_chunk)
if lc < size and do_fill:
cur_chunk[lc:] = [fill_val] * (size - lc)
yield postprocess(cur_chunk)
return
def pairwise(src):
"""Convenience function for calling :func:`windowed` on *src*, with
*size* set to 2.
>>> pairwise(range(5))
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> pairwise([])
[]
The number of pairs is always one less than the number of elements
in the iterable passed in, except on empty inputs, which returns
an empty list.
"""
return windowed(src, 2)
def pairwise_iter(src):
"""Convenience function for calling :func:`windowed_iter` on *src*,
with *size* set to 2.
>>> list(pairwise_iter(range(5)))
[(0, 1), (1, 2), (2, 3), (3, 4)]
>>> list(pairwise_iter([]))
[]
The number of pairs is always one less than the number of elements
in the iterable passed in, or zero, when *src* is empty.
"""
return windowed_iter(src, 2)
def windowed(src, size):
"""Returns tuples with exactly length *size*. If the iterable is
too short to make a window of length *size*, no tuples are
returned. See :func:`windowed_iter` for more.
"""
return list(windowed_iter(src, size))
def windowed_iter(src, size):
"""Returns tuples with length *size* which represent a sliding
window over iterable *src*.
>>> list(windowed_iter(range(7), 3))
[(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]
If the iterable is too short to make a window of length *size*,
then no window tuples are returned.
>>> list(windowed_iter(range(3), 5))
[]
"""
# TODO: lists? (for consistency)
tees = itertools.tee(src, size)
try:
for i, t in enumerate(tees):
for _ in xrange(i):
next(t)
except StopIteration:
return izip([])
return izip(*tees)
def xfrange(stop, start=None, step=1.0):
"""Same as :func:`frange`, but generator-based instead of returning a
list.
>>> tuple(xfrange(1, 3, step=0.75))
(1.0, 1.75, 2.5)
See :func:`frange` for more details.
"""
if not step:
raise ValueError('step must be non-zero')
if start is None:
start, stop = 0.0, stop * 1.0
else:
# swap when all args are used
stop, start = start * 1.0, stop * 1.0
cur = start
while cur < stop:
yield cur
cur += step
def frange(stop, start=None, step=1.0):
"""A :func:`range` clone for float-based ranges.
>>> frange(5)
[0.0, 1.0, 2.0, 3.0, 4.0]
>>> frange(6, step=1.25)
[0.0, 1.25, 2.5, 3.75, 5.0]
>>> frange(100.5, 101.5, 0.25)
[100.5, 100.75, 101.0, 101.25]
>>> frange(5, 0)
[]
>>> frange(5, 0, step=-1.25)
[5.0, 3.75, 2.5, 1.25]
"""
if not step:
raise ValueError('step must be non-zero')
if start is None:
start, stop = 0.0, stop * 1.0
else:
# swap when all args are used
stop, start = start * 1.0, stop * 1.0
count = int(math.ceil((stop - start) / step))
ret = [None] * count
if not ret:
return ret
ret[0] = start
for i in xrange(1, count):
ret[i] = ret[i - 1] + step
return ret
def backoff(start, stop, count=None, factor=2.0, jitter=False):
"""Returns a list of geometrically-increasing floating-point numbers,
suitable for usage with `exponential backoff`_. Exactly like
:func:`backoff_iter`, but without the ``'repeat'`` option for
*count*. See :func:`backoff_iter` for more details.
.. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff
>>> backoff(1, 10)
[1.0, 2.0, 4.0, 8.0, 10.0]
"""
if count == 'repeat':
raise ValueError("'repeat' supported in backoff_iter, not backoff")
return list(backoff_iter(start, stop, count=count,
factor=factor, jitter=jitter))
def backoff_iter(start, stop, count=None, factor=2.0, jitter=False):
"""Generates a sequence of geometrically-increasing floats, suitable
for usage with `exponential backoff`_. Starts with *start*,
increasing by *factor* until *stop* is reached, optionally
stopping iteration once *count* numbers are yielded. *factor*
defaults to 2. In general retrying with properly-configured
backoff creates a better-behaved component for a larger service
ecosystem.
.. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff
>>> list(backoff_iter(1.0, 10.0, count=5))
[1.0, 2.0, 4.0, 8.0, 10.0]
>>> list(backoff_iter(1.0, 10.0, count=8))
[1.0, 2.0, 4.0, 8.0, 10.0, 10.0, 10.0, 10.0]
>>> list(backoff_iter(0.25, 100.0, factor=10))
[0.25, 2.5, 25.0, 100.0]
A simplified usage example:
.. code-block:: python
for timeout in backoff_iter(0.25, 5.0):
try:
res = network_call()
break
except Exception as e:
log(e)
time.sleep(timeout)
An enhancement for large-scale systems would be to add variation,
or *jitter*, to timeout values. This is done to avoid a thundering
herd on the receiving end of the network call.
Finally, for *count*, the special value ``'repeat'`` can be passed to
continue yielding indefinitely.
Args:
start (float): Positive number for baseline.
stop (float): Positive number for maximum.
count (int): Number of steps before stopping
iteration. Defaults to the number of steps between *start* and
*stop*. Pass the string, `'repeat'`, to continue iteration
indefinitely.
factor (float): Rate of exponential increase. Defaults to `2.0`,
e.g., `[1, 2, 4, 8, 16]`.
jitter (float): A factor between `-1.0` and `1.0`, used to
uniformly randomize and thus spread out timeouts in a distributed
system, avoiding rhythm effects. Positive values use the base
backoff curve as a maximum, negative values use the curve as a
minimum. Set to 1.0 or `True` for a jitter approximating
Ethernet's time-tested backoff solution. Defaults to `False`.
"""
start = float(start)
stop = float(stop)
factor = float(factor)
if start < 0.0:
raise ValueError('expected start >= 0, not %r' % start)
if factor < 1.0:
raise ValueError('expected factor >= 1.0, not %r' % factor)
if stop == 0.0:
raise ValueError('expected stop >= 0')
if stop < start:
raise ValueError('expected stop >= start, not %r' % stop)
if count is None:
denom = start if start else 1
count = 1 + math.ceil(math.log(stop/denom, factor))
count = count if start else count + 1
if count != 'repeat' and count < 0:
raise ValueError('count must be positive or "repeat", not %r' % count)
if jitter:
jitter = float(jitter)
if not (-1.0 <= jitter <= 1.0):
raise ValueError('expected jitter -1 <= j <= 1, not: %r' % jitter)
cur, i = start, 0
while count == 'repeat' or i < count:
if not jitter:
cur_ret = cur
elif jitter:
cur_ret = cur - (cur * jitter * random.random())
yield cur_ret
i += 1
if cur == 0:
cur = 1
elif cur < stop:
cur *= factor
if cur > stop:
cur = stop
return
def bucketize(src, key=None, value_transform=None, key_filter=None):
"""Group values in the *src* iterable by the value returned by *key*,
which defaults to :class:`bool`, grouping values by truthiness.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
Bucketize into more than two groups:
>>> bucketize(range(10), lambda x: x % 3)
{0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}
``bucketize`` has a couple of advanced options useful in certain
cases. *value_transform* can be used to modify values as they are
added to buckets, and *key_filter* will allow excluding certain
buckets from being collected.
>>> bucketize(range(5), value_transform=lambda x: x*x)
{False: [0], True: [1, 4, 9, 16]}
>>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1)
{0: [0, 3, 6, 9], 2: [2, 5, 8]}
Note in some of these examples there were at most two keys, ``True`` and
``False``, and each key present has a list with at least one
item. See :func:`partition` for a version specialized for binary
use cases.
"""
if not is_iterable(src):
raise TypeError('expected an iterable')
if key is None:
key = bool
if not callable(key):
raise TypeError('expected callable key function')
if value_transform is None:
value_transform = lambda x: x
if not callable(value_transform):
raise TypeError('expected callable value transform function')
ret = {}
for val in src:
key_of_val = key(val)
if key_filter is None or key_filter(key_of_val):
ret.setdefault(key_of_val, []).append(value_transform(val))
return ret
def partition(src, key=None):
"""No relation to :meth:`str.partition`, ``partition`` is like
:func:`bucketize`, but for added convenience returns a tuple of
``(truthy_values, falsy_values)``.
>>> nonempty, empty = partition(['', '', 'hi', '', 'bye'])
>>> nonempty
['hi', 'bye']
*key* defaults to :class:`bool`, but can be carefully overridden to
use any function that returns either ``True`` or ``False``.
>>> import string
>>> is_digit = lambda x: x in string.digits
>>> decimal_digits, hexletters = partition(string.hexdigits, is_digit)
>>> ''.join(decimal_digits), ''.join(hexletters)
('0123456789', 'abcdefABCDEF')
"""
bucketized = bucketize(src, key)
return bucketized.get(True, []), bucketized.get(False, [])
def unique(src, key=None):
"""``unique()`` returns a list of unique values, as determined by
*key*, in the order they first appeared in the input iterable,
*src*.
>>> ones_n_zeros = '11010110001010010101010'
>>> ''.join(unique(ones_n_zeros))
'10'
See :func:`unique_iter` docs for more details.
"""
return list(unique_iter(src, key))
def unique_iter(src, key=None):
"""Yield unique elements from the iterable, *src*, based on *key*,
in the order in which they first appeared in *src*.
>>> repetitious = [1, 2, 3] * 10
>>> list(unique_iter(repetitious))
[1, 2, 3]
By default, *key* is the object itself, but *key* can either be a
callable or, for convenience, a string name of the attribute on
which to uniqueify objects, falling back on identity when the
attribute is not present.
>>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes']
>>> list(unique_iter(pleasantries, key=lambda x: len(x)))
['hi', 'hello', 'bye']
"""
if not is_iterable(src):
raise TypeError('expected an iterable, not %r' % type(src))
if key is None:
key_func = lambda x: x
elif callable(key):
key_func = key
elif isinstance(key, basestring):
key_func = lambda x: getattr(x, key, x)
else:
raise TypeError('"key" expected a string or callable, not %r' % key)
seen = set()
for i in src:
k = key_func(i)
if k not in seen:
seen.add(k)
yield i
return
def redundant(src, key=None, groups=False):
"""The complement of :func:`unique()`.
By default returns non-unique values as a list of the *first*
redundant value in *src*. Pass ``groups=True`` to get groups of
all values with redundancies, ordered by position of the first
redundant value. This is useful in conjunction with some
normalizing *key* function.
>>> redundant([1, 2, 3, 4])
[]
>>> redundant([1, 2, 3, 2, 3, 3, 4])
[2, 3]
>>> redundant([1, 2, 3, 2, 3, 3, 4], groups=True)
[[2, 2], [3, 3, 3]]
An example using a *key* function to do case-insensitive
redundancy detection.
>>> redundant(['hi', 'Hi', 'HI', 'hello'], key=str.lower)
['Hi']
>>> redundant(['hi', 'Hi', 'HI', 'hello'], groups=True, key=str.lower)
[['hi', 'Hi', 'HI']]
*key* should also be used when the values in *src* are not hashable.
.. note::
The output of this function is designed for reporting
duplicates in contexts where a unique input is desired. Due to
the grouped return type, there is no streaming equivalent of
this function for the time being.
"""
if key is None:
pass
elif callable(key):
key_func = key
elif isinstance(key, basestring):
key_func = lambda x: getattr(x, key, x)
else:
raise TypeError('"key" expected a string or callable, not %r' % key)
seen = {} # key to first seen item
redundant_order = []
redundant_groups = {}
for i in src:
k = key_func(i) if key else i
if k not in seen:
seen[k] = i
else:
if k in redundant_groups:
if groups:
redundant_groups[k].append(i)
else:
redundant_order.append(k)
redundant_groups[k] = [seen[k], i]
if not groups:
ret = [redundant_groups[k][1] for k in redundant_order]
else:
ret = [redundant_groups[k] for k in redundant_order]
return ret
def one(src, default=None, key=None):
"""Along the same lines as builtins, :func:`all` and :func:`any`, and
similar to :func:`first`, ``one()`` returns the single object in
the given iterable *src* that evaluates to ``True``, as determined
by callable *key*. If unset, *key* defaults to :class:`bool`. If
no such objects are found, *default* is returned. If *default* is
not passed, ``None`` is returned.
If *src* has more than one object that evaluates to ``True``, or
if there is no object that fulfills such condition, return
*default*. It's like an `XOR`_ over an iterable.
>>> one((True, False, False))
True
>>> one((True, False, True))
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
>>> one((True, True), default=False)
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), key=lambda i: i > 40)
42
See `<NAME>'s original repo`_ for further use cases.
.. _<NAME>'s original repo: https://github.com/mgaitan/one
.. _XOR: https://en.wikipedia.org/wiki/Exclusive_or
"""
ones = list(itertools.islice(filter(key, src), 2))
return ones[0] if len(ones) == 1 else default
def first(iterable, default=None, key=None):
"""Return first element of *iterable* that evaluates to ``True``, else
return ``None`` or optional *default*. Similar to :func:`one`.
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional *key* argument specifies a one-argument predicate function
like that used for *filter()*. The *key* argument, if supplied, should be
in keyword form. For example, finding the first even number in an iterable:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
Contributed by <NAME>, author of `the original standalone module`_.
.. _the original standalone module: https://github.com/hynek/first
"""
return next(filter(key, iterable), default)
def flatten_iter(iterable):
"""``flatten_iter()`` yields all the elements from *iterable* while
collapsing any nested iterables.
>>> nested = [[1, 2], [[3], [4, 5]]]
>>> list(flatten_iter(nested))
[1, 2, 3, 4, 5]
"""
for item in iterable:
if isinstance(item, Iterable) and not isinstance(item, basestring):
for subitem in flatten_iter(item):
yield subitem
else:
yield item
def flatten(iterable):
"""``flatten()`` returns a collapsed list of all the elements from
*iterable* while collapsing any nested iterables.
>>> nested = [[1, 2], [[3], [4, 5]]]
>>> flatten(nested)
[1, 2, 3, 4, 5]
"""
return list(flatten_iter(iterable))
def same(iterable, ref=_UNSET):
"""``same()`` returns ``True`` when all values in *iterable* are
equal to one another, or optionally a reference value,
*ref*. Similar to :func:`all` and :func:`any` in that it evaluates
an iterable and returns a :class:`bool`. ``same()`` returns
``True`` for empty iterables.
>>> same([])
True
>>> same([1])
True
>>> same(['a', 'a', 'a'])
True
>>> same(range(20))
False
>>> same([[], []])
True
>>> same([[], []], ref='test')
False
"""
iterator = iter(iterable)
if ref is _UNSET:
ref = next(iterator, ref)
return all(val == ref for val in iterator)
def default_visit(path, key, value):
# print('visit(%r, %r, %r)' % (path, key, value))
return key, value
# enable the extreme: monkeypatching iterutils with a different default_visit
_orig_default_visit = default_visit
def default_enter(path, key, value):
# print('enter(%r, %r)' % (key, value))
if isinstance(value, basestring):
return value, False
elif isinstance(value, Mapping):
return value.__class__(), ItemsView(value)
elif isinstance(value, Sequence):
return value.__class__(), enumerate(value)
elif isinstance(value, Set):
return value.__class__(), enumerate(value)
else:
# files, strings, other iterables, and scalars are not
# traversed
return value, False
def default_exit(path, key, old_parent, new_parent, new_items):
# print('exit(%r, %r, %r, %r, %r)'
# % (path, key, old_parent, new_parent, new_items))
ret = new_parent
if isinstance(new_parent, Mapping):
new_parent.update(new_items)
elif isinstance(new_parent, Sequence):
vals = [v for i, v in new_items]
try:
new_parent.extend(vals)
except AttributeError:
ret = new_parent.__class__(vals) # tuples
elif isinstance(new_parent, Set):
vals = [v for i, v in new_items]
try:
new_parent.update(vals)
except AttributeError:
ret = new_parent.__class__(vals) # frozensets
else:
raise RuntimeError('unexpected iterable type: %r' % type(new_parent))
return ret
def remap(root, visit=default_visit, enter=default_enter, exit=default_exit,
**kwargs):
"""The remap ("recursive map") function is used to traverse and
transform nested structures. Lists, tuples, sets, and dictionaries
are just a few of the data structures nested into heterogeneous
tree-like structures that are so common in programming.
Unfortunately, Python's built-in ways to manipulate collections
are almost all flat. List comprehensions may be fast and succinct,
but they do not recurse, making it tedious to apply quick changes
or complex transforms to real-world data.
remap goes where list comprehensions cannot.
Here's an example of removing all Nones from some data:
>>> from pprint import pprint
>>> reviews = {'Star Trek': {'TNG': 10, 'DS9': 8.5, 'ENT': None},
... 'Babylon 5': 6, 'Dr. Who': None}
>>> pprint(remap(reviews, lambda p, k, v: v is not None))
{'Babylon 5': 6, 'Star Trek': {'DS9': 8.5, 'TNG': 10}}
Notice how both Nones have been removed despite the nesting in the
dictionary. Not bad for a one-liner, and that's just the beginning.
See `this remap cookbook`_ for more delicious recipes.
.. _this remap cookbook: http://sedimental.org/remap.html
remap takes four main arguments: the object to traverse and three
optional callables which determine how the remapped object will be
created.
Args:
root: The target object to traverse. By default, remap
supports iterables like :class:`list`, :class:`tuple`,
:class:`dict`, and :class:`set`, but any object traversable by
*enter* will work.
visit (callable): This function is called on every item in
*root*. It must accept three positional arguments, *path*,
*key*, and *value*. *path* is simply a tuple of parents'
keys. *visit* should return the new key-value pair. It may
also return ``True`` as shorthand to keep the old item
unmodified, or ``False`` to drop the item from the new
structure. *visit* is called after *enter*, on the new parent.
The *visit* function is called for every item in root,
including duplicate items. For traversable values, it is
called on the new parent object, after all its children
have been visited. The default visit behavior simply
returns the key-value pair unmodified.
enter (callable): This function controls which items in *root*
are traversed. It accepts the same arguments as *visit*: the
path, the key, and the value of the current item. It returns a
pair of the blank new parent, and an iterator over the items
which should be visited. If ``False`` is returned instead of
an iterator, the value will not be traversed.
The *enter* function is only called once per unique value. The
default enter behavior supports mappings, sequences, and
sets. Strings and all other iterables will not be traversed.
exit (callable): This function determines how to handle items
once they have been visited. It gets the same three
arguments as the other functions -- *path*, *key*, *value*
-- plus two more: the blank new parent object returned
from *enter*, and a list of the new items, as remapped by
*visit*.
Like *enter*, the *exit* function is only called once per
unique value. The default exit behavior is to simply add
all new items to the new parent, e.g., using
:meth:`list.extend` and :meth:`dict.update` to add to the
new parent. Immutable objects, such as a :class:`tuple` or
:class:`namedtuple`, must be recreated from scratch, but
use the same type as the new parent passed back from the
*enter* function.
reraise_visit (bool): A pragmatic convenience for the *visit*
callable. When set to ``False``, remap ignores any errors
raised by the *visit* callback. Items causing exceptions
are kept. See examples for more details.
remap is designed to cover the majority of cases with just the
*visit* callable. While passing in multiple callables is very
empowering, remap is designed so very few cases should require
passing more than one function.
When passing *enter* and *exit*, it's common and easiest to build
on the default behavior. Simply add ``from boltons.iterutils import
default_enter`` (or ``default_exit``), and have your enter/exit
function call the default behavior before or after your custom
logic. See `this example`_.
Duplicate and self-referential objects (aka reference loops) are
automatically handled internally, `as shown here`_.
.. _this example: http://sedimental.org/remap.html#sort_all_lists
.. _as shown here: http://sedimental.org/remap.html#corner_cases
"""
# TODO: improve argument formatting in sphinx doc
# TODO: enter() return (False, items) to continue traverse but cancel copy?
if not callable(visit):
raise TypeError('visit expected callable, not: %r' % visit)
if not callable(enter):
raise TypeError('enter expected callable, not: %r' % enter)
if not callable(exit):
raise TypeError('exit expected callable, not: %r' % exit)
reraise_visit = kwargs.pop('reraise_visit', True)
if kwargs:
raise TypeError('unexpected keyword arguments: %r' % kwargs.keys())
path, registry, stack = (), {}, [(None, root)]
new_items_stack = []
while stack:
key, value = stack.pop()
id_value = id(value)
if key is _REMAP_EXIT:
key, new_parent, old_parent = value
id_value = id(old_parent)
path, new_items = new_items_stack.pop()
value = exit(path, key, old_parent, new_parent, new_items)
registry[id_value] = value
if not new_items_stack:
continue
elif id_value in registry:
value = registry[id_value]
else:
res = enter(path, key, value)
try:
new_parent, new_items = res
except TypeError:
# TODO: handle False?
raise TypeError('enter should return a tuple of (new_parent,'
' items_iterator), not: %r' % res)
if new_items is not False:
# traverse unless False is explicitly passed
registry[id_value] = new_parent
new_items_stack.append((path, []))
if value is not root:
path += (key,)
stack.append((_REMAP_EXIT, (key, new_parent, value)))
if new_items:
stack.extend(reversed(list(new_items)))
continue
if visit is _orig_default_visit:
# avoid function call overhead by inlining identity operation
visited_item = (key, value)
else:
try:
visited_item = visit(path, key, value)
except Exception:
if reraise_visit:
raise
visited_item = True
if visited_item is False:
continue # drop
elif visited_item is True:
visited_item = (key, value)
# TODO: typecheck?
# raise TypeError('expected (key, value) from visit(),'
# ' not: %r' % visited_item)
try:
new_items_stack[-1][1].append(visited_item)
except IndexError:
raise TypeError('expected remappable root, not: %r' % root)
return value
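# --- Hedged usage sketch (added for illustration, not part of the original
# module). As the docstring suggests, custom behavior is easiest to build on
# top of default_exit; here every nested list in the result gets sorted. The
# helper name and the sample data are assumptions.
def _demo_sort_all_lists():
    def sorted_exit(path, key, old_parent, new_parent, new_items):
        ret = default_exit(path, key, old_parent, new_parent, new_items)
        if isinstance(ret, list):
            ret.sort()
        return ret
    data = {'a': [3, 1, 2], 'b': {'c': [9, 7, 8]}}
    # expected: {'a': [1, 2, 3], 'b': {'c': [7, 8, 9]}}
    return remap(data, exit=sorted_exit)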
class PathAccessError(KeyError, IndexError, TypeError):
"""An amalgamation of KeyError, IndexError, and TypeError,
representing what can occur when looking up a path in a nested
object.
"""
def __init__(self, exc, seg, path):
self.exc = exc
self.seg = seg
self.path = path
def __repr__(self):
cn = self.__class__.__name__
return '%s(%r, %r, %r)' % (cn, self.exc, self.seg, self.path)
def __str__(self):
return ('could not access %r from path %r, got error: %r'
% (self.seg, self.path, self.exc))
def get_path(root, path, default=_UNSET):
"""Retrieve a value from a nested object via a tuple representing the
lookup path.
>>> root = {'a': {'b': {'c': [[1], [2], [3]]}}}
>>> get_path(root, ('a', 'b', 'c', 2, 0))
3
The path format is intentionally consistent with that of
:func:`remap`.
One of get_path's chief aims is improved error messaging. EAFP is
great, but the error messages are not.
For instance, ``root['a']['b']['c'][2][1]`` gives back
``IndexError: list index out of range``
What went out of range where? get_path currently raises
``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2,
1), got error: IndexError('list index out of range',)``, a
subclass of IndexError and KeyError.
You can also pass a default that covers the entire operation,
should the lookup fail at any level.
Args:
root: The target nesting of dictionaries, lists, or other
objects supporting ``__getitem__``.
path (tuple): A list of strings and integers to be successively
looked up within *root*.
default: The value to be returned should any
``PathAccessError`` exceptions be raised.
"""
if isinstance(path, basestring):
path = path.split('.')
cur = root
try:
for seg in path:
try:
cur = cur[seg]
except (KeyError, IndexError) as exc:
raise PathAccessError(exc, seg, path)
except TypeError as exc:
# either string index in a list, or a parent that
# doesn't support indexing
try:
seg = int(seg)
cur = cur[seg]
except (ValueError, KeyError, IndexError, TypeError):
if not is_iterable(cur):
exc = TypeError('%r object is not indexable'
% type(cur).__name__)
raise PathAccessError(exc, seg, path)
except PathAccessError:
if default is _UNSET:
raise
return default
return cur
def research(root, query=lambda p, k, v: True, reraise=False):
"""The :func:`research` function uses :func:`remap` to recurse over
any data nested in *root*, and find values which match a given
criterion, specified by the *query* callable.
Results are returned as a list of ``(path, value)`` pairs. The
paths are tuples in the same format accepted by
:func:`get_path`. This can be useful for comparing values nested
in two or more different structures.
Here's a simple example that finds all integers:
>>> root = {'a': {'b': 1, 'c': (2, 'd', 3)}, 'e': None}
>>> res = research(root, query=lambda p, k, v: isinstance(v, int))
>>> print(sorted(res))
[(('a', 'b'), 1), (('a', 'c', 0), 2), (('a', 'c', 2), 3)]
Note how *query* follows the same, familiar ``path, key, value``
signature as the ``visit`` and ``enter`` functions on
:func:`remap`, and returns a :class:`bool`.
Args:
root: The target object to search. Supports the same types of
objects as :func:`remap`, including :class:`list`,
:class:`tuple`, :class:`dict`, and :class:`set`.
query (callable): The function called on every object to
determine whether to include it in the search results. The
callable must accept three arguments, *path*, *key*, and
*value*, commonly abbreviated *p*, *k*, and *v*, same as
*enter* and *visit* from :func:`remap`.
reraise (bool): Whether to reraise exceptions raised by *query*
or to simply drop the result that caused the error.
With :func:`research` it's easy to inspect the details of a data
structure, like finding values that are at a certain depth (using
``len(p)``) and much more. If more advanced functionality is
needed, check out the code and make your own :func:`remap`
wrapper, and consider `submitting a patch`_!
.. _submitting a patch: https://github.com/mahmoud/boltons/pulls
"""
ret = []
if not callable(query):
raise TypeError('query expected callable, not: %r' % query)
def enter(path, key, value):
try:
if query(path, key, value):
ret.append((path + (key,), value))
except Exception:
if reraise:
raise
return default_enter(path, key, value)
remap(root, enter=enter)
return ret
# TODO: recollect()
# TODO: refilter()
# TODO: reiter()
# GUID iterators: 10x faster and somewhat more compact than uuid.
class GUIDerator(object):
"""The GUIDerator is an iterator that yields a globally-unique
identifier (GUID) on every iteration. The GUIDs produced are
hexadecimal strings.
Testing shows it to be around 12x faster than the uuid module. By
default it is also more compact, partly due to its default 96-bit
(24-hexdigit) length. 96 bits of randomness means that there is a
1 in 2 ^ 32 chance of collision after 2 ^ 64 iterations. If more
or less uniqueness is desired, the *size* argument can be adjusted
accordingly.
Args:
size (int): character length of the GUID, defaults to 24. Lengths
between 20 and 36 are considered valid.
The GUIDerator has built-in fork protection that causes it to
detect a fork on next iteration and reseed accordingly.
"""
def __init__(self, size=24):
self.size = size
if size < 20 or size > 36:
raise ValueError('expected 20 <= size <= 36')
self.count = itertools.count()
self.reseed()
def reseed(self):
self.pid = os.getpid()
self.salt = '-'.join([str(self.pid),
socket.gethostname() or b'<nohostname>',
str(time.time()),
codecs.encode(os.urandom(6),
'hex_codec').decode('ascii')])
# that codecs trick is the best/only way to get a bytes to
# hexbytes in py2/3
return
def __iter__(self):
return self
if _IS_PY3:
def __next__(self):
if os.getpid() != self.pid:
self.reseed()
target_bytes = (self.salt + str(next(self.count))).encode('utf8')
hash_text = hashlib.sha1(target_bytes).hexdigest()[:self.size]
return hash_text
else:
def __next__(self):
if os.getpid() != self.pid:
self.reseed()
return hashlib.sha1(self.salt +
str(next(self.count))).hexdigest()[:self.size]
next = __next__
class SequentialGUIDerator(GUIDerator):
"""Much like the standard GUIDerator, the SequentialGUIDerator is an
iterator that yields a globally-unique identifier (GUID) on every
iteration. The GUIDs produced are hexadecimal strings.
The SequentialGUIDerator differs in that it picks a starting GUID
value and increments every iteration. This yields GUIDs which are
of course unique, but also ordered and lexicographically sortable.
The SequentialGUIDerator is around 50% faster than the normal
GUIDerator, making it almost 20x as fast as the built-in uuid
module. By default it is also more compact, partly due to its
96-bit (24-hexdigit) default length. 96 bits of randomness means that
there is a 1 in 2 ^ 32 chance of collision after 2 ^ 64
iterations. If more or less uniqueness is desired, the *size*
argument can be adjusted accordingly.
Args:
size (int): character length of the GUID, defaults to 24.
Note that with SequentialGUIDerator there is a chance of GUIDs
growing larger than the size configured. The SequentialGUIDerator
has built-in fork protection that causes it to detect a fork on
next iteration and reseed accordingly.
"""
if _IS_PY3:
def reseed(self):
super(SequentialGUIDerator, self).reseed()
start_str = hashlib.sha1(self.salt.encode('utf8')).hexdigest()
self.start = int(start_str[:self.size], 16)
self.start |= (1 << ((self.size * 4) - 2))
else:
def reseed(self):
super(SequentialGUIDerator, self).reseed()
start_str = hashlib.sha1(self.salt).hexdigest()
self.start = int(start_str[:self.size], 16)
self.start |= (1 << ((self.size * 4) - 2))
def __next__(self):
if os.getpid() != self.pid:
self.reseed()
return '%x' % (next(self.count) + self.start)
next = __next__
guid_iter = GUIDerator()
seq_guid_iter = SequentialGUIDerator()
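# Hedged usage sketch (added for illustration, not part of the original
# module): both iterators are infinite, so values are pulled with next().
# The helper name and the default count of three are assumptions.
def _demo_guids(n=3):
    random_ids = [next(guid_iter) for _ in range(n)]
    # sequential GUIDs share a random prefix and sort in generation order
    ordered_ids = [next(seq_guid_iter) for _ in range(n)]
    return random_ids, ordered_ids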
def soft_sorted(iterable, first=None, last=None, key=None, reverse=False):
"""For when you care about the order of some elements, but not about
others.
Use this to float to the top and/or sink to the bottom a specific
ordering, while sorting the rest of the elements according to
normal :func:`sorted` rules.
>>> soft_sorted(['two', 'b', 'one', 'a'], first=['one', 'two'])
['one', 'two', 'a', 'b']
>>> soft_sorted(range(7), first=[6, 15], last=[2, 4], reverse=True)
[6, 5, 3, 1, 0, 2, 4]
>>> import string
>>> ''.join(soft_sorted(string.hexdigits, first='za1', last='b', key=str.lower))
'aA1023456789cCdDeEfFbB'
Args:
iterable (list): A list or other iterable to sort.
first (list): A sequence to enforce for elements which should
appear at the beginning of the returned list.
last (list): A sequence to enforce for elements which should
appear at the end of the returned list.
key (callable): Callable used to generate a comparable key for
each item to be sorted, same as the key in
:func:`sorted`. Note that entries in *first* and *last*
should be the keys for the items. Defaults to
passthrough/the identity function.
reverse (bool): Whether or not elements not explicitly ordered
by *first* and *last* should be in reverse order or not.
Returns a new list in sorted order.
"""
first = first or []
last = last or []
key = key or (lambda x: x)
seq = list(iterable)
other = [x for x in seq if not ((first and key(x) in first) or (last and key(x) in last))]
other.sort(key=key, reverse=reverse)
if first:
first = sorted([x for x in seq if key(x) in first], key=lambda x: first.index(key(x)))
if last:
last = sorted([x for x in seq if key(x) in last], key=lambda x: last.index(key(x)))
return first + other + last
"""
May actually be faster to do an isinstance check for a str path
$ python -m timeit -s "x = [1]" "x[0]"
10000000 loops, best of 3: 0.0207 usec per loop
$ python -m timeit -s "x = [1]" "try: x[0] \nexcept: pass"
10000000 loops, best of 3: 0.029 usec per loop
$ python -m timeit -s "x = [1]" "try: x[1] \nexcept: pass"
1000000 loops, best of 3: 0.315 usec per loop
# setting up try/except is fast, only around 0.01us
# actually triggering the exception takes almost 10x as long
$ python -m timeit -s "x = [1]" "isinstance(x, basestring)"
10000000 loops, best of 3: 0.141 usec per loop
$ python -m timeit -s "x = [1]" "isinstance(x, str)"
10000000 loops, best of 3: 0.131 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.')\n except: pass"
1000000 loops, best of 3: 0.443 usec per loop
$ python -m timeit -s "x = [1]" "try: x.split('.') \nexcept AttributeError: pass"
1000000 loops, best of 3: 0.544 usec per loop
"""
```
#### File: boltons/tests/test_funcutils_fb.py
```python
import pytest
from boltons.funcutils import wraps, FunctionBuilder
def pita_wrap(flag=False):
def cedar_dec(func):
@wraps(func)
def cedar_wrapper(*a, **kw):
return (flag, func.__name__, func(*a, **kw))
return cedar_wrapper
return cedar_dec
def wrappable_func(a, b):
return a, b
def wrappable_varkw_func(a, b, **kw):
return a, b
def test_wraps_basic():
@pita_wrap(flag=True)
def simple_func():
'''"""a tricky docstring"""'''
return 'hello'
assert simple_func() == (True, 'simple_func', 'hello')
assert simple_func.__doc__ == '''"""a tricky docstring"""'''
assert callable(simple_func.__wrapped__)
assert simple_func.__wrapped__() == 'hello'
assert simple_func.__wrapped__.__doc__ == '''"""a tricky docstring"""'''
@pita_wrap(flag=False)
def less_simple_func(arg='hello'):
return arg
assert less_simple_func() == (False, 'less_simple_func', 'hello')
assert less_simple_func(arg='bye') == (False, 'less_simple_func', 'bye')
with pytest.raises(TypeError):
simple_func(no_such_arg='nope')
@pita_wrap(flag=False)
def default_non_roundtrippable_repr(x=lambda y: y + 1):
return x(1)
assert default_non_roundtrippable_repr() == (
False, 'default_non_roundtrippable_repr', 2)
def test_wraps_injected():
def inject_string(func):
@wraps(func, injected="a")
def wrapped(*args, **kwargs):
return func(1, *args, **kwargs)
return wrapped
assert inject_string(wrappable_func)(2) == (1, 2)
def inject_list(func):
@wraps(func, injected=["b"])
def wrapped(a, *args, **kwargs):
return func(a, 2, *args, **kwargs)
return wrapped
assert inject_list(wrappable_func)(1) == (1, 2)
def inject_nonexistent_arg(func):
@wraps(func, injected=["X"])
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
with pytest.raises(ValueError):
inject_nonexistent_arg(wrappable_func)
def inject_missing_argument(func):
@wraps(func, injected="c")
def wrapped(*args, **kwargs):
return func(1, *args, **kwargs)
return wrapped
def inject_misc_argument(func):
# inject_to_varkw is default True, just being explicit
@wraps(func, injected="c", inject_to_varkw=True)
def wrapped(*args, **kwargs):
return func(c=1, *args, **kwargs)
return wrapped
assert inject_misc_argument(wrappable_varkw_func)(1, 2) == (1, 2)
def inject_misc_argument_no_varkw(func):
@wraps(func, injected="c", inject_to_varkw=False)
def wrapped(*args, **kwargs):
return func(c=1, *args, **kwargs)
return wrapped
with pytest.raises(ValueError):
inject_misc_argument_no_varkw(wrappable_varkw_func)
def test_wraps_update_dict():
def updated_dict(func):
@wraps(func, update_dict=True)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
def f(a, b):
return a, b
f.something = True
assert getattr(updated_dict(f), 'something')
def test_wraps_unknown_args():
def fails(func):
@wraps(func, foo="bar")
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
with pytest.raises(TypeError):
fails(wrappable_func)
def test_FunctionBuilder_invalid_args():
with pytest.raises(TypeError):
FunctionBuilder(name="fails", foo="bar")
def test_FunctionBuilder_invalid_body():
with pytest.raises(SyntaxError):
FunctionBuilder(name="fails", body="*").get_func()
def test_FunctionBuilder_modify():
fb = FunctionBuilder('return_five', doc='returns the integer 5',
body='return 5')
f = fb.get_func()
assert f() == 5
fb.varkw = 'kw'
f_kw = fb.get_func()
assert f_kw(ignored_arg='ignored_val') == 5
def test_wraps_wrappers():
call_list = []
def call_list_appender(func):
@wraps(func)
def appender(*a, **kw):
call_list.append((a, kw))
return func(*a, **kw)
return appender
with pytest.raises(TypeError):
class Num(object):
def __init__(self, num):
self.num = num
@call_list_appender
@classmethod
def added(cls, x, y=1):
return cls(x + y)
return
def test_FunctionBuilder_add_arg():
fb = FunctionBuilder('return_five', doc='returns the integer 5',
body='return 5')
f = fb.get_func()
assert f() == 5
fb.add_arg('val')
f = fb.get_func()
assert f(val='ignored') == 5
with pytest.raises(ValueError) as excinfo:
fb.add_arg('val')
assert excinfo.typename == 'ExistingArgument'
fb = FunctionBuilder('return_val', doc='returns the value',
body='return val')
broken_func = fb.get_func()
with pytest.raises(NameError):
broken_func()
fb.add_arg('val', default='default_val')
better_func = fb.get_func()
assert better_func() == 'default_val'
assert better_func('positional') == 'positional'
assert better_func(val='keyword') == 'keyword'
def test_wraps_expected():
def expect_string(func):
@wraps(func, expected="c")
def wrapped(*args, **kwargs):
args, c = args[:2], args[-1]
return func(*args, **kwargs) + (c,)
return wrapped
expected_string = expect_string(wrappable_func)
assert expected_string(1, 2, 3) == (1, 2, 3)
with pytest.raises(TypeError) as excinfo:
expected_string(1, 2)
# a rough way of making sure we got the kind of error we expected
assert 'argument' in repr(excinfo.value)
def expect_list(func):
@wraps(func, expected=["c"])
def wrapped(*args, **kwargs):
args, c = args[:2], args[-1]
return func(*args, **kwargs) + (c,)
return wrapped
assert expect_list(wrappable_func)(1, 2, c=4) == (1, 2, 4)
def expect_pair(func):
@wraps(func, expected=[('c', 5)])
def wrapped(*args, **kwargs):
args, c = args[:2], args[-1]
return func(*args, **kwargs) + (c,)
return wrapped
assert expect_pair(wrappable_func)(1, 2) == (1, 2, 5)
def expect_dict(func):
@wraps(func, expected={'c': 6})
def wrapped(*args, **kwargs):
args, c = args[:2], args[-1]
return func(*args, **kwargs) + (c,)
return wrapped
assert expect_dict(wrappable_func)(1, 2) == (1, 2, 6)
def test_defaults_dict():
def example(req, test='default'):
return req
fb_example = FunctionBuilder.from_func(example)
assert 'test' in fb_example.args
dd = fb_example.get_defaults_dict()
assert dd['test'] == 'default'
assert 'req' not in dd
def test_get_arg_names():
def example(req, test='default'):
return req
fb_example = FunctionBuilder.from_func(example)
assert 'test' in fb_example.args
assert fb_example.get_arg_names() == ('req', 'test')
assert fb_example.get_arg_names(only_required=True) == ('req',)
``` |
{
"source": "jpoehnelt/openapi-python-client",
"score": 3
} |
#### File: openapi-python-client/openapi_python_client/config.py
```python
import json
import mimetypes
from pathlib import Path
from typing import Dict, List, Optional
import yaml
from pydantic import BaseModel
class ClassOverride(BaseModel):
"""An override of a single generated class.
See https://github.com/openapi-generators/openapi-python-client#class_overrides
"""
class_name: Optional[str] = None
module_name: Optional[str] = None
class Config(BaseModel):
"""Contains any configurable values passed by the user.
See https://github.com/openapi-generators/openapi-python-client#configuration
"""
class_overrides: Dict[str, ClassOverride] = {}
project_name_override: Optional[str]
package_name_override: Optional[str]
package_version_override: Optional[str]
post_hooks: List[str] = [
"autoflake -i -r --remove-all-unused-imports --remove-unused-variables --ignore-init-module-imports .",
"isort .",
"black .",
]
field_prefix: str = "field_"
@staticmethod
def load_from_path(path: Path) -> "Config":
"""Creates a Config from provided JSON or YAML file and sets a bunch of globals from it"""
mime = mimetypes.guess_type(path.as_uri(), strict=True)[0]
if mime == "application/json":
config_data = json.loads(path.read_text())
else:
config_data = yaml.safe_load(path.read_text())
config = Config(**config_data)
return config
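if __name__ == "__main__":
    # Hedged usage sketch (not part of the package): the file name below is an
    # assumption; point it at any JSON or YAML config with the documented keys.
    # resolve() is needed because as_uri() only works on absolute paths.
    example = Config.load_from_path(Path("config.yml").resolve())
    print(example.field_prefix)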
``` |
{
"source": "jpoesen/dwarf",
"score": 3
} |
#### File: dwarf/dwarf/application.py
```python
from flask import Flask, abort, render_template, request, url_for
from datetime import datetime
import hashlib, markdown, operator, pagination, os, sys
app = Flask(__name__)
app.config.from_object('config.ProductionConfig')
app.template_folder = app.config['TEMPLATE_PATH']
# --- routes
@app.errorhandler(404)
def page_not_found(e):
# TODO: write to log
data = content_load('pages/404.md')
md = markdown.markdown(data)
return render_template('page.tpl.html', page=md), 404
# @app.route("/")
# def index():
# return dwarf_render_page('index')
# @app.route("/about/")
# def about():
# return dwarf_render_page('about')
@app.route("/posts/<slug>")
def blogpost(slug):
md = markdown.Markdown(extensions = ['meta'])
data = content_load("blog/{0}.md".format(slug))
markup = md.convert(data.decode('utf-8'))
meta = _md_meta_to_dict(md)
post = {'meta': meta, 'content': markup}
return render_template('post.tpl.html', post=post)
@app.route('/', defaults={'page': 1})
@app.route('/page/<int:page>')
def index(page):
files=content_list('blog')
newest_first = sorted(files,
key=operator.itemgetter("date"),
reverse=True)
count = len(newest_first)
newest_first = get_newest_first_for_page(
newest_first , page, pagination.PER_PAGE)
if not newest_first and page != 1:
abort(404)
pagination_ = pagination.Pagination(
page, pagination.PER_PAGE, count)
return render_template('posts.tpl.html',
pagination=pagination_,
posts=newest_first )
def get_newest_first_for_page(newest_first, page, per_page):
if page == 1 :return newest_first[:per_page]
start = (page-1) * per_page
try:
return newest_first[start : start + per_page ]
except :
return None
def content_load(filename):
# TODO check if file exists, if exist: open, if not, open content/404.
with open(app.config['CONTENT_PATH'] + filename, "r") as f:
data = f.read()
return data
def content_list(content_type):
md = markdown.Markdown(extensions = ['meta'])
files = os.listdir(app.config['CONTENT_PATH'] + content_type)
content_items = []
for fname in files:
data = content_load("{0}/{1}".format(content_type, fname))
md.convert(data.decode('utf-8'))
meta = _md_meta_to_dict(md)
if 'date' in meta:
meta['date'] = _jinja2_filter_datetime(meta['date'], '%Y-%m-%d')
#Fall back to a default slug (straight filename) if necessary
if not 'slug' in meta:
meta['slug'] = fname[0: fname.find('.')] #filename without extension
# Only add this item to the list if we're not currently looking at it.
#
# Rationale: if we call this function from a route decorator, we're
# looking at a page view listing of content items (/posts), and the request object
# will not contain views_arg 'slug'.
#
# However, if we're looking at a page view
# of a specific content item (/posts/foo), the 'slug' views_arg will be
# set. This means that this function is probably called from a context
# processor (for instance to show a sidebar block of content), in which
# case we want to exclude the item we're looking at (/posts/foo) from
# the list of content we're generating.
if not meta['slug'] == request.view_args.get('slug', ''):
content_items.append(meta)
return content_items
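# Hedged illustration (not part of dwarf): for a post whose front matter sets
# title, date and slug, content_list('blog') returns dicts roughly like
# {'title': 'Hello world', 'date': '2020-01-01', 'slug': 'hello-world'},
# with 'date' already normalized by _jinja2_filter_datetime below.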
def dwarf_render_page(slug, template='page.tpl.html'):
data = content_load("pages/{0}.md".format(slug))
page = {'content': markdown.markdown(data)}
return render_template(template, page=page)
# --- context processors
@app.context_processor
def utility_processor():
def inject_author(identifier=''):
md = markdown.Markdown(extensions = ['meta'])
data = content_load("authors/{0}.md".format(identifier))
markup = md.convert(data.decode('utf-8'))
author = {}
for key in md.Meta.keys():
author[key] = md.Meta[key][0]
author['bio'] = markup
# store md5 hash of email so the template can use it to fetch gravatar image
author['hash'] = hashlib.md5(author['email']).hexdigest()
return author
return dict(inject_author=inject_author)
# Returns the 4 most recent blog posts
@app.context_processor
def recent_posts():
files=content_list('blog')
newest_first = sorted(files,
key=operator.itemgetter("date"),
reverse=True)
return dict(recent_posts=newest_first[:4])
@app.context_processor
def authors():
files=content_list('authors')
return dict(authors=files)
# --- template filters
@app.template_filter('strftime')
def _jinja2_filter_datetime(date, format='%b %d, %Y'):
d = datetime.strptime(date, "%Y-%m-%d").date()
return d.strftime(format)
# --- helpers
# grab markdown meta info and store in simple dict, key for key
# (to automatically make all meta items available in the template later on)
def _md_meta_to_dict(md):
items = {}
for key in md.Meta.keys():
items[key] = md.Meta[key][0]
return items
def url_for_other_page(page):
args = request.view_args.copy()
args['page'] = page
return url_for(request.endpoint, **args)
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
if __name__ == "__main__":
app.run()
``` |
{
"source": "jpohanka/testing-tensorflow",
"score": 2
} |
#### File: tf-demonstration/models/fit_d_student_t_marginals.py
```python
import numpy as np
import tensorflow as tf
from datetime import datetime
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = '/Users/peter/Documents/Python/logs/t_marginals_logs'
logdir = '{}/run-{}/'.format(root_logdir, now)
'''
Function for generating mini batches
'''
def iterate_minibatches(inputs, batchsize, shuffle=True):
if shuffle:
indices = np.arange(inputs.shape[0])
np.random.shuffle(indices)
for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt]
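# Hedged usage sketch (illustrative only, the array shape is an assumption):
# a (10, 2) array in batches of 4 yields two shuffled (4, 2) batches; the
# final two rows are dropped because iteration stops at the last full batch.
#
# for batch in iterate_minibatches(np.arange(20).reshape(10, 2), 4):
#     print(batch.shape)   # -> (4, 2), printed twice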
'''
Set Parameters and generate sample data
'''
class Config():
true_dfs = 4.0
true_scales = 1.0
sample_size = 3000
init_df_params = {'minval': 2.0, 'maxval': 6.0}
init_scale_params = {'mean': 1.0, 'stddev': 0.1}
lr = 0.001
max_epochs = 1000
batch_size = 50
eps_param, eps_loss, eps_grad = 1e-10, 1e-10, 1e-10
random_seed = 0
dim = 20
config = Config()
np.random.seed(0)
sample_data = np.random.standard_t(df=config.true_dfs, size=(config.sample_size, config.dim))
np.random.seed(config.random_seed)
'''
Graph Construction
'''
tf.reset_default_graph()
t_copula = tf.Graph()
with t_copula.as_default():
# tensor for data
input_data = tf.placeholder(shape=[None, config.dim],
dtype=tf.float32, name='input_observations')
with tf.name_scope('trained_parameters'):
# tensors for parameters
dfs = tf.Variable(initial_value=tf.random_uniform([config.dim], **config.init_df_params),
dtype=tf.float32, name='degrees_of_freedom')
scales = tf.Variable(initial_value=tf.random_normal([config.dim], **config.init_scale_params),
dtype=tf.float32, name='scales')
with tf.name_scope('variable_logs'):
df_hist = tf.summary.histogram('dfs_hist', dfs)
scales_hist = tf.summary.histogram('scales_hist', scales)
for dims in range(config.dim):
df_scalar = tf.summary.scalar('dfs_scalar_dim_'+str(dims), dfs[dims])
scales_scalar = tf.summary.scalar('scales_scalar_dim_'+str(dims), scales[dims])
with tf.name_scope('mle_target'):
# loss function
t_dist = tf.contrib.distributions.StudentT(
dfs, loc=tf.zeros([1, config.dim]), scale=scales, name='student_t_RV')
log_prob = t_dist.log_prob(value=input_data)
neg_log_like = -1.0 * tf.reduce_sum(log_prob, name='log_observations')
maxl_summary = tf.summary.scalar('maximum_likelihood', neg_log_like)
with tf.name_scope('optimizer'):
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=config.lr, name='optimizer')
train_op = optimizer.minimize(loss=neg_log_like, name='training_target')
# gradient
grad = tf.gradients(neg_log_like, [dfs, scales], name='gradient')
init = tf.global_variables_initializer()
saver = tf.train.Saver()
merged_summary = tf.summary.merge_all()
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
'''
Training Session
'''
with tf.Session(graph=t_copula) as sess:
sess.run(init)
epoch = 1
obs_dfs, obs_scales = sess.run(fetches=[[dfs], [scales]])
obs_loss = sess.run(fetches=[neg_log_like], feed_dict={input_data: sample_data})
obs_grad = sess.run(fetches=[grad], feed_dict={input_data: sample_data})
while True:
for batch in iterate_minibatches(sample_data, config.batch_size):
# gradient step
sess.run(fetches=train_op, feed_dict={input_data: batch})
# update parameters
new_dfs, new_scales = sess.run(fetches=[dfs, scales])
diff_norm = np.linalg.norm(np.subtract([new_dfs, new_scales],
[obs_dfs[-1], obs_scales[-1]]))
# update loss
new_loss = sess.run(fetches=neg_log_like, feed_dict={input_data: batch})
loss_diff = np.abs(new_loss - obs_loss[-1])
# update gradient
new_grad = sess.run(fetches=grad, feed_dict={input_data: batch})
grad_norm = np.linalg.norm(new_grad)
obs_dfs.append(new_dfs)
obs_scales.append(new_scales)
obs_loss.append(new_loss)
obs_grad.append(new_grad)
summary_str = merged_summary.eval(feed_dict={input_data: batch})
file_writer.add_summary(summary_str, epoch)
if epoch % 100 == 0:
print("Epoch", epoch, ": loss_diff =", loss_diff)
# print("Epoch", epoch, ": loss_diff =", loss_diff,
# 'dfs=', obs_dfs, 'scales=', obs_scales)
saver_path = saver.save(
sess, '/Users/peter/Documents/Python/logs/checkpoints/t_marginals.ckpt')
if diff_norm < config.eps_param:
print('Parameter convergence in {} iterations!'.format(epoch))
break
if loss_diff < config.eps_loss:
print('Loss function convergence in {} iterations!'.format(epoch))
break
if grad_norm < config.eps_grad:
print('Gradient convergence in {} iterations!'.format(epoch))
break
if epoch >= config.max_epochs:
print('Max number of epochs reached.')
break
epoch += 1
saver.save(
sess, '/Users/peter/Documents/Python/logs/checkpoints/t_marginals_final.ckpt')
file_writer.close()
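# The runs written under root_logdir above can be inspected afterwards with
# TensorBoard (the exact path is machine-specific):
#   tensorboard --logdir /Users/peter/Documents/Python/logs/t_marginals_logs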
```
#### File: tf-examples/graph-composition/graph_composer.py
```python
from __future__ import print_function
import tensorflow as tf
g = tf.Graph()
# Here the approach 1 of the graph composition if applied.
with g.as_default():
sg1_a = tf.placeholder(shape=[], dtype=tf.float64, name="sg1_a")
sg1_b = tf.placeholder(shape=[], dtype=tf.float64, name="sg1_b")
sg2_d = tf.placeholder(shape=[], dtype=tf.float64, name="sg2_d")
# Read the first sub-graph's GraphDef proto.
with open("graph_def_files/subgraph1_graph_def.pb", "rb") as graph1_file:
graph1_graph_def = tf.GraphDef()
graph1_graph_def.ParseFromString(graph1_file.read())
# Get the output from the first sub-graph
sg1_out_sum, = tf.import_graph_def(
graph_def=graph1_graph_def,
name="sg1",
input_map={
"a:0": sg1_a,
"b:0": sg1_b,
},
return_elements=["out_sum:0"],
)
# Read the second sub-graph's GraphDef proto.
with open("graph_def_files/subgraph2_graph_def.pb", "rb") as graph2_file:
graph2_graph_def = tf.GraphDef()
graph2_graph_def.ParseFromString(graph2_file.read())
# Get the output from the second sub-graph
sg2_out_calc, = tf.import_graph_def(
graph_def=graph2_graph_def,
input_map={
"c:0":sg1_out_sum,
"d:0":sg2_d,
},
name="sg2",
return_elements=["out_calc:0"],
)
# Save the GraphDef proto twice - in binary and in human-readable pbtxt format.
tf.train.write_graph(
graph_or_graph_def=g,
logdir="graph_def_files",
name="graph_composed.pb",
as_text=False
)
tf.train.write_graph(
graph_or_graph_def=g,
logdir="graph_def_files",
name="graph_composed.pbtxt",
as_text=True
)
#
with tf.Session(graph=g) as sess:
# Create a FileWriter to write a summary file containing the GraphDef.
writer = tf.summary.FileWriter(
graph=g,
logdir="summary_files/graph_composed",
)
feed_dict = {
sg1_a: 4.0,
sg1_b: 5.0,
sg2_d: 5.0
}
print(sess.run(sg2_out_calc, feed_dict=feed_dict))
``` |
{
"source": "jpohlmeyer/RPiFanControl",
"score": 3
} |
#### File: jpohlmeyer/RPiFanControl/fancontrol.py
```python
import RPi.GPIO as GPIO
import threading
import time
class SettingsProviderInterface:
def get_gpio_pin(self):
raise NotImplementedError
def get_temperature_threshold(self):
raise NotImplementedError
def get_min_cooling_duration(self):
raise NotImplementedError
class FanControl:
quit = False
is_fan_on = False
def __init__(self, settings_provider):
self.settings_provider = settings_provider
self.temp_control_thread = threading.Thread(target=self.temp_control)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.settings_provider.get_gpio_pin(), GPIO.OUT)
def temp_control(self):
while not self.quit:
with open("/sys/class/thermal/thermal_zone0/temp") as temp_file:
temp = int(temp_file.read())
if temp and temp > self.settings_provider.get_temperature_threshold():
if not self.is_fan_on:
self.fan_on()
counter = 0
while not self.quit and counter < self.settings_provider.get_min_cooling_duration():
counter += 1
time.sleep(1)
else:
self.fan_off()
time.sleep(1)
def start(self):
self.temp_control_thread.start()
    def fan_on(self):
        self.is_fan_on = True
        GPIO.output(self.settings_provider.get_gpio_pin(), GPIO.HIGH)
    def fan_off(self):
        self.is_fan_on = False
        GPIO.output(self.settings_provider.get_gpio_pin(), GPIO.LOW)
def exit(self):
self.quit = True
if self.temp_control_thread.is_alive():
self.temp_control_thread.join()
self.fan_off()
``` |
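As a usage illustration only: `SettingsProviderInterface` leaves the pin, threshold, and timing to the caller. The sketch below wires up a static provider with invented values (GPIO pin 17, a 60 °C threshold in millidegrees, a 30-second minimum run time) and assumes the module is importable as `fancontrol`.

```python
# Hedged usage sketch; pin number, threshold, and duration are illustrative.
from fancontrol import FanControl, SettingsProviderInterface

class StaticSettings(SettingsProviderInterface):
    def get_gpio_pin(self):
        return 17          # BCM pin driving the fan transistor (assumed)

    def get_temperature_threshold(self):
        return 60000       # /sys/class/thermal reports millidegrees Celsius

    def get_min_cooling_duration(self):
        return 30          # keep the fan spinning for at least 30 seconds

if __name__ == "__main__":
    control = FanControl(StaticSettings())
    control.start()
    try:
        input("Press Enter to stop fan control...")
    finally:
        control.exit()
```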
{
"source": "jpoikela/sappho",
"score": 4
} |
#### File: sappho/sappho/layers.py
```python
import sys
import pygame
PY3 = sys.version_info[0] == 3
range = range if PY3 else xrange
class SurfaceLayers(object):
"""Ordered series of pygame surfaces, each the size of the target
surface given at creation time.
Arguments:
target_surface (pygame.Surface): Surface that will have
have the layers blitted to when render() is called. The size
of this surface is used as the size of the generated layers.
number_of_layers (int): Number of layers to generate.
"""
def __init__(self, target_surface, number_of_layers):
self._target_surface = target_surface
self._surface_layers = self.create_surface_layers(target_surface,
number_of_layers)
@staticmethod
def create_surface_layers(target_surface, number_of_layers):
"""Create a list of pygame surfaces
the size of the target surface.
Arguments:
target_surface (pygame.Surface): The surface
whose dimensions will be used for each layer.
number_of_layers (int): The number of surfaces to
create/return.
Returns:
list[pygame.Surface]: List of surfaces
"""
surface_layers = []
for i in range(number_of_layers):
surface = pygame.surface.Surface(target_surface.get_size(),
pygame.SRCALPHA, 32)
surface_layers.append(surface)
return surface_layers
def __getitem__(self, key):
"""Access a surface by z-index.
Arguments:
key (int): The z-index of the surface.
Raises:
IndexError: When the z-index is invalid.
Returns:
pygame.Surface: The surface belonging
to the z-index specified.
"""
return self._surface_layers[key]
def __len__(self):
"""Return the number of layers.
Returns:
int: Number of members in self.surface_layers.
"""
return len(self._surface_layers)
def __iter__(self):
"""Iterate through the surface layers.
Yields:
pygame.surface.Surface
"""
for surface in self._surface_layers:
yield surface
def render(self):
"""Draw each layer onto the target surface in the correct order.
We're wiping surface layers because there may be a layer higher up
which has a lot of transparent air but parts of pillars or whatever;
if we didn't wipe you'd see that column drag across the screen as the
camera moved.
"""
for surface in self._surface_layers:
self._target_surface.blit(surface, (0, 0))
target_surface = self._target_surface
number_of_layers = len(self._surface_layers)
self._surface_layers = self.create_surface_layers(target_surface,
number_of_layers)
```
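A minimal sketch of how `SurfaceLayers` might be used in a draw loop; the display size, layer count, and shapes are illustrative and not taken from the sappho examples.

```python
# Hedged usage sketch for SurfaceLayers; sizes and colors are invented.
import pygame
from sappho.layers import SurfaceLayers

pygame.init()
screen = pygame.display.set_mode((640, 480))
layers = SurfaceLayers(screen, number_of_layers=3)

layers[0].fill((30, 30, 30))                                   # background
pygame.draw.circle(layers[1], (200, 50, 50), (320, 240), 40)   # actor layer
pygame.draw.rect(layers[2], (240, 240, 240), pygame.Rect(0, 460, 640, 20))  # HUD

layers.render()      # composites layers 0..2 onto the screen, then wipes them
pygame.display.flip()
```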
#### File: sappho/tests/test_collide.py
```python
import os
import pygame
from sappho import animate
from sappho import collide
# this sprite is 10x10
testpath = os.path.realpath(__file__)
path = os.path.abspath(os.path.join(testpath,
"..",
"resources",
"animatedsprite.gif"))
animsprite_mask_20_20 = animate.AnimatedSprite.from_gif(
path,
mask_threshold=254
)
animsprite_mask_20_20.rect.topleft = (20, 20)
animsprite_mask_40_40 = animate.AnimatedSprite.from_gif(
path,
mask_threshold=254
)
animsprite_mask_40_40.rect.topleft = (40, 40)
animsprite_20_43 = animate.AnimatedSprite.from_gif(path)
animsprite_20_43.rect.topleft = (20, 43)
animsprite_group_sans_one = pygame.sprite.Group(animsprite_mask_40_40,
animsprite_20_43)
def test_move_close_as_possible():
"""
Move `animsprite_mask_20_20` to (60, 60), which should collide
    with both `animsprite_mask_40_40` and `animsprite_20_43`.
"""
closest_to_goal, collided_with = collide.move_as_close_as_possible(
animsprite_mask_20_20,
(60, 60),
animsprite_group_sans_one
)
assert closest_to_goal == (30, 30)
assert collided_with is animsprite_mask_40_40
closest_to_goal, collided_with = collide.move_as_close_as_possible(
animsprite_mask_20_20,
(10, 10),
animsprite_group_sans_one
)
assert closest_to_goal == (10, 10)
assert collided_with is None
closest_to_goal, collided_with = collide.move_as_close_as_possible(
animsprite_mask_20_20,
(20, 60),
animsprite_group_sans_one
)
assert closest_to_goal == (20, 33)
assert collided_with is animsprite_20_43
def test_sprites_in_orthogonal_path():
collided_with = collide.sprites_in_orthogonal_path(
animsprite_mask_20_20,
(20, 60),
animsprite_group_sans_one
)
assert collided_with[0] is animsprite_20_43
```
#### File: sappho/tests/test_layers.py
```python
from __future__ import absolute_import
import pygame
import sappho.layers
from .common import compare_surfaces
class TestSurfaceLayers(object):
"""
    Do not need to test create_surface_layers() directly, because
    it's inherent to SurfaceLayers' initialization and is exercised
    by everything else tested here.
"""
NUMBER_OF_LAYERS = 100
TARGET_SURFACE_SIZE = (800, 600)
def setup(self):
self.target_surface = pygame.surface.Surface(self.TARGET_SURFACE_SIZE)
self.surface_layers = (sappho.layers.
SurfaceLayers(self.target_surface,
self.NUMBER_OF_LAYERS))
def test_getitem(self):
for i in range(self.NUMBER_OF_LAYERS):
self.surface_layers[i]
def test_len(self):
assert len(self.surface_layers) == self.NUMBER_OF_LAYERS
def test_iter(self):
for i, surface in enumerate(self.surface_layers):
assert surface is self.surface_layers[i]
assert i == (self.NUMBER_OF_LAYERS - 1)
def test_sizes(self):
for surface in self.surface_layers:
assert surface.get_size() == self.TARGET_SURFACE_SIZE
def test_render(self):
subsurface_size = (150, 150)
# Create our test surfaces
background = pygame.surface.Surface(self.TARGET_SURFACE_SIZE)
rect1 = pygame.surface.Surface(subsurface_size)
rect1pos = (100, 100)
rect2 = pygame.surface.Surface(subsurface_size)
rect2pos = (200, 200)
rect3 = pygame.surface.Surface(subsurface_size)
rect3pos = (300, 300)
# Fill the surfaces
background.fill((255, 255, 255))
rect1.fill((255, 0, 0))
rect2.fill((0, 255, 0))
rect3.fill((0, 0, 255))
# Create a surface to compare with and blit our test surfaces
test_surface = pygame.surface.Surface(self.TARGET_SURFACE_SIZE)
test_surface.blit(background, (0, 0))
test_surface.blit(rect1, rect1pos)
test_surface.blit(rect2, rect2pos)
test_surface.blit(rect3, rect3pos)
# Create the SurfaceLayers object and fill it with our layers
surface_layers = sappho.layers.SurfaceLayers(self.target_surface, 4)
surface_layers[0].blit(background, (0, 0))
surface_layers[1].blit(rect1, rect1pos)
surface_layers[2].blit(rect2, rect2pos)
surface_layers[3].blit(rect3, rect3pos)
# Render to the target surface
surface_layers.render()
# Compare the two surfaces
assert compare_surfaces(self.target_surface, test_surface)
```
#### File: sappho/tests/test_tiles.py
```python
from __future__ import absolute_import
import os
import textwrap
import pygame
import sappho.tiles
from .common import compare_surfaces
class TestTile(object):
def test_tile_instantiation(self):
surface = pygame.surface.Surface((1, 1))
tile = sappho.tiles.Tile(0, surface)
assert(tile.id_ == 0)
assert(len(tile.flags) == 0)
assert(isinstance(tile.flags, set))
class TestTilesheet(object):
def test_from_file(self):
testpath = os.path.realpath(__file__)
path = os.path.abspath(os.path.join(testpath,
"..",
"resources",
"tilesheet.png"))
tilesheet = sappho.tiles.Tilesheet.from_file(path, 1, 1)
# Test that tile rules are loaded correctly
assert(sappho.tiles.Flags.SOLID in
tilesheet.tiles[0].flags)
def test_subsurface(self):
testpath = os.path.realpath(__file__)
path = os.path.abspath(os.path.join(testpath,
"..",
"resources",
"tilesheet.png"))
tilesheet = sappho.tiles.Tilesheet.from_file(path, 1, 1)
# Grab the tile at (0, 0) and blit it's subsurface to another surface,
# then compare it against a master surface to ensure it's the color we
# want
target_surface = pygame.surface.Surface((1, 1))
target_surface.blit(tilesheet.tiles[0].image, (0, 0))
master_surface = pygame.surface.Surface((1, 1))
master_surface.fill((255, 0, 0))
assert(compare_surfaces(target_surface, master_surface))
def test_parse_rules(self):
testpath = os.path.realpath(__file__)
path = os.path.abspath(os.path.join(testpath,
"..",
"resources",
"tilesheet.png.rules"))
rules = sappho.tiles.Tilesheet.parse_rules(path)
assert(rules[0] == set([sappho.tiles.Flags.SOLID]))
assert(rules[1] == set([sappho.tiles.Flags.SOLID]))
assert(rules[2] == set([sappho.tiles.Flags.SOLID]))
assert(rules[3] == set([sappho.tiles.Flags.SOLID]))
assert(rules[4] == set([sappho.tiles.Flags.SOLID]))
class TestTilemap(object):
TILEMAP_CSV = """
0,1,2
5,3,4
"""
def setup(self):
# Load a tilesheet to use for our tilemap
testpath = os.path.realpath(__file__)
path = os.path.abspath(os.path.join(testpath,
"..",
"resources",
"tilesheet.png"))
self.tilesheet = sappho.tiles.Tilesheet.from_file(path, 1, 1)
def test_from_csv(self):
csv = textwrap.dedent(self.TILEMAP_CSV).strip()
tilemap = (sappho.tiles.TileMap.
from_csv_string_and_tilesheet(csv, self.tilesheet))
# The tile ID 0 is set as a solid block, and this is at (0, 0)
# to (1, 1) in the tilemap. Here, we check that a solid block
# has been correctly entered into the tilemap.
assert(len(tilemap.collision_group) == 5)
def test_from_tmx(self):
testpath = os.path.realpath(__file__)
path = os.path.abspath(os.path.join(testpath,
"..",
"resources",
"tilemap.tmx"))
tilemaps = sappho.tiles.tmx_file_to_tilemaps(path, self.tilesheet)
tilemap = tilemaps[0]
# Same as the above test, check for the solid block
assert(len(tilemap.collision_group) == 5)
def test_render(self):
csv = textwrap.dedent(self.TILEMAP_CSV).strip()
tilemap = (sappho.tiles.TileMap.
from_csv_string_and_tilesheet(csv, self.tilesheet))
        # Create a surface that has 1x2 strips of red, green, and
        # blue to compare against the rendered tilemap. This surface has
# to have the SRCALPHA flag and a depth of 32 to match
# the surface returned by the render function.
test_surface = pygame.surface.Surface((3, 2), pygame.SRCALPHA, 32)
test_surface.fill((255, 0, 0), pygame.Rect(0, 0, 1, 2))
test_surface.fill((0, 255, 0), pygame.Rect(1, 0, 1, 2))
test_surface.fill((0, 0, 255), pygame.Rect(2, 0, 1, 2))
# Render the tilemap
output_surface = tilemap.to_surface()
# Compare the two surfaces
assert(compare_surfaces(test_surface, output_surface))
``` |
{
"source": "JPoirier55/random_myzer",
"score": 3
} |
#### File: JPoirier55/random_myzer/http_client.py
```python
import logging
import requests
logger = logging.getLogger()
logger.setLevel("INFO")
class HttpClient:
def get(self, url):
headers = {"Accept": "application/json"}
data = requests.get(url, headers=headers)
if data.status_code != 200:
logger.error('Error')
return ''
else:
return data.text
``` |
{
"source": "jpolchlo/cloud-buster",
"score": 2
} |
#### File: cloud-buster/python/query_rf.py
```python
import argparse
import ast
import json
import os
from urllib.parse import urlparse
from uuid import uuid4
import requests
import shapely.affinity # type: ignore
import shapely.geometry # type: ignore
import shapely.ops # type: ignore
SENTINEL = "4a50cb75-815d-4fe5-8bc1-144729ce5b42"
DEFAULT_SORT = "acquisitionDatetime,desc"
def init_session(refresh_token, base_search_url):
"""Helper method to create a requests Session"""
post_body = {"refresh_token": refresh_token}
response = requests.post(
"{}/tokens/".format(base_search_url), json=post_body)
response.raise_for_status()
token = response.json()["id_token"]
session = requests.Session()
session.headers.update({"Authorization": "Bearer {}".format(token)})
return session
class RFClient:
def __init__(self, refresh_token, base_search_url):
self.refresh_token = refresh_token
self.base_search_url = base_search_url
self.refresh_session()
def refresh_session(self) -> None:
self.session = init_session(self.refresh_token, self.base_search_url)
def list_scenes(self, params={}):
response = self.session.get(
"{}/scenes/".format(self.base_search_url), params=params
)
if response.status_code == 401:
print("Refreshing session, since token was expired")
self.refresh_session()
return self.list_scenes(params)
else:
response.raise_for_status()
return response.json()
def create_shape(self, multipolygon, name):
feature_collection = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {"name": name},
"geometry": multipolygon,
"id": str(uuid4()),
}
],
}
# it needs an ID because geojson reasons, but we ignore it and generate on the backend
response = self.session.post(
"{}/shapes/".format(self.base_search_url), json=feature_collection
)
if response.status_code == 401:
self.refresh_session()
return self.create_shape(multipolygon, name)
else:
response.raise_for_status()
return response.json()[0]
@staticmethod
def create_scene_search_qp(
max_cloud_cover=None,
min_acquisition_date=None,
max_acquisition_date=None,
overlap_percentage=None,
datasource=SENTINEL,
bbox=None,
shape_id=None,
sort=DEFAULT_SORT,
page=None,
page_size=None,
):
params = {
"maxCloudCover": max_cloud_cover,
"minAcquisitionDatetime": min_acquisition_date,
"maxAcquisitionDatetime": max_acquisition_date,
"overlapPercentage": overlap_percentage,
"datasource": datasource,
"bbox": bbox,
"shape": shape_id,
"sort": sort,
"page": page,
"pageSize": page_size,
}
return {k: v for k, v in params.items() if v is not None}
@staticmethod
def parse_geo_filters(filter_list):
parsed_geo_filters = []
for idx, param_dict in enumerate(filter_list):
print("Parsing filter {} in provided geo filters".format(idx + 1))
parsed_geo_filters.append(
{
"minAcquisitionDate": param_dict["minAcquisitionDate"],
"maxAcquisitionDate": param_dict["maxAcquisitionDate"],
"maxCloudCover": param_dict["maxCloudCover"],
"overlapPercentage": param_dict["overlapPercentage"],
"limit": param_dict["limit"],
"chipCloudThreshold": param_dict["chipCloudThreshold"],
"windowSize": param_dict["windowSize"],
}
)
print("Parsed filter {} in provided geo filters".format(idx + 1))
return parsed_geo_filters
@staticmethod
def rf_params_from_geo_filter(geo_filter, shape_id, page=0):
return RFClient.create_scene_search_qp(
max_cloud_cover=geo_filter["maxCloudCover"],
min_acquisition_date=geo_filter["minAcquisitionDate"],
max_acquisition_date=geo_filter["maxAcquisitionDate"],
overlap_percentage=geo_filter["overlapPercentage"],
shape_id=shape_id,
page=page,
page_size=geo_filter["limit"],
)
def cli_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument('--aoi-name', required=False, type=str)
parser.add_argument('--geojson', required=True, type=str)
parser.add_argument('--limit', required=False, default=1024, type=int)
parser.add_argument('--name-property', required=False, type=str)
parser.add_argument('--refresh-token', required=True, type=str)
parser.add_argument('--response', required=False, type=str)
parser.add_argument('--maxclouds', required=False, default=20, type=int)
parser.add_argument('--mindate', required=False,
nargs='+', type=str, default=['1307-10-13'])
parser.add_argument('--maxdate', required=False,
nargs='+', type=str, default=['2038-01-19'])
parser.add_argument('--scale', type=float, required=False)
parser.add_argument('--original-shape',
type=ast.literal_eval, required=False, default=False)
return parser
if __name__ == '__main__':
args = cli_parser().parse_args()
with open(args.geojson, 'r') as f:
features = json.load(f)
def convert_and_scale(f):
tmp = shapely.geometry.shape(f.get('geometry'))
if args.scale is not None:
tmp = shapely.affinity.scale(tmp, args.scale, args.scale)
return tmp
feature = list(map(convert_and_scale, features.get('features')))
shape = shapely.ops.cascaded_union(feature)
    if args.aoi_name is None and args.name_property is not None:
        # `feature` holds geometries only; read the name from the first GeoJSON feature
        args.aoi_name = features.get('features')[0].get('properties', {}).get(args.name_property)
if args.original_shape:
args.scale = None
original_shape = shapely.ops.cascaded_union(
list(map(convert_and_scale, features.get('features'))))
aoi_shape = original_shape
else:
aoi_shape = shape
sentinel_scenes = {
'results': [],
'aoi': shapely.geometry.mapping(aoi_shape)
}
rf_client = RFClient(args.refresh_token,
'https://app.rasterfoundry.com/api')
rf_shape = rf_client.create_shape(
shapely.geometry.mapping(shape), str(uuid4()))
for (mindate, maxdate) in zip(args.mindate, args.maxdate):
geo_filter = {
"minAcquisitionDate": mindate,
"maxAcquisitionDate": maxdate,
"maxCloudCover": args.maxclouds,
"overlapPercentage": 50.0,
"limit": args.limit
}
rf_params = RFClient.rf_params_from_geo_filter(
geo_filter, rf_shape.get('id'))
sentinel_scenes['results'] += rf_client.list_scenes(
rf_params).get('results')
if args.response is None and args.aoi_name is not None:
args.response = './{}.json'.format(args.aoi_name)
if args.response is not None:
with open(args.response, 'w') as f:
json.dump(sentinel_scenes, f, sort_keys=True,
indent=4, separators=(',', ': '))
print(args.response)
``` |
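For reference, a hedged sketch of calling `RFClient` programmatically instead of through the CLI; the refresh token, bounding box, and dates are placeholders.

```python
# Hypothetical programmatic use of RFClient; all values are placeholders.
from uuid import uuid4
import shapely.geometry
from query_rf import RFClient

aoi = shapely.geometry.box(19.0, 47.0, 19.5, 47.5)        # lon/lat bbox
client = RFClient("YOUR_REFRESH_TOKEN", "https://app.rasterfoundry.com/api")
rf_shape = client.create_shape(shapely.geometry.mapping(aoi), str(uuid4()))

params = RFClient.create_scene_search_qp(
    max_cloud_cover=20,
    min_acquisition_date="2019-06-01",
    max_acquisition_date="2019-09-01",
    shape_id=rf_shape.get("id"),
    page_size=25,
)
scenes = client.list_scenes(params).get("results", [])
print(len(scenes), "scenes found")
```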
{
"source": "jpolchlo/geopyspark",
"score": 2
} |
#### File: geotrellis/io_tests/rasterio_test.py
```python
import unittest
import os
import pytest
import rasterio
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.tests.python_test_utils import file_path
class CatalogTest(BaseTestClass):
uri = file_path("srtm_52_11.tif")
@pytest.fixture(autouse=True)
def tearDown(self):
yield
BaseTestClass.pysc._gateway.close()
@pytest.mark.skipif('TRAVIS' in os.environ,
                        reason="Cannot resolve dependency issues in Travis for the time being")
def test_tiles(self):
import geopyspark as gps
from geopyspark.geotrellis import rasterio
tiles = rasterio._read_windows(self.uri, xcols=256, ycols=256, bands=None, crs_to_proj4=lambda n: '+proj=longlat +datum=WGS84 +no_defs ')
self.assertEqual(len(list(tiles)), 144)
@pytest.mark.skipif('TRAVIS' in os.environ,
                        reason="Cannot resolve dependency issues in Travis for the time being")
def test_layer(self):
import geopyspark as gps
from geopyspark.geotrellis import rasterio
rdd0 = gps.rasterio.get(self.uri)
rdd1 = gps.RasterLayer.from_numpy_rdd(gps.LayerType.SPATIAL, rdd0)
self.assertEqual(rdd1.count(), 144)
if __name__ == "__main__":
unittest.main()
```
#### File: tests/geotrellis/raster_layer_test.py
```python
import os
import unittest
import numpy as np
import pytest
from geopyspark.geotrellis import (SpatialKey,
Tile,
ProjectedExtent,
Extent,
RasterLayer,
LocalLayout,
TileLayout,
GlobalLayout,
LayoutDefinition,
SpatialPartitionStrategy)
from shapely.geometry import Point
from geopyspark.geotrellis.layer import TiledRasterLayer
from geopyspark.tests.base_test_class import BaseTestClass
from geopyspark.geotrellis.constants import LayerType, CellType
def make_raster(x, y, v, cols=4, rows=4, ct=CellType.FLOAT32, crs=4326):
cells = np.zeros((1, rows, cols))
cells.fill(v)
# extent of a single cell is 1, no fence-post here
extent = ProjectedExtent(Extent(x, y, x + cols, y + rows), crs)
return (extent, Tile(cells, ct, None))
class RasterLayerTest(BaseTestClass):
layers = [
make_raster(0, 0, v=1),
make_raster(3, 2, v=2),
make_raster(6, 0, v=3)
]
numpy_rdd = BaseTestClass.pysc.parallelize(layers)
layer = RasterLayer.from_numpy_rdd(LayerType.SPATIAL, numpy_rdd)
metadata = layer.collect_metadata(GlobalLayout(5))
def test_to_to_layout_with_partitioner(self):
strategy = SpatialPartitionStrategy(4)
tiled = self.layer.tile_to_layout(LocalLayout(5), partition_strategy=strategy)
self.assertEqual(tiled.get_partition_strategy(), strategy)
def test_tile_to_local_layout(self):
tiled = self.layer.tile_to_layout(LocalLayout(5))
assert tiled.layer_metadata.extent == Extent(0,0,10,6)
assert tiled.layer_metadata.tile_layout == TileLayout(2,2,5,5)
def test_tile_to_global_layout(self):
tiled = self.layer.tile_to_layout(GlobalLayout(5))
assert tiled.layer_metadata.extent == Extent(0,0,10,6)
assert tiled.layer_metadata.tile_layout == TileLayout(128,128,5,5)
assert tiled.zoom_level == 7
def test_tile_to_metadata_layout(self):
tiled = self.layer.tile_to_layout(layout=self.metadata)
self.assertEqual(tiled.layer_metadata.extent, Extent(0,0,10,6))
self.assertDictEqual(tiled.layer_metadata.to_dict(), self.metadata.to_dict())
def test_tile_to_tiled_layer_layout(self):
extent = Extent(0., 0., 10., 6.)
tile_layout = TileLayout(2,2,5,5)
layout_definition = LayoutDefinition(extent, tile_layout)
base = self.layer.tile_to_layout(layout_definition)
tiled = self.layer.tile_to_layout(layout=base)
self.assertDictEqual(tiled.layer_metadata.to_dict(), base.layer_metadata.to_dict())
def test_tile_to_layout_definition(self):
tiled = self.layer.tile_to_layout(layout=self.metadata.layout_definition)
self.assertDictEqual(tiled.layer_metadata.to_dict(), self.metadata.to_dict())
def test_no_data_of_zero(self):
no_data_layer = [(t[0], Tile.from_numpy_array(t[1].cells, 1)) for t in self.layers]
rdd = BaseTestClass.pysc.parallelize(no_data_layer)
nd_layer = RasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd)
nd_metadata = nd_layer.collect_metadata()
self.assertTrue('ud1' in nd_metadata.cell_type)
self.assertEqual(nd_metadata.no_data_value, 1)
@pytest.fixture(scope='class', autouse=True)
def tearDown(self):
yield
BaseTestClass.pysc._gateway.close()
if __name__ == "__main__":
unittest.main()
```
#### File: geopyspark/vector_pipe/osm_reader.py
```python
from shapely.geometry import Polygon
from geopyspark.geopyspark_utils import ensure_pyspark
ensure_pyspark()
from geopyspark import get_spark_context, create_python_rdd
from geopyspark.geotrellis import Extent
from geopyspark.vector_pipe import Feature, Properties
from geopyspark.vector_pipe.features_collection import FeaturesCollection
from pyspark.sql import SparkSession
__all__ = ['from_orc', 'from_dataframe']
def from_orc(source, target_extent=None):
"""Reads in OSM data from an orc file that is located either locally or on S3. The
resulting data will be read in as an instance of :class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`.
Args:
source (str): The path or URI to the orc file to be read. Can either be a local file, or
a file on S3.
Note:
Reading a file from S3 requires additional setup depending on the environment
and how the file is being read.
The following describes the parameters that need to be set depending on
how the files are to be read in. However, **if reading a file on EMR, then
the access key and secret key do not need to be set**.
If using ``s3a://``, then the following ``SparkConf`` parameters need to be set:
- ``spark.hadoop.fs.s3a.impl``
- ``spark.hadoop.fs.s3a.access.key``
- ``spark.hadoop.fs.s3a.secret.key``
If using ``s3n://``, then the following ``SparkConf`` parameters need to be set:
- ``spark.hadoop.fs.s3n.access.key``
- ``spark.hadoop.fs.s3n.secret.key``
An alternative to passing in your S3 credentials to ``SparkConf`` would be
to export them as environment variables:
- ``AWS_ACCESS_KEY_ID=YOUR_KEY``
- ``AWS_SECRET_ACCESS_KEY_ID=YOUR_SECRET_KEY``
target_extent (:class:`~geopyspark.geotrellis.Extent` or ``shapely.geometry.Polygon``, optional): The
            area of interest. Only features inside this ``Extent`` will be returned. Default is ``None``. If
``None``, then all of the features will be returned.
Returns:
:class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`
"""
if target_extent:
if isinstance(target_extent, Polygon):
target_extent = Extent.from_polygon(target_extent)._asdict()
else:
target_extent = target_extent._asdict()
pysc = get_spark_context()
session = SparkSession.builder.config(conf=pysc.getConf()).enableHiveSupport().getOrCreate()
features = pysc._jvm.geopyspark.vectorpipe.io.OSMReader.fromORC(session._jsparkSession, source, target_extent)
return FeaturesCollection(features)
def from_dataframe(dataframe, target_extent=None):
"""Reads OSM data from a Spark ``DataFrame``. The resulting data will be read
in as an instance of :class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`.
Args:
dataframe (DataFrame): A Spark ``DataFrame`` that contains the OSM data.
target_extent (:class:`~geopyspark.geotrellis.Extent` or ``shapely.geometry.Polygon``, optional): The
            area of interest. Only features inside this ``Extent`` will be returned. Default is ``None``. If
``None``, then all of the features will be returned.
Returns:
:class:`~geopyspark.vector_pipe.features_collection.FeaturesCollection`
"""
if target_extent:
if isinstance(target_extent, Polygon):
target_extent = Extent.from_polygon(target_extent)._asdict()
else:
target_extent = target_extent._asdict()
pysc = get_spark_context()
features = pysc._jvm.geopyspark.vectorpipe.io.OSMReader.fromDataFrame(dataframe._jdf, target_extent)
return FeaturesCollection(features)
```
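A hedged sketch of the ``s3a://`` setup described in the `from_orc` docstring; the bucket path, credentials, and bounding box are placeholders, and the conf keys are simply the ones listed above.

```python
# Sketch of reading an OSM ORC extract over s3a://; values are placeholders.
import geopyspark as gps
from pyspark import SparkContext
from shapely.geometry import box
from geopyspark.vector_pipe.osm_reader import from_orc

conf = (gps.geopyspark_conf(appName="osm-orc-example")
        .set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
        .set("spark.hadoop.fs.s3a.access.key", "YOUR_ACCESS_KEY")
        .set("spark.hadoop.fs.s3a.secret.key", "YOUR_SECRET_KEY"))
sc = SparkContext(conf=conf)

# Only features inside this lon/lat bounding box are returned.
aoi = box(-122.45, 37.75, -122.40, 37.80)
features = from_orc("s3a://my-bucket/planet.orc", target_extent=aoi)
```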
#### File: geopyspark/vector_pipe/vector_pipe_protobufcodecs.py
```python
import datetime
from shapely.wkb import loads, dumps
from dateutil import parser
from geopyspark.geopyspark_utils import ensure_pyspark
ensure_pyspark()
from geopyspark.vector_pipe.protobuf.featureMessages_pb2 import (ProtoFeature,
ProtoFeatureCellValue,
ProtoMetadata,
ProtoTags,
ProtoTag,
ProtoCellValue)
from geopyspark.vector_pipe import Feature, Properties, CellValue
# Decoders
def from_pb_tags(pb_tags):
"""Creates a ``dict`` from ``ProtoTags``.
Args:
pb_tags (ProtoTags): The ``ProtoTags`` instance to be converted.
Returns:
dict
"""
if list(pb_tags.tags):
return {tags.key: tags.value for tags in pb_tags.tags}
else:
return {}
def from_pb_properties(pb_metadata):
"""Creates ``Properties`` from ``ProtoMetadata``.
Args:
pb_metadata (ProtoMetadata): The ``ProtoMetadata`` instance to be converted.
Returns:
:class:`~geopyspark.vector_pipe.Properties`
"""
time = parser.parse(pb_metadata.timestamp)
tags = from_pb_tags(pb_metadata.tags)
return Properties(
element_id=pb_metadata.id,
user=pb_metadata.user,
uid=pb_metadata.uid,
changeset=pb_metadata.changeset,
version=pb_metadata.version,
minor_version=pb_metadata.minorVersion,
timestamp=time,
visible=pb_metadata.visible,
tags=tags)
def from_pb_feature_cellvalue(pb_feature_cellvalue):
"""Creates a ``Feature`` with ``properties`` of ``CellValue``
from ``ProtoFeature``.
Args:
pb_feature_cellvalue (ProtoFeatureCellValue): The ``ProtoFeatureCellValue`` instance
to be converted.
Returns:
:class:`~geopyspark.vector_pipe.Feature`
"""
geometry = loads(pb_feature_cellvalue.geom)
cellvalue = CellValue(pb_feature_cellvalue.cellValue.value,
pb_feature_cellvalue.cellValue.zindex)
return Feature(geometry, cellvalue)
def from_pb_feature(pb_feature):
"""Creates a ``Feature`` with ``properties`` of ``Properties``
from ``ProtoFeature``.
Args:
pb_feature (ProtoFeature): The ``ProtoFeature`` instance to be converted.
Returns:
:class:`~geopyspark.vector_pipe.Feature`
"""
metadata = from_pb_properties(pb_feature.metadata)
geometry = loads(pb_feature.geom)
return Feature(geometry=geometry, properties=metadata)
def feature_decoder(proto_bytes):
"""Deserializes the ``ProtoFeature`` bytes into Python.
Args:
proto_bytes (bytes): The ProtoBuf encoded bytes of the ProtoBuf class.
Returns:
:class:`~geopyspark.vector_pipe.Feature`
"""
pb_feature = ProtoFeature.FromString(proto_bytes)
return from_pb_feature(pb_feature)
def feature_cellvalue_decoder(proto_bytes):
"""Deserializes the ``ProtoFeatureCellValue`` bytes into Python.
Args:
proto_bytes (bytes): The ProtoBuf encoded bytes of the ProtoBuf class.
Returns:
:class:`~geopyspark.vector_pipe.Feature`
"""
pb_feature_cellvalue = ProtoFeatureCellValue.FromString(proto_bytes)
return from_pb_feature_cellvalue(pb_feature_cellvalue)
# Encoders
def to_pb_properties(metadata):
"""Converts an instance of ``Properties`` to ``ProtoMetadata``.
    Args:
        metadata (:class:`~geopyspark.vector_pipe.Properties`): An instance of ``Properties``.
    Returns:
        ProtoMetadata
    """
pb_tags = ProtoTags(tags=[ProtoTag(key=k, value=v) for k, v in metadata[8].items()])
return ProtoMetadata(
id=metadata[0],
user=metadata[1],
uid=metadata[2],
changeset=metadata[3],
version=metadata[4],
minorVersion=metadata[5],
timestamp=str(metadata[6]),
visible=metadata[7],
tags=pb_tags)
def to_pb_cellvalue(cv):
"""Converts an instance of ``CellValue`` to ``ProtoCellValue``.
Args:
obj (:class:`~geopyspark.vector_pipe.CellValue`): An instance of ``CellValue``.
Returns:
ProtoCellValue
"""
return ProtoCellValue(value=cv.value, zindex=cv.zindex)
def to_pb_feature(feature):
"""Converts an instance of ``Feature`` with ``properties`` of ``Properties`` to
``ProtoFeature``.
Args:
feature (:class:`~geopyspark.vector_pipe.Feature`): An instance of ``Feature`` to be
encoded.
Returns:
ProtoFeature
"""
geom_bytes = dumps(feature[0])
pb_properties = to_pb_properties(feature[1])
return ProtoFeature(geom=geom_bytes, metadata=pb_properties)
def to_pb_feature_cellvalue(feature):
"""Converts an instance of ``Feature`` with ``properties`` of ``CellValue`` to
``ProtoFeatureCellValue``.
Args:
feature (:class:`~geopyspark.vector_pipe.Feature`): An instance of ``Feature`` to be
encoded.
Returns:
ProtoFeatureCellValue
"""
geom_bytes = dumps(feature[0])
cellvalue = to_pb_cellvalue(feature[1])
return ProtoFeatureCellValue(geom=geom_bytes, cellValue=cellvalue)
def feature_encoder(feature):
"""Encodes a ``Feature`` with ``properties`` of ``Properties`` into ``ProtoFeature`` bytes.
Args:
feature (:class:`~geopyspark.vector_pipe.Feature`): An instance of ``Feature`` to be
encoded.
Returns:
bytes
"""
return to_pb_feature(feature).SerializeToString()
def feature_cellvalue_encoder(feature):
"""Encodes a ``Feature`` with ``properties`` of ``CellValue`` into
``ProtoFeatureCellValue`` bytes.
Args:
feature (:class:`~geopyspark.vector_pipe.Feature`): An instance of ``Feature`` to be
encoded.
Returns:
bytes
"""
return to_pb_feature_cellvalue(feature).SerializeToString()
``` |
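To see the codecs round-trip, here is a hedged sketch; it assumes `Properties` and `Feature` accept the keyword arguments used by the decoders above, and every field value is invented.

```python
# Hedged round-trip check for the feature codecs; every value is invented.
import datetime
from shapely.geometry import Point
from geopyspark.vector_pipe import Feature, Properties
from geopyspark.vector_pipe.vector_pipe_protobufcodecs import (
    feature_encoder, feature_decoder)

props = Properties(element_id=1, user="mapper", uid=42, changeset=7,
                   version=1, minor_version=0,
                   timestamp=datetime.datetime(2018, 1, 1),
                   visible=True, tags={"highway": "residential"})
feature = Feature(geometry=Point(0.0, 0.0), properties=props)

decoded = feature_decoder(feature_encoder(feature))
assert decoded.properties.tags == {"highway": "residential"}
assert decoded.geometry.equals(Point(0.0, 0.0))
```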
{
"source": "jpolchlo/rasterframes",
"score": 2
} |
#### File: python/tests/PyRasterFramesTests.py
```python
import unittest
import numpy as np
from pyrasterframes.rasterfunctions import *
from pyrasterframes.rf_types import *
from pyspark.sql import SQLContext
from pyspark.sql.functions import *
from . import TestEnvironment
class UtilTest(TestEnvironment):
def test_spark_confs(self):
from . import app_name
self.assertEqual(self.spark.conf.get('spark.app.name'), app_name)
self.assertEqual(self.spark.conf.get('spark.ui.enabled'), 'false')
class CellTypeHandling(unittest.TestCase):
def test_is_raw(self):
self.assertTrue(CellType("float32raw").is_raw())
self.assertFalse(CellType("float64ud1234").is_raw())
self.assertFalse(CellType("float32").is_raw())
self.assertTrue(CellType("int8raw").is_raw())
self.assertFalse(CellType("uint16d12").is_raw())
self.assertFalse(CellType("int32").is_raw())
def test_is_floating_point(self):
self.assertTrue(CellType("float32raw").is_floating_point())
self.assertTrue(CellType("float64ud1234").is_floating_point())
self.assertTrue(CellType("float32").is_floating_point())
self.assertFalse(CellType("int8raw").is_floating_point())
self.assertFalse(CellType("uint16d12").is_floating_point())
self.assertFalse(CellType("int32").is_floating_point())
def test_cell_type_no_data(self):
import math
self.assertIsNone(CellType.bool().no_data_value())
self.assertTrue(CellType.int8().has_no_data())
self.assertEqual(CellType.int8().no_data_value(), -128)
self.assertTrue(CellType.uint8().has_no_data())
self.assertEqual(CellType.uint8().no_data_value(), 0)
self.assertTrue(CellType.int16().has_no_data())
self.assertEqual(CellType.int16().no_data_value(), -32768)
self.assertTrue(CellType.uint16().has_no_data())
self.assertEqual(CellType.uint16().no_data_value(), 0)
self.assertTrue(CellType.float32().has_no_data())
self.assertTrue(np.isnan(CellType.float32().no_data_value()))
self.assertEqual(CellType("float32ud-98").no_data_value(), -98.0)
self.assertEqual(CellType("float32ud-98").no_data_value(), -98)
self.assertEqual(CellType("int32ud-98").no_data_value(), -98.0)
self.assertEqual(CellType("int32ud-98").no_data_value(), -98)
self.assertTrue(math.isnan(CellType.float64().no_data_value()))
self.assertEqual(CellType.uint8().no_data_value(), 0)
def test_cell_type_conversion(self):
for ct in rf_cell_types():
self.assertEqual(ct.to_numpy_dtype(),
CellType.from_numpy_dtype(ct.to_numpy_dtype()).to_numpy_dtype(),
"dtype comparison for " + str(ct))
if not ct.is_raw():
self.assertEqual(ct,
CellType.from_numpy_dtype(ct.to_numpy_dtype()),
"GTCellType comparison for " + str(ct))
else:
ct_ud = ct.with_no_data_value(99)
self.assertEqual(ct_ud.base_cell_type_name(),
repr(CellType.from_numpy_dtype(ct_ud.to_numpy_dtype())),
"GTCellType comparison for " + str(ct_ud)
)
class UDT(TestEnvironment):
def setUp(self):
self.create_layer()
def test_mask_no_data(self):
t1 = Tile(np.array([[1, 2], [3, 4]]), CellType("int8ud3"))
self.assertTrue(t1.cells.mask[1][0])
self.assertIsNotNone(t1.cells[1][1])
self.assertEqual(len(t1.cells.compressed()), 3)
t2 = Tile(np.array([[1.0, 2.0], [float('nan'), 4.0]]), CellType.float32())
self.assertEqual(len(t2.cells.compressed()), 3)
self.assertTrue(t2.cells.mask[1][0])
self.assertIsNotNone(t2.cells[1][1])
def test_tile_udt_serialization(self):
from pyspark.sql.types import StructType, StructField
udt = TileUDT()
cell_types = (ct for ct in rf_cell_types() if not (ct.is_raw() or ("bool" in ct.base_cell_type_name())))
for ct in cell_types:
cells = (100 + np.random.randn(3, 3) * 100).astype(ct.to_numpy_dtype())
if ct.is_floating_point():
nd = 33.0
else:
nd = 33
cells[1][1] = nd
a_tile = Tile(cells, ct.with_no_data_value(nd))
round_trip = udt.fromInternal(udt.toInternal(a_tile))
self.assertEquals(a_tile, round_trip, "round-trip serialization for " + str(ct))
schema = StructType([StructField("tile", TileUDT(), False)])
df = self.spark.createDataFrame([{"tile": a_tile}], schema)
long_trip = df.first()["tile"]
self.assertEqual(long_trip, a_tile)
def test_udf_on_tile_type_input(self):
import numpy.testing
df = self.spark.read.raster(self.img_uri)
rf = self.rf
        # create a trivial UDF that does something we already do with raster_functions
@udf('integer')
def my_udf(t):
a = t.cells
return a.size # same as rf_dimensions.cols * rf_dimensions.rows
rf_result = rf.select(
(rf_dimensions('tile').cols.cast('int') * rf_dimensions('tile').rows.cast('int')).alias('expected'),
my_udf('tile').alias('result')).toPandas()
numpy.testing.assert_array_equal(
rf_result.expected.tolist(),
rf_result.result.tolist()
)
df_result = df.select(
(rf_dimensions(df.proj_raster).cols.cast('int') * rf_dimensions(df.proj_raster).rows.cast('int') -
my_udf(rf_tile(df.proj_raster))).alias('result')
).toPandas()
numpy.testing.assert_array_equal(
np.zeros(len(df_result)),
df_result.result.tolist()
)
def test_udf_on_tile_type_output(self):
import numpy.testing
rf = self.rf
        # create a trivial UDF that does something we already do with raster_functions
@udf(TileUDT())
def my_udf(t):
import numpy as np
return Tile(np.log1p(t.cells))
rf_result = rf.select(
rf_tile_max(
rf_local_subtract(
my_udf(rf.tile),
rf_log1p(rf.tile)
)
).alias('expect_zeros')
).collect()
        # almost equal because of different implementations under the hood: C (numpy) versus Java (rf_)
numpy.testing.assert_almost_equal(
[r['expect_zeros'] for r in rf_result],
[0.0 for _ in rf_result],
decimal=6
)
def test_no_data_udf_handling(self):
from pyspark.sql.types import StructType, StructField
t1 = Tile(np.array([[1, 2], [0, 4]]), CellType.uint8())
self.assertEqual(t1.cell_type.to_numpy_dtype(), np.dtype("uint8"))
e1 = Tile(np.array([[2, 3], [0, 5]]), CellType.uint8())
schema = StructType([StructField("tile", TileUDT(), False)])
df = self.spark.createDataFrame([{"tile": t1}], schema)
@udf(TileUDT())
def increment(t):
return t + 1
r1 = df.select(increment(df.tile).alias("inc")).first()["inc"]
self.assertEqual(r1, e1)
def test_udf_np_implicit_type_conversion(self):
import math
import pandas
a1 = np.array([[1, 2], [0, 4]])
t1 = Tile(a1, CellType.uint8())
exp_array = a1.astype('>f8')
@udf(TileUDT())
def times_pi(t):
return t * math.pi
@udf(TileUDT())
def divide_pi(t):
return t / math.pi
@udf(TileUDT())
def plus_pi(t):
return t + math.pi
@udf(TileUDT())
def less_pi(t):
return t - math.pi
df = self.spark.createDataFrame(pandas.DataFrame([{"tile": t1}]))
r1 = df.select(
less_pi(divide_pi(times_pi(plus_pi(df.tile))))
).first()[0]
self.assertTrue(np.all(r1.cells == exp_array))
self.assertEqual(r1.cells.dtype, exp_array.dtype)
class TileOps(TestEnvironment):
def setUp(self):
from pyspark.sql import Row
# convenience so we can assert around Tile() == Tile()
self.t1 = Tile(np.array([[1, 2],
[3, 4]]), CellType.int8().with_no_data_value(3))
self.t2 = Tile(np.array([[1, 2],
[3, 4]]), CellType.int8().with_no_data_value(1))
self.t3 = Tile(np.array([[1, 2],
[-3, 4]]), CellType.int8().with_no_data_value(3))
self.df = self.spark.createDataFrame([Row(t1=self.t1, t2=self.t2, t3=self.t3)])
def test_addition(self):
e1 = np.ma.masked_equal(np.array([[5, 6],
[7, 8]]), 7)
self.assertTrue(np.array_equal((self.t1 + 4).cells, e1))
e2 = np.ma.masked_equal(np.array([[3, 4],
[3, 8]]), 3)
r2 = (self.t1 + self.t2).cells
self.assertTrue(np.ma.allequal(r2, e2))
col_result = self.df.select(rf_local_add('t1', 't3').alias('sum')).first()
self.assertEqual(col_result.sum, self.t1 + self.t3)
def test_multiplication(self):
e1 = np.ma.masked_equal(np.array([[4, 8],
[12, 16]]), 12)
self.assertTrue(np.array_equal((self.t1 * 4).cells, e1))
e2 = np.ma.masked_equal(np.array([[3, 4], [3, 16]]), 3)
r2 = (self.t1 * self.t2).cells
self.assertTrue(np.ma.allequal(r2, e2))
r3 = self.df.select(rf_local_multiply('t1', 't3').alias('r3')).first().r3
self.assertEqual(r3, self.t1 * self.t3)
def test_subtraction(self):
t3 = self.t1 * 4
r1 = t3 - self.t1
# note careful construction of mask value and dtype above
e1 = Tile(np.ma.masked_equal(np.array([[4 - 1, 8 - 2],
[3, 16 - 4]], dtype='int8'),
3, )
)
self.assertTrue(r1 == e1,
"{} does not equal {}".format(r1, e1))
# put another way
self.assertTrue(r1 == self.t1 * 3,
"{} does not equal {}".format(r1, self.t1 * 3))
def test_division(self):
t3 = self.t1 * 9
r1 = t3 / 9
self.assertTrue(np.array_equal(r1.cells, self.t1.cells),
"{} does not equal {}".format(r1, self.t1))
r2 = (self.t1 / self.t1).cells
self.assertTrue(np.array_equal(r2, np.array([[1,1], [1, 1]], dtype=r2.dtype)))
def test_matmul(self):
# if sys.version >= '3.5': # per https://docs.python.org/3.7/library/operator.html#operator.matmul new in 3.5
# r1 = self.t1 @ self.t2
r1 = self.t1.__matmul__(self.t2)
# The behavior of np.matmul with masked arrays is not well documented
# it seems to treat the 2nd arg as if not a MaskedArray
e1 = Tile(np.matmul(self.t1.cells, self.t2.cells), r1.cell_type)
self.assertTrue(r1 == e1, "{} was not equal to {}".format(r1, e1))
self.assertEqual(r1, e1)
class PandasInterop(TestEnvironment):
def setUp(self):
self.create_layer()
def test_pandas_conversion(self):
import pandas as pd
# pd.options.display.max_colwidth = 256
cell_types = (ct for ct in rf_cell_types() if not (ct.is_raw() or ("bool" in ct.base_cell_type_name())))
tiles = [Tile(np.random.randn(5, 5) * 100, ct) for ct in cell_types]
in_pandas = pd.DataFrame({
'tile': tiles
})
in_spark = self.spark.createDataFrame(in_pandas)
out_pandas = in_spark.select(rf_identity('tile').alias('tile')).toPandas()
self.assertTrue(out_pandas.equals(in_pandas), str(in_pandas) + "\n\n" + str(out_pandas))
def test_extended_pandas_ops(self):
import pandas as pd
self.assertIsInstance(self.rf.sql_ctx, SQLContext)
# Try to collect self.rf which is read from a geotiff
rf_collect = self.rf.take(2)
self.assertTrue(
all([isinstance(row.tile.cells, np.ndarray) for row in rf_collect]))
# Try to create a tile from numpy.
self.assertEqual(Tile(np.random.randn(10, 10), CellType.int8()).dimensions(), [10, 10])
tiles = [Tile(np.random.randn(10, 12), CellType.float64()) for _ in range(3)]
to_spark = pd.DataFrame({
't': tiles,
'b': ['a', 'b', 'c'],
'c': [1, 2, 4],
})
rf_maybe = self.spark.createDataFrame(to_spark)
# rf_maybe.select(rf_render_matrix(rf_maybe.t)).show(truncate=False)
# Try to do something with it.
sums = to_spark.t.apply(lambda a: a.cells.sum()).tolist()
maybe_sums = rf_maybe.select(rf_tile_sum(rf_maybe.t).alias('tsum'))
maybe_sums = [r.tsum for r in maybe_sums.collect()]
np.testing.assert_almost_equal(maybe_sums, sums, 12)
# Test round trip for an array
simple_array = Tile(np.array([[1, 2], [3, 4]]), CellType.float64())
to_spark_2 = pd.DataFrame({
't': [simple_array]
})
rf_maybe_2 = self.spark.createDataFrame(to_spark_2)
#print("RasterFrameLayer `show`:")
#rf_maybe_2.select(rf_render_matrix(rf_maybe_2.t).alias('t')).show(truncate=False)
pd_2 = rf_maybe_2.toPandas()
array_back_2 = pd_2.iloc[0].t
#print("Array collected from toPandas output\n", array_back_2)
self.assertIsInstance(array_back_2, Tile)
np.testing.assert_equal(array_back_2.cells, simple_array.cells)
class RasterJoin(TestEnvironment):
def setUp(self):
self.create_layer()
def test_raster_join(self):
# re-read the same source
rf_prime = self.spark.read.geotiff(self.img_uri) \
.withColumnRenamed('tile', 'tile2').alias('rf_prime')
rf_joined = self.rf.raster_join(rf_prime)
self.assertTrue(rf_joined.count(), self.rf.count())
self.assertTrue(len(rf_joined.columns) == len(self.rf.columns) + len(rf_prime.columns) - 2)
rf_joined_2 = self.rf.raster_join(rf_prime, self.rf.extent, self.rf.crs, rf_prime.extent, rf_prime.crs)
self.assertTrue(rf_joined_2.count(), self.rf.count())
self.assertTrue(len(rf_joined_2.columns) == len(self.rf.columns) + len(rf_prime.columns) - 2)
# this will bring arbitrary additional data into join; garbage result
join_expression = self.rf.extent.xmin == rf_prime.extent.xmin
rf_joined_3 = self.rf.raster_join(rf_prime, self.rf.extent, self.rf.crs,
rf_prime.extent, rf_prime.crs,
join_expression)
self.assertTrue(rf_joined_3.count(), self.rf.count())
self.assertTrue(len(rf_joined_3.columns) == len(self.rf.columns) + len(rf_prime.columns) - 2)
# throws if you don't pass in all expected columns
with self.assertRaises(AssertionError):
self.rf.raster_join(rf_prime, join_exprs=self.rf.extent)
def suite():
function_tests = unittest.TestSuite()
return function_tests
unittest.TextTestRunner().run(suite())
``` |
{
"source": "jpolciak/Web_Crawler",
"score": 3
} |
#### File: Web_Crawler/Web_Crawler/crawler.py
```python
import threading
from urllib.request import urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup
class Worker:
base_url = ''
queue = []
crawled = set()
lock = threading.Semaphore(value=1)
def __init__(self, base_url):
self.base_url = base_url
self.queue = [base_url]
@staticmethod
def write_file(path, data):
with open(path, 'a') as f:
f.write(data)
f.close()
def report(self, url):
with self.lock:
print("Successfully crawled", url)
def work(self):
for link in self.queue:
try:
page = urlopen(link)
soup = BeautifulSoup(page, 'lxml')
self.write_file("dump.txt", soup.text)
self.write_file("log.txt", link + "\n")
self.report(link)
self.crawled.add(link)
for upper_domain in soup.find_all('a', href=True):
joined_link = urljoin(self.base_url, upper_domain['href'])
if joined_link not in self.crawled:
self.queue.append(joined_link)
except:
# log any failed URL crawls and continue
self.write_file("error_log.txt", str(link) + "\n")
pass
``` |
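A minimal driver sketch for the `Worker` above; the seed URL is a placeholder and the import path assumes the module is reachable on `PYTHONPATH` as `crawler`.

```python
# Hedged driver for the crawler; the seed URL is a placeholder.
from crawler import Worker

if __name__ == "__main__":
    worker = Worker("https://example.com/")
    # work() drains the queue, appending newly discovered links as it goes,
    # logging each crawled URL to log.txt and the page text to dump.txt.
    worker.work()
```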
{
"source": "jpoler/tit-borrent",
"score": 3
} |
#### File: tit-borrent/bencode/decode.py
```python
import os
import hashlib
import bittorrent.conf as conf
from collections import OrderedDict
class Decoder(object):
def __init__(self, byte_string, wanted_substring=None):
self.bs = byte_string
self.mv = memoryview(byte_string)
self.substrings = {}
# This is ugly but necessary, otherwise nasty surprised happen with
# always being given the same list reference, in the default keyword
self.wanted_substring = wanted_substring
def parse_string(self, offset, length):
return (self.bs[offset:offset+length], offset+length)
def parse_integer(self, offset, *args):
e = self.bs.find(b'e', offset)
return (int(self.bs[offset:e]), e+1)
def parse_list(self, offset, *args):
lst = []
next_offset = offset
while self.bs[next_offset] != ord(b'e'):
func, after_header, arg = self.type_header(offset)
obj, next_offset = func(next_offset, arg)
lst.append(obj)
return (lst, next_offset+1)
def parse_dict(self, offset, *args):
dct = OrderedDict()
after_value = offset
while self.bs[after_value] != ord(b'e'):
print(self.bs[after_value])
f, after_key_header, arg = self.type_header(after_value)
key, after_key = f(after_key_header, arg)
g, after_value_header, arg2 = self.type_header(after_key)
value, after_value = g(after_value_header, arg2)
print(key)
print(self.wanted_substring)
if key == self.wanted_substring:
print(type(key),type(after_key), type(after_value))
self.substrings[key] = self.bs[after_key:after_value]
dct[key] = value
return (dct, after_value+1)
def type_header(self, offset):
if self.bs[offset] == ord(b'i'):
return (self.parse_integer, offset+1, None)
elif self.bs[offset] == ord(b'd'):
return (self.parse_dict, offset+1, None)
colon = self.bs.find(b':', offset)
datatype = self.bs[offset:colon]
print(datatype)
if datatype.isdigit():
return (self.parse_string, colon + 1, int(datatype))
elif datatype == b'l':
return (self.parse_list, colon + 1, None)
def decode(self):
f, after_header, arg = self.type_header(0)
obj, _ = f(after_header, arg)
return obj
if __name__ == '__main__':
TEST_FILE = os.path.join(conf.PROJECT_DIRECTORY, 'data', 'tom.torrent')
with open(TEST_FILE, 'rb') as f:
text = f.read()
print(type(text))
d = Decoder(text, b'info')
print(d.decode())
print(d.substrings)
```
#### File: tit-borrent/client/client.py
```python
import asyncio
import hashlib
import aiohttp
from bittorrent.bencode.decode import Decoder
class Client(object):
'''Runs the whole show.
TODO
'''
def __init__(self, torrent):
self.torrent = torrent
with open(torrent, 'rb') as f:
text = f.read()
        decoder = Decoder(text, wanted_substring=b'info')
decoder.decode()
del text
info_hasher = hashlib.sha1()
info_hasher.update(decoder.substrings[b'info'])
self.info_hash = info_hasher.digest()
    def run(self):
        # The body of run() is not included in this snippet.
        pass
``` |
{
"source": "jpoley/aws-config-rules",
"score": 2
} |
#### File: python/cf_logging_enabled/cf_logging_enabled_test.py
```python
import sys
import json
import unittest
try:
from unittest.mock import MagicMock, patch, ANY
except ImportError:
import mock
from mock import MagicMock, patch, ANY
import botocore
from botocore.exceptions import ClientError
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::CloudFront::Distribution'
#############
# Main Code #
#############
config_client_mock = MagicMock()
sts_client_mock = MagicMock()
class Boto3Mock():
def client(self, client_name, *args, **kwargs):
if client_name == 'config':
return config_client_mock
elif client_name == 'sts':
return sts_client_mock
else:
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
rule = __import__('cf_logging_enabled')
class SampleTest(unittest.TestCase):
rule_parameters = '{\"CentralLoggingBucket\": \"cloudfrontlogs\"}'
def setUp(self):
pass
cf_distribution_log_disabled = {
"configuration": {
"distributionConfig": {
"logging": {
"bucket": "",
"enabled": False
},
},
},
"ARN":"arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6",
"configurationItemCaptureTime": "2018-11-10T08:22:15.826Z",
"awsAccountId": "123456789012",
"configurationItemStatus": "ResourceDiscovered",
"resourceType": "AWS::CloudFront::Distribution",
"resourceId": "arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6",
"resourceName": "CFDistribution"
}
cf_distribution_log_enabled = {
"configuration": {
"distributionConfig": {
"logging": {
"bucket":"cloudfrontlogs" + '.s3.amazonaws.com',
"enabled": True
},
},
},
"ARN":"arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6",
"configurationItemCaptureTime": "2018-11-10T08:22:15.826Z",
"awsAccountId": "123456789012",
"configurationItemStatus": "ResourceDiscovered",
"resourceType": "AWS::CloudFront::Distribution",
"resourceId": "arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6",
"resourceName": "CFDistribution"
}
cf_distribution_log_enabled_wrong_bucket = {
"configuration": {
"distributionConfig": {
"logging": {
"bucket":"im-different-bucket" + '.s3.amazonaws.com',
"enabled": True
},
},
},
"ARN":"arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6",
"configurationItemCaptureTime": "2018-11-10T08:22:15.826Z",
"awsAccountId": "123456789012",
"configurationItemStatus": "ResourceDiscovered",
"resourceType": "AWS::CloudFront::Distribution",
"resourceId": "arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6",
"resourceName": "CFDistribution"
}
def test_cf_distribution_log_enabled(self):
invoking_event = '{"awsAccountId":"123456789012","messageType":"ConfigurationItemChangeNotification","configurationItem":'+json.dumps(self.cf_distribution_log_enabled)+'}'
response = rule.lambda_handler(build_lambda_configurationchange_event(invoking_event, self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6', 'AWS::CloudFront::Distribution'))
assert_successful_evaluation(self, response, resp_expected)
def test_cf_distribution_log_disabled(self):
resp_expected = []
invoking_event = '{"awsAccountId":"123456789012","messageType":"ConfigurationItemChangeNotification","configurationItem":'+json.dumps(self.cf_distribution_log_disabled)+'}'
response = rule.lambda_handler(build_lambda_configurationchange_event(invoking_event, self.rule_parameters), {})
resp_expected.append(build_expected_response('NON_COMPLIANT', 'arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6', 'AWS::CloudFront::Distribution', 'Distribution is not configured to store logs.'))
assert_successful_evaluation(self, response, resp_expected)
def test_cf_distribution_log_enabled_wrong_bucket(self):
invoking_event = '{"awsAccountId":"123456789012","messageType":"ConfigurationItemChangeNotification","configurationItem":'+json.dumps(self.cf_distribution_log_enabled_wrong_bucket)+'}'
response = rule.lambda_handler(build_lambda_configurationchange_event(invoking_event, self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'arn:aws:cloudfront::123456789012:distribution/E1NFJOWF2FZVA6', 'AWS::CloudFront::Distribution', 'Distribution is configured to store logs in an unauthorized bucket.'))
assert_successful_evaluation(self, response, resp_expected)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(testClass, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
testClass.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
testClass.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
testClass.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
testClass.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
testClass.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
testClass.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
testClass.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
testClass.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
testClass.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
testClass.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
testClass.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(testClass, response, customerErrorCode=None, customerErrorMessage=None):
if customerErrorCode:
testClass.assertEqual(customerErrorCode, response['customerErrorCode'])
if customerErrorMessage:
testClass.assertEqual(customerErrorMessage, response['customerErrorMessage'])
testClass.assertTrue(response['customerErrorCode'])
testClass.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
testClass.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
testClass.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
sts_client_mock.reset_mock(return_value=True)
sts_client_mock.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def test_sts_unknown_error(self):
rule.ASSUME_ROLE_MODE = True
sts_client_mock.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = rule.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def test_sts_access_denied(self):
rule.ASSUME_ROLE_MODE = True
sts_client_mock.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = rule.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
``` |
{
"source": "jpoley/lcd",
"score": 2
} |
#### File: lcd/python/app.py
```python
import sys
def handler(event, context):
return 'Hello from AWS Lambda using Python' + sys.version + '!'
``` |
{
"source": "jpolgesek/superdeploytool",
"score": 2
} |
#### File: superdeploytool/modules/versioning.py
```python
import os
import time
def get_version_string(dev = False, safe = True):
version = ["SDT-"]
    try:
        version.append(os.environ['GITLAB_TAG'].replace(".", ""))
    except KeyError:
        version.append("400")
    try:
        version.append(os.environ['CI_COMMIT_SHA'][:8])
    except KeyError:
        version.append("M")
    try:
        version.append(os.environ['CI_JOB_ID'][:8])
    except KeyError:
        version.append(str(int(time.time()))[-8:])
if dev:
version.append("DEV")
if safe:
return "-".join(version)
else:
return ".".join(version)
```
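A hedged usage sketch for the version helper above (the `modules.versioning` import path and the printed values are assumptions; the values shown assume none of the GitLab CI variables are set, so every fallback branch fires):
```python
# Illustrative only: import path and example output are assumptions.
from modules.versioning import get_version_string

safe = get_version_string(dev=True)       # e.g. "SDT--400-M-61234567-DEV"
dotted = get_version_string(safe=False)   # e.g. "SDT-.400.M.61234567"
print(safe, dotted)
```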
#### File: superdeploytool/tasks/cleanup.py
```python
import os
import shutil
class Task:
def __init__(self, cfg, utils, data):
self.cfg = cfg
self.utils = utils
self.data = data
return None
def run(self):
target = self.cfg.target
utils = self.utils
cfg = self.cfg
if cfg.remove_output_after_build and os.path.exists(cfg.output_dir):
shutil.rmtree(cfg.output_dir)
utils.log("Removed build directory - {}".format(cfg.output_dir), level=utils.INFO)
return True
```
#### File: superdeploytool/tasks/minify_css.py
```python
import os
import re
import shutil
import csscompressor
class Task:
def __init__(self, cfg, utils, data):
self.cfg = cfg
self.utils = utils
self.data = data
return None
def run(self):
target = self.cfg.target
utils = self.utils
cfg = self.cfg
utils.log("Minify CSS files", level=utils.INFO)
css_input = ""
css_count = 0
with open(os.path.join(cfg.source_dir, cfg.source_css), "r", encoding="UTF-8") as f:
for line in f.readlines():
if len(line) < 5 or not line.startswith("@import"): continue
line = line.strip()
try:
filename = re.search(r'''(@import url\(")(.*)("\);)''', line)[2]
if 'ie_build' in self.data and self.data['ie_build'] != None:
path = os.path.join(cfg.source_dir, "assets", "ie_css", filename)
else:
path = os.path.join(cfg.source_dir, "assets", "css", filename)
with open(path, "r", encoding="UTF-8") as src:
css_input += src.read() + "\n"
css_count += 1
                except:
                    # silently skip @import lines that do not match or files that cannot be read
                    pass
css_compressed = csscompressor.compress(css_input)
utils.substep("Before:\t {} kB in {} files ({} lines)".format(len(css_input)//1024, css_count, css_input.count("\n")))
utils.substep("After: \t {} kB in 1 file ({} lines)".format(len(css_compressed)//1024, css_compressed.count("\n")+1))
self.data["css_compressed"] = css_compressed
```
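As a quick, standalone illustration of the `@import` extraction used above (the input line is made up, not taken from the project stylesheet):
```python
import re

line = '@import url("layout.css");'  # illustrative input
match = re.search(r'''(@import url\(")(.*)("\);)''', line)
print(match[2])  # -> layout.css, i.e. the bare filename the task then resolves on disk
```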
#### File: superdeploytool/tasks/write_minified_files.py
```python
import os
import re
import shutil
import htmlmin
from pathlib import Path
class Task:
def __init__(self, cfg, utils, data):
self.cfg = cfg
self.utils = utils
self.data = data
return None
def run(self):
target = self.cfg.target
utils = self.utils
cfg = self.cfg
utils.log("Write minified files to build directory", level=utils.INFO)
if 'ie_build' in self.data and self.data['ie_build'] != None:
js_path = Path("/".join([cfg.output_dir, "assets", "ie_js", "c_app.js"]))
css_path = Path("/".join([cfg.output_dir, "assets", "ie_css", "c_style.css"]))
else:
js_path = Path("/".join([cfg.output_dir, "assets", "js", "c_app.js"]))
css_path = Path("/".join([cfg.output_dir, "assets", "css", "c_style.css"]))
js_path.parent.mkdir(parents=True, exist_ok=True)
css_path.parent.mkdir(parents=True, exist_ok=True)
with open(str(js_path), "w", encoding="UTF-8") as f:
f.write(self.data["js_compressed"])
utils.substep("Saved compiled JavaScript to {}".format(js_path))
with open(str(css_path), "w", encoding="UTF-8") as f:
f.write(self.data["css_compressed"])
utils.substep("Saved compiled CSS to {}".format(css_path))
with open(os.path.join(str(cfg.output_dir), str(cfg.source_html)), "r+", encoding="UTF-8") as f:
if 'ie_build' in self.data and self.data['ie_build'] != None:
replaced = re.sub(r"(<!-- %compile_css_start%-->)([\s\S]*)(<!-- %compile_css_end%-->)", "<link rel='stylesheet' href='assets/ie_css/c_style.css?ver={}'>".format(cfg.version), f.read())
replaced = re.sub(r"(<!-- %compile_js_start%-->)([\s\S]*)(<!-- %compile_js_end%-->)", "<script src='assets/ie_js/c_app.js?ver={}'></script>".format(cfg.version), replaced)
else:
replaced = re.sub(r"(<!-- %compile_css_start%-->)([\s\S]*)(<!-- %compile_css_end%-->)", "<link rel='stylesheet' href='assets/css/c_style.css?ver={}'>".format(cfg.version), f.read())
replaced = re.sub(r"(<!-- %compile_js_start%-->)([\s\S]*)(<!-- %compile_js_end%-->)", "<script src='assets/js/c_app.js?ver={}'></script>".format(cfg.version), replaced)
if not target["dev"]:
replaced = replaced.replace("<!--%DEV_ONLY_START%-->", "<!--%DEV_ONLY_START% ")
replaced = replaced.replace("<!--%DEV_ONLY_STOP%-->", " %DEV_ONLY_START% ")
replaced = replaced.replace("<!--%DEPLOYTOOL_ENABLE_START%", "")
replaced = replaced.replace("%DEPLOYTOOL_ENABLE_END%-->", "")
replaced = replaced.replace("%build%", cfg.version)
replaced = replaced.replace("%ver%", cfg.version)
replaced = htmlmin.minify(replaced, remove_empty_space=True, remove_comments=True)
f.seek(0)
f.write(replaced)
f.truncate()
utils.substep("Updated index.html")
``` |
{
"source": "jpolgesek/zseilplan-python",
"score": 3
} |
#### File: zseilplan-python/modules/hasher.py
```python
import hashlib
import json
def hash_output(output):
output = json.loads(output)
hash_input = ""
hash_input += output["_updateDate_min"] + "," + output["_updateDate_max"]
hash_input += json.dumps(output['timetable'], sort_keys=True) #Fails reindexing
#hash_input += json.dumps(output['teachers'], sort_keys=True) #Fails reindexing
hash_input += json.dumps(output['units'], sort_keys=True)
hash_input += json.dumps(output['classrooms'], sort_keys=True)
hash_input += json.dumps(output['teachermap'], sort_keys=True)
hash_input += json.dumps(output['timesteps'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
return str(hex_dig)
def hash_test(output):
output = json.loads(output)
hash_input = output["_updateDate_min"] + "," + output["_updateDate_max"]
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("A: {}".format(hex_dig))
hash_input = json.dumps(output['timetable'], sort_keys=True) #Fails reindexing
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("B: {}".format(hex_dig))
hash_input = json.dumps(output['teachers'], sort_keys=True) #Fails reindexing
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("C: {}".format(hex_dig))
hash_input = json.dumps(output['units'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("D: {}".format(hex_dig))
hash_input = json.dumps(output['classrooms'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("E: {}".format(hex_dig))
hash_input = json.dumps(output['teachermap'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("F: {}".format(hex_dig))
hash_input = json.dumps(output['timesteps'], sort_keys=True)
hash_object = hashlib.sha256(hash_input.encode("UTF-8"))
hex_dig = hash_object.hexdigest()
print("G: {}".format(hex_dig))
return str(hex_dig)
```
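A hedged sketch of how `hash_output` could be used to detect whether a newly generated export differs from the previous one (the file names are assumptions; the input must be a JSON string containing the keys the function reads):
```python
# Assumed usage; file names are illustrative only.
from modules.hasher import hash_output

with open("output_old.json", "r", encoding="UTF-8") as f:
    old_digest = hash_output(f.read())
with open("output_new.json", "r", encoding="UTF-8") as f:
    new_digest = hash_output(f.read())
if old_digest != new_digest:
    print("timetable changed, publish a new version")
```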
#### File: zseilplan-python/modules/overrides.py
```python
import requests
import AdvancedHTMLParser
import json
import datetime
import html
from unidecode import unidecode
# Placeholder, will be replaced by reference to main cfg object
# This is only to satisfy builtin vs code verifier
try:
cfg = None
cfg.teachermap_filename = None
except:
pass
print("SEMI-LEGACY OVERRIDES PARSER!!!!!!!!!")
def search_for_overrides():
r = requests.get("http://www.zseil.edu.pl/zastepstwa/")
r.encoding = "UTF-8"
if r.status_code != 200:
return False
listparser = AdvancedHTMLParser.AdvancedHTMLParser()
listparser.parseStr(r.text)
totalOutput = {}
panel = listparser.getElementById("panel_srodkowy_szerszy").getHTML()
listparser = AdvancedHTMLParser.AdvancedHTMLParser()
listparser.parseStr(panel)
for li in listparser.getElementsByTagName("a"):
url = "http://www.zseil.edu.pl/zastepstwa/{}".format(li.href)
url = url.replace("\\", "/")
z = requests.get(url)
z.encoding = "UTF-8"
if r.status_code != 200:
exit(r.status_code)
if url.endswith(".html"):
print("Zastepstwo w htmlu, parsuje! ({})".format(url))
date_fallback = url.split("-")
parse_text(totalOutput, z.text, date_fallback)
return totalOutput
def parse_text(o, text, date_fallback=None):
text_parser = AdvancedHTMLParser.AdvancedHTMLParser()
text_parser.parseStr(text)
for table in text_parser.getElementsByTagName("table"):
parse_table(o, table.getChildren(), text_parser, date_fallback)
        break  # do not parse any further tables
def parse_table(o, table, html_all=None, date_fallback=None):
output = dict()
cday = ""
cdays = []
for i,row in enumerate(table.getElementsByTagName("tr")):
        if len(row.getChildren()) == 1:  # header row (the one with the date)
            day = row.getChildren()[0].innerText  # single cell, so index 0
            if day.find("Dzie") != -1:  # the date sits in a <th>
print("<th>")
day = day.split(":")[1]
day = day.split("(")[0]
day = day.strip()
            elif html_all != None:  # the date sits in an <h1>
day_ok = False
try:
print("<h1> - a")
day = html_all.getElementsByTagName("h1")[0].innerText
day = day.split(": ")[1]
day = day.split(" (")[0]
day = day.strip()
temp_fix_check = datetime.datetime.strptime(cday, "%d.%m.%Y").date().weekday() + 1
day_ok = True
except:
print("Fallback, bo ktos edytowal recznie html -.-")
if not day_ok:
try:
print("<h1> - b")
day = html_all.getElementsByTagName("h1")[1].innerText
day = day.split(": ")[1]
day = day.split(" (")[0]
day = day.strip()
temp_fix_check = datetime.datetime.strptime(cday, "%d.%m.%Y").date().weekday() + 1
except:
print("Fallback, bo ktos edytowal recznie html -.-")
day = "{}.{}.{}".format(date_fallback[2],date_fallback[1],date_fallback[0].split("/")[-1])
else:
print("Fail, nie znam tego formatu zastepstw")
return
print("Zastepstwa na dzien {}".format(day))
cday = day
cdays.append(day)
        elif len(row.getChildren().getElementsByTagName("th")) == 0:  # not a header row (the one with column names)
lesson = row.getChildren()[0].innerText.replace("\n","")
oldTeacher = unidecode(row.getChildren()[1].innerText.replace("\n",""))
if row.getChildren()[2].innerText.find("IND*") != -1:
                # individual teaching (IND*)
unit = row.getChildren()[2].innerText[row.getChildren()[2].innerText.find("IND*"):].replace("\n","")
unit = unit[4:]
group = -1
elif len(row.getChildren()[2].innerText.split("|")) == 2:
unit = row.getChildren()[2].innerText.split("|")[0].strip()
group = row.getChildren()[2].innerText.split("|")[1].strip()
                # for a specific group
            else:
                # for the whole class
unit = row.getChildren()[2].innerText.strip()
group = -1
subject = row.getChildren()[3].innerText.strip()
classroom = row.getChildren()[4].innerText.strip()
newTeacher = unidecode(row.getChildren()[5].innerText.strip())
comments = row.getChildren()[6].innerText.strip()
oldTeacherShort = unidecode(find_teacher_shortcut(oldTeacher))
newTeacherShort = find_teacher_shortcut(newTeacher)
if group != -1:
if group.find("Grupa-") != -1:
guessedGroup = group.split("Grupa-")[1]
elif group.find("r_") != -1:
guessedGroup = group.split("r_")[1]
else:
guessedGroup = -1
else:
guessedGroup = -1
if newTeacher.find("Uczniowie zwolnieni do domu") != -1 or newTeacher.find("Okienko dla uczniów") != -1 or newTeacher.find("Uczniowie przychodz") != -1: #TODO: Uczniowie przychodzą później
newTeacher = -1
#print("[ Zastepstwo ]")
#print("Godzina: {}".format(lesson))
#print("Za nauczyciela: {} ({})".format(oldTeacher, oldTeacherShort))
#print("Klasa: {}".format(unit))
#print("Grupa: {}".format(group))
#print("Nowy przedmiot: {}".format(subject))
#print("Sala: {}".format(classroom))
#print("Nowy nauczyciel: {} ({})".format(newTeacher, newTeacherShort))
#print("Uwagi: {}".format(comments))
#print()
d = datetime.datetime.strptime(cday, "%d.%m.%Y").date().weekday() + 1
if d not in output:
output[d] = dict()
output[d]['day'] = cday
if lesson not in output[d]:
output[d][lesson] = dict()
if unit not in output[d][lesson]:
output[d][lesson][unit] = []
temp = dict()
temp['subject'] = subject
temp['s'] = classroom
temp['oldTeacherLong'] = oldTeacher
temp['newTeacherLong'] = newTeacher
temp['oldTeacherShort'] = oldTeacherShort
temp['newTeacherShort'] = newTeacherShort
if group != -1:
temp['guessedGroup'] = guessedGroup
temp['comments'] = comments
output[d][lesson][unit].append(temp)
output['_min_date'] = min(cdays)
output['_max_date'] = max(cdays)
if max(cdays) in o:
o[max(cdays)].update(output)
else:
o[max(cdays)] = output
return o
def find_teacher_shortcut(name):
name = unidecode(html.unescape(name))
tm_f = open(cfg.teachermap_filename, "r")
teachermap = json.loads(tm_f.read())
for key in teachermap:
if teachermap[key].lower().find(name.lower()) != -1:
tm_f.close()
return key
tm_f.close()
return "-1"
def generate():
return search_for_overrides()
#generate()
#with open("zastepstwa.html","r", encoding="UTF-8") as inputData:
# totalOutput = {}
# date_fallback = "11.22.3333"
# parse_text(totalOutput, inputData.read(), date_fallback)
# #parseZastepstwa(inputData.read())
#inputData.close()
``` |
{
"source": "jpolitz/lambda-py-paper",
"score": 3
} |
#### File: base/modules/test_1.py
```python
import unittest
class TestClass1(unittest.TestCase):
def test_1(self):
self.assertTrue(True)
def test_11(self):
self.assertTrue(False)
class TestClass2(unittest.TestCase):
def test_2(self):
self.assertTrue(True)
def test_22(self):
self.assertTrue(True)
#def test_23(self):
#self.assertRaises(TypeError, list, 0)
def test_24(self):
self.assertRaises(TypeError, all, 0)
unittest.main()
```
#### File: base/our-tests/bound-and-free.py
```python
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
inst.a
inst.m()
```
#### File: base/our-tests/simple.py
```python
x = 2
def f():
y = 3
print(x)
f()
print(x)
```
#### File: base/pylib/callable.py
```python
def callable(obj):
try:
obj.__call__
except:
# obj has no __call__ attribute, use the primary operator
# until functions become objects with a __call__ attribute.
return ___delta("is-func?", obj)
else:
# No exception, obj has __call__ attribute
return True
___assign("%callable", callable)
```
#### File: base/pylib/function.py
```python
class function(object):
def __get__(self, obj, objtype):
# when used as attribute a function returns a bound method or the function itself
object = ___id("%object")
method = ___id("%method")
NoneType = ___id("%NoneType")
if obj is None and objtype is not NoneType:
# no object to bind, result is the function itself
return self
else:
# result is a method bound to obj with self as the underlying function
new_method = object.__new__(method)
method.__init__(new_method, self, obj)
return new_method
def __getattr__(self, key):
# the __call__ attribute is the function itself
if ___delta("str=", key, "__call__"):
return self
else:
str = ___id("%str")
msg = ___delta("str+", "function object has not attribute ", key, str)
raise AttributeError(msg)
___assign("%function", function)
```
#### File: base/pylib/generator.py
```python
class generator(object):
def __init__(self, init):
init(self)
def __next__(self):
return self.___resume(None)
def send(self, arg):
return self.___resume(arg)
def __iter__(self):
return self
def __list__(self):
return [x for x in self]
___assign("%generator", generator)
```
#### File: base/pylib/method.py
```python
class method(object):
def __init__(self, func, obj):
# __func__ is the underlying function and __self__ the bound object
self.__func__ = func
self.__self__ = obj
def __getattr__(self, key):
# the __call__ attribute is the method itself
if ___delta("str=", key, "__call__"):
return self
else:
str = ___id("%str")
msg = ___delta("str+", "method object has not attribute ", key, str)
raise AttributeError(msg)
def __str__(self):
# we don't have __name__ for functions and classes, yet.
return "(method of " + str(type(self.__self__)) + " object)"
___assign("%method", method)
# classmethod type
# classmethod objects are converted to method objects
# with class as __self__ on attribute retrieval
class classmethod(object):
def __init__(self, func):
# __func__ is the underlying function
self.__func__ = func
def __get__(self, obj, objtype):
# when used as attribute classmethod returns a method bound to the class
object = ___id("%object")
method = ___id("%method")
new_method = object.__new__(method)
method.__init__(new_method, self.__func__, objtype)
return new_method
___assign("%classmethod", classmethod)
# staticmethod type
# staticmethod objects are converted to functions on attribute retrieval
class staticmethod(object):
def __init__(self, func):
# __func__ is the underlying function
self.__func__ = func
def __get__(self, obj, objtype):
# when used as attribute classmethod returns the function
return self.__func__
___assign("%staticmethod", staticmethod)
```
#### File: base/pylib/seq_iter.py
```python
class SeqIter:
def __init__(self,l):
self.l = l
self.i = 0
self.stop = False
def __len__(self):
return len(self.l)
def __list__(self):
l = []
while True:
try:
l.append(self.__next__())
except StopIteration:
break
return l
def __iter__(self):
return self
def __next__(self):
has_length = True
found = False
try:
self.l.__len__()
except AttributeError:
has_length = False
try:
if self.stop:
raise StopIteration()
if has_length and self.i >= self.l.__len__():
self.stop = True
raise StopIteration()
ret = self.l[self.i]
found = True
except IndexError:
raise StopIteration()
except StopIteration:
raise StopIteration()
self.i += 1
if found:
return ret
else:
return None
___assign("%SeqIter", SeqIter)
def iter(l, *args):
callable = ___id("%callable")
if args.__len__() == 1:
if callable(l):
stopwhen = args[0]
return FuncIter(l, stopwhen)
else:
TypeError("iter(v, w): v must be callable")
elif args.__len__() == 0:
try:
return l.__iter__()
except:
try:
if callable(l.__getitem__):
return SeqIter(l)
except:
raise TypeError("object is not iterable")
else:
raise TypeError("iter expect at most 2 arguments")
___assign("%iter", iter)
def next(it, *arg):
if len(arg) == 0:
return it.__next__()
else:
return it.__next__(arg[0])
___assign("%next", next)
class FuncIter:
def __init__(self, func, stopwhen):
self.func = func
self.stopwhen = stopwhen
self.stopped = False
def __list__(self):
l = []
while not self.stopped:
try:
l.append(self.__next__())
except StopIteration:
break
return l
def __next__(self):
f = self.func
v = f()
if v == self.stopwhen:
self.stopped = True
raise StopIteration()
else:
return v
___assign("%FuncIter", FuncIter)
```
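For orientation, the two fallback protocols the shim implements above can be exercised in standard Python; this snippet is illustrative only and not part of the lambda-py library:
```python
import itertools

class Squares:
    # sequence protocol only: no __iter__, just __getitem__
    def __getitem__(self, i):
        if i >= 5:
            raise IndexError
        return i * i

print(list(iter(Squares())))      # wrapped like SeqIter -> [0, 1, 4, 9, 16]

counter = itertools.count().__next__
print(list(iter(counter, 3)))     # callable + sentinel, like FuncIter -> [0, 1, 2]
```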
#### File: base/test-show-scope/func-5.py
```python
def f():
x = 5
return x
```
#### File: base/test-show-scope/func-args-2.py
```python
x = 9
def f(x):
return x
```
#### File: lambda-py-paper/python-lib/unittest.py
```python
import sys
debug = True
def debug_print(s):
if debug:
print('[unittest debug] ' + s)
#def strclass(cls): #for debug
# return "%s.%s" % (cls.__module__, cls.__name__)
def getattr_default(obj, attr):
try:
return getattr(obj, attr)
except AttributeError:
return None
#------------Result---------------#
class TestResult(object):
def __init__(self):
self.failures = []
self.errors = []
self.expectedFailures = []
        self.unexpectedSuccesses = []
self.shouldStop = False
self.testsRun = 0
def startTest(self, test):
self.testsRun += 1
def stopTest(self, test): pass
def startTestRun(self): pass
def stopTestRun(self): pass
def addSuccess(self, test): pass
def addError(self, test, err):
self.errors.append((test, self._exc_info_to_string(err, test)))
def addFailure(self, test, err):
self.failures.append((test, self._exc_info_to_string(err, test)))
def addExpectedFailure(self, test, err):
self.expectedFailures.append((test, self._exc_info_to_string(err, test)))
def addUnexpectedSuccess(self, test):
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
return len(self.failures) == len(self.errors) == 0
def stop(self):
self.shouldStop = True
def _exc_info_to_string(self, err, test):
return "tmp _exc_info holder"
class TextTestResult(TestResult):
def __init__(self):
TestResult.__init__(self)
self.showAll = True # temporarily set to True
self.dots = True #temporarily set to True
def startTest(self, test):
super().startTest(test)
if self.showAll:
print(test)
def addSuccess(self, test):
super().addSuccess(test)
print("ok")
def addError(self, test, err):
super().addError(test, err)
print("ERROR")
def addFailure(self, test, err):
super().addFailure(test, err)
print("FAIL")
def addExpectedFailure(self, test, err):
super().addExpectedFailure(test, err)
print("expected failure")
def addUnexpectedSuccess(self, test):
super().addUnexpectedSuccess(test)
print("unexpected success")
#---------TestSuite-----------#
class BaseTestSuite(object):
def __init__(self, *args):
if len(args) == 0:
tests = ()
else:
tests = args[0]
self._tests = []
self.addTests(tests)
def addTests(self, tests):
if isinstance(tests, str):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def addTest(self, test):
if not callable(test):
raise TypeError(str(test) + ' is not callable')
if isinstance(test, type) and issubclass(test,
(TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated before passing them to addTest()")
self._tests.append(test)
def __call__(self, *args):
# important!
return self.run(*args)
def run(self, result):
pass
class TestSuite(BaseTestSuite):
def run(self, result):
for t in self._tests:
t(result)
return result
#-------------Runner---------------#
class TextTestRunner(object):
def __init__(self):
self.resultclass = TextTestResult
def _makeResult(self):
return self.resultclass()
def run(self, test):
result = self._makeResult()
try:
test(result) #invoke BaseTestSuite.__call__ then
finally:
print("Ran " + str(result.testsRun) + ' tests')
expectedFails = 0
unexpectedSuccesses = 0
try:
expectedFails = len(result.expectedFailures)
except AttributeError:
pass
try:
unexpectedSuccesses = len(result.unexpectedSuccesses)
except AttributeError:
pass
            infos = []
            res = ""
if not result.wasSuccessful():
res = "FAILED( "
failed = len(result.failures)
errored = len(result.errors)
#failed, errored = len(result.failures), len(result.errors)
if failed:
infos.append("failures="+str(failed))
if errored:
infos.append("errors=" + str(errored))
else:
print("OK")
if expectedFails:
infos.append("expected failures=" + str(expectedFails))
if unexpectedSuccesses:
infos.append("unexpected successes=" + str(unexpectedSuccesses))
if infos:
for info in infos:
res += info
res += ' '
res += ')'
print(res)
return result
#---------Loader---------#
class TestLoader(object):
def __init__(self):
self.prefix = 'test'
self.suiteClass = TestSuite
def loadTestsFromModule(self, module):
tests = []
#NOTE: get TestCase obj from module. Is dir implemented?
for name in module.__dict__.keys():
obj = getattr_default(module, name)
if isinstance(obj, type) and issubclass(obj, TestCase):
debug_print('test found: ' + name)
tests.append(self.loadTestsFromTestCase(obj))
tests = self.suiteClass(tests) #return TestSuite
return tests
def loadTestsFromTestCase(self, testCaseClass):
if issubclass(testCaseClass, TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
#each method will be used to construct an instance of a TestCase's subclass
#all the subclasses will be wrapped in TestSuite
suites = []
for names in testCaseNames:
suites.append(testCaseClass(names))
loaded_suite = self.suiteClass(suites)
return loaded_suite
def getTestCaseNames(self, testCaseClass):
#NOTE: getattr
def isTestMethod(attrname):
return attrname[:len(self.prefix)] == self.prefix and \
callable(getattr_default(testCaseClass, attrname))
#NOTE: has dir been implemented?
testFnNames = list(filter(isTestMethod, testCaseClass.__dict__.keys()))
return testFnNames
#---------TestCase-------#
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
class _ExpectedFailure(Exception):
def __init__(self, exc_info):
Exception.__init__(self)
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
pass
class TestCase(object):
failureException = AssertionError
def __init__(self, *args):
if len(args) == 0:
methodName = 'runTest'
else:
methodName = args[0]
self._testMethodName = methodName
testMethod = getattr_default(self, methodName)
if testMethod is None and methodName != 'runTest':
raise ValueError("no such test method in" + self.__class__ + ":" + methodName)
# def __str__(self): #for debug
# return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __str__(self): #for debug
return self._testMethodName + ' (' + str(self.__class__) + ')'
def __call__(self, *args):
# important!
self.run(*args)
def setUp(self): pass
def tearDown(self): pass
def run(self, *args):
if len(args) == 0:
result = TestResult()
else:
result = args[0]
result.startTest(self)
testMethod = getattr_default(self, self._testMethodName)
try:
outcome = _Outcome()
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, True) #TEST specific method!
self._executeTestPart(self.tearDown, outcome)
if outcome.success:
result.addSuccess(self)
else:
#NOTE: do we need outcome.skipped?
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr_default(result, 'addUnexpectedSuccess')
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr_default(result, 'addExpectedFailure')
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
finally:
#result.stopTest(self)
pass
def _executeTestPart(self, function, outcome, *args):
if len(args) == 1:
isTest = args[0]
else:
isTest = False
try:
function()
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
exc_info = sys.exc_info()
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
# start assert Statement
def assertTrue(self, expr, *args):
if len(args) == 0:
msg = "assertTrue error"
else:
msg = args[0]
if not expr:
raise self.failureException(msg)
def assertFalse(self, expr, *args):
if len(args) == 0:
msg = "assertFalse error"
else:
msg = args[0]
if expr:
raise self.failureException(msg)
def assertRaises(self, e, f, *args):
try:
f(*args)
except e:
return
else:
raise self.failureException("assertRaises error")
raise self.failureException("assertRaises error")
def assertEqual(self, first, second, *args):
if len(args) == 0:
msg = "assertEqual Error"
else:
msg = args[0]
if not first == second:
raise self.failureException(msg)
def assertNotEqual(self, first, second, *args):
if len(args) == 0:
msg = "assertEqual Error"
else:
msg = args[0]
if first == second:
raise self.failureException(msg)
def assertIn(self, member, container, *args):
if len(args) == 0:
msg = "assertIn Error"
else:
msg = args[0]
if member not in container:
            raise self.failureException(msg)
def assertNotIn(self, member, container, *args):
if len(args) == 0:
msg = "assertIn Error"
else:
msg = args[0]
if member not in container:
self.failureException(msg)
def assertIs(self, expr1, expr2, *args):
if len(args) == 0:
msg = "assertIs Error"
else:
msg = args[0]
if expr1 is not expr2:
            raise self.failureException(msg)
def assertIsNot(self, expr1, expr2, *args):
if len(args) == 0:
msg = "assertIsNot Error"
else:
msg = args[0]
if expr1 is expr2:
            raise self.failureException(msg)
#------------main----------------
class TestProgram(object):
def __init__(self):
self.module = __main__
self.testLoader = TestLoader()
self.testRunner = TextTestRunner
debug_print('starts to find tests')
self.test = self.testLoader.loadTestsFromModule(self.module)
self.runTests()
def runTests(self):
debug_print('begin to run')
testRunner = self.testRunner()
self.result = testRunner.run(self.test)
main = TestProgram
```
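A minimal sketch of how this shim is meant to be driven (mirroring base/modules/test_1.py earlier in this collection): subclass TestCase, name the methods test*, and call unittest.main() so TestLoader and TextTestRunner pick them up.
```python
import unittest

class TestArithmetic(unittest.TestCase):
    def test_add(self):
        self.assertEqual(1 + 1, 2)
    def test_type_error(self):
        # len() with no argument raises TypeError, which assertRaises expects
        self.assertRaises(TypeError, len)

unittest.main()
```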
#### File: tests/generators/gen-method.py
```python
def g(value = None):
while True:
try:
value = (yield value)
except TypeError:
value = "TypeError"
gen1 = g()
___assertEqual(next(gen1), None)
#resume the execution and sends a value into the generator functions
#the "value" arguments becomes the result of the yield expression
___assertEqual(gen1.send(173),173)
#Raises an exception at the point where generator was paused
___assertEqual("TypeError", gen1.throw(TypeError))
#Exit the Generator
gen1.close()
___assertRaises(StopIteration, gen1.__next__)
```
#### File: tests/generators/send-gen.py
```python
def coroutine(seq):
count = 0
while count < 200:
count += yield
seq.append(count)
seq = []
c = coroutine(seq)
next(c)
___assertEqual(seq, [])
c.send(10)
___assertEqual(seq, [10])
c.send(10)
___assertEqual(seq, [10, 20])
```
#### File: python-reference/dict/dict-hash-effects.py
```python
hashcalls = []
eqcalls = []
cmpcalls = []
class O():
def __init__(self, msg):
self.msg = msg
def __hash__(self):
hashcalls.append(self.msg)
return 5
def __eq__(self, other):
eqcalls.append(self.msg + " == " + other.msg)
return self is other
def __cmp__(self, other):
cmpcalls.append(self.msg + " == " + other.msg)
return id(self) - id(other)
d = {}
o1 = O("o1")
o2 = O("o2")
o3 = O("o3")
def reset():
global hashcalls, eqcalls, cmpcalls
eqcalls = []
hashcalls = []
cmpcalls = []
d[o1] = "1"
assert(hashcalls == ["o1"])
assert(eqcalls == [])
assert(cmpcalls == [])
reset()
d[o2] = "2"
assert(hashcalls == ["o2"])
assert(eqcalls == ["o1 == o2"])
assert(cmpcalls == [])
reset()
d[o3] = "3"
assert(hashcalls == ["o3"])
assert(eqcalls == ["o1 == o3", "o2 == o3"])
assert(cmpcalls == [])
```
#### File: python-reference/exceptions/except-reraise.py
```python
def reraise():
try:
raise TypeError("foo")
except:
try:
raise KeyError("caught")
except KeyError:
pass
raise
___assertRaises(TypeError, reraise)
```
#### File: python-reference/exceptions/nested-reraise.py
```python
def nested_reraise():
raise
def reraise():
try:
raise TypeError("foo")
except:
nested_reraise()
___assertRaises(TypeError, reraise)
```
#### File: python-reference/exceptions/test-finally-reraise.py
```python
def reraise():
try:
raise TypeError("foo")
except:
try:
raise KeyError("caught")
finally:
raise
___assertRaises(KeyError, reraise)
```
#### File: python-reference/functions/func_attr.py
```python
def f():
return f.x
f.x = 22
___assertEqual(f(), 22)
```
#### File: python-reference/generators/gen-arg.py
```python
def f():
n = yield 1
yield n
g = f()
assert next(g) == 1
assert g.send(2) == 2
```
#### File: python-reference/generators/gen-list.py
```python
def f():
n = 1
while n < 10:
yield n
n += 1
assert [x for x in f()] == [1,2,3,4,5,6,7,8,9]
```
#### File: python-reference/generators/gen-return.py
```python
def f():
yield 1
return
yield 3
g = f()
assert next(g) == 1
try:
next(g)
except StopIteration:
pass
else:
raise Exception("return inside of generator raises StopIteration")
try:
next(g)
except StopIteration:
pass
else:
raise Exception("should have gotten StopIteration")
```
#### File: python-reference/multiple-inheritance/methods.py
```python
class Foo:
def getx(self):
return 1
foo = Foo()
# Unbound methods can be called with object as argument.
___assertEqual(Foo.getx(foo), 1)
# calling a bound method
___assertEqual(foo.getx(), 1)
# The we access a class method from an object, it should become a bound method
getx = foo.getx
___assertEqual(getx(), 1)
# When we set a function as an attribute of an object, it should not treated as method
foo.f = lambda: 42
___assertEqual(foo.f(), 42)
# If we don't pass any argument, it should raise TypeError
___assertRaises(TypeError, lambda: Foo.getx())
```
#### File: python-reference/property/override_getattr_descr.py
```python
class Descriptor(object):
def __get__(self, obj, objtype):
def f(name):
return 'got ' + name
return f
class B(object):
__getattr__ = Descriptor()
assert(getattr(B(), "key") == "got key")
```
#### File: python-reference/property/simple_property_decorator.py
```python
class C(object):
def __init__(self):
self.x = 42
@property
def f(self):
self.x += 1
return self.x
@f.setter
def f(self, value):
self.x = value
@f.deleter
def f(self):
del self.x
c = C()
assert c.x == 42
assert c.f == 43
c.f = 55
assert c.x == 55
assert c.f == 56
del c.f
assert not hasattr(c, 'x')
assert not hasattr(c, 'f')
assert hasattr(C, 'f')
```
#### File: python-reference/scope/global-in-parallel-nested-functions.py
```python
def f():
y = 1
def g():
global y
return y
def h():
return y + 1
return g, h
y = 9
g, h = f()
___assertEqual(g(), 9)
___assertEqual(h(), 2)
```
#### File: python-reference/scope/mixed-freevars-and-cellvars.py
```python
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
___assertEqual(h(), 39)
```
#### File: python-reference/scope/nesting-global-under-local.py
```python
x = 7
def f():
x = 1
def g():
global x
def i():
def h():
return x
return h()
return i()
return g()
___assertEqual(f(), 7)
___assertEqual(x, 7)
```
#### File: python-reference/scope/nonlocal-class.py
```python
def f(x):
class c:
nonlocal x
x += 1
def get(self):
return x
return c()
c = f(0)
___assertEqual(c.get(), 1)
___assertNotIn("x", c.__class__.__dict__)
```
#### File: python-reference/scope/simple-and-rebinding.py
```python
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
___assertEqual(inc(1), 2)
___assertEqual(plus10(-2), 8)
```
#### File: python-reference/scope/unboundlocal-after-del.py
```python
def errorInOuter():
y = 1
del y
print(y)
def inner():
return y
def errorInInner():
def inner():
return y
y = 1
del y
inner()
___assertRaises(UnboundLocalError, errorInOuter)
___assertRaises(NameError, errorInInner)
```
#### File: tests/scope/multiple-locals-calls.py
```python
def f():
x = 3
y = 4
a = locals()
#print(a) # {'x': 3, 'y': 4}
z = 2
b = locals()
#print(b) # {'x': 3, 'y': 4, 'z': 2, 'a': a}
return a, b
a, b = f()
___assertEqual(a,{'x': 3, 'y': 4, 'z': 2, 'a': a})
___assertEqual(b,{'x': 3, 'y': 4, 'z': 2, 'a': a})
___assertIs(a, b)
```
#### File: tests/scope/nonlocal-from-class-body.py
```python
class C:
x = 3
def f(self):
nonlocal x
x = 2
return x
c = C()
``` |
{
"source": "jpollock/akkaserverless-python-sdk",
"score": 2
} |
#### File: akkaserverless-python-sdk/akkaserverless/discovery_servicer.py
```python
import platform
from dataclasses import dataclass
from logging import getLogger
from pprint import pprint
from typing import List
from google.protobuf.descriptor_pb2 import FileDescriptorProto, FileDescriptorSet
from google.protobuf.descriptor_pool import Default
from google.protobuf.empty_pb2 import Empty
from akkaserverless.akkaserverless.protocol import discovery_pb2
from akkaserverless.action_protocol_entity import Action
from akkaserverless.akkaserverless.protocol.discovery_pb2_grpc import DiscoveryServicer
from akkaserverless.event_sourced_entity import EventSourcedEntity
from akkaserverless.value_entity import ValueEntity
from akkaserverless.view import View
logger = getLogger()
@dataclass
class AkkaServerlessEntityDiscoveryServicer(DiscoveryServicer):
#components: List[Component]
event_sourced_entities: List[EventSourcedEntity]
value_entities: List[ValueEntity]
views: List[View]
action_protocol_entities: List[Action]
def Discover(self, request, context):
logger.info("discovering.")
pprint(request)
descriptor_set = FileDescriptorSet()
for entity in self.event_sourced_entities + self.value_entities + self.action_protocol_entities:
logger.info(f"entity: {entity.name()}")
for descriptor in entity.file_descriptors:
logger.info(f"discovering {descriptor.name}")
logger.info(f"SD: {entity.service_descriptor.full_name}")
from_string = FileDescriptorProto.FromString(descriptor.serialized_pb)
descriptor_set.file.append(from_string)
for view in self.views:
logger.info(f"view: {view.name()}")
for descriptor in view.file_descriptors:
logger.info(f"discovering {descriptor.name}")
logger.info(f"SD: {view.service_descriptor.full_name}")
from_string = FileDescriptorProto.FromString(descriptor.serialized_pb)
descriptor_set.file.append(from_string)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("google/protobuf/empty.proto").serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("akkaserverless/eventing.proto").serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("akkaserverless/annotations.proto").serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("akkaserverless/component.proto").serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("akkaserverless/views.proto").serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default()
.FindFileByName("google/protobuf/descriptor.proto")
.serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("google/api/annotations.proto").serialized_pb
)
)
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("google/api/http.proto").serialized_pb
)
)
'''
# Commenting out below; not sure why it is causing a problem
'''
'''
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("google/api/httpbody.proto").serialized_pb
)
)
'''
descriptor_set.file.append(
FileDescriptorProto.FromString(
Default().FindFileByName("google/protobuf/any.proto").serialized_pb
)
)
spec = discovery_pb2.Spec(
service_info=discovery_pb2.ServiceInfo(
service_name="",
service_version="0.1.0",
service_runtime="Python "
+ platform.python_version()
+ " ["
+ platform.python_implementation()
+ " "
+ platform.python_compiler()
+ "]",
support_library_name="akkaserverless-python-support",
support_library_version="0.0.1",
protocol_major_version=0,
protocol_minor_version=7,
),
components=[
discovery_pb2.Component(
component_type=entity.component_type(),
service_name=entity.service_descriptor.full_name,
entity=discovery_pb2.EntitySettings(entity_type=entity.entity_type)
)
for entity in self.event_sourced_entities
+ self.value_entities
],
proto=descriptor_set.SerializeToString(),
)
# handling views; has to be a way to do this differently, as part of above
spec.components.extend(
discovery_pb2.Component(
component_type=entity.component_type(),
service_name=entity.service_descriptor.full_name,
)
for entity in self.views + self.action_protocol_entities
)
return spec
def ReportError(self, request, context):
logger.error(f"Report error: {request}")
pprint(request)
return Empty()
def ProxyTerminated(self, request, context):
logger.info(f"Proxy Terminated: {request}")
return Empty()
def HealthCheck(self, request, context):
return Empty()
```
#### File: akkaserverless-python-sdk/akkaserverless/replicated_context.py
```python
from dataclasses import dataclass, field
from typing import Any, List
from akkaserverless.contexts import ClientActionContext
from akkaserverless.akkaserverless.component.component_pb2 import Forward, SideEffect
@dataclass
class ReplicatedEntityCommandContext(ClientActionContext):
"""An value entity command context.
Command Handler Methods may take this is a parameter. It allows emitting
new events in response to a command, along with forwarding the result to other
entities, and performing side effects on other entities"""
command_name: str
command_id: int
entity_id: str
sequence: int
errors: List[str] = field(default_factory=list)
effects: List[SideEffect] = field(default_factory=list)
forward: Forward = None
state = None
def set_state(self, state):
"""
        Record the given value as the new state of the replicated entity in
        response to the current command
"""
self.state = state
```
#### File: akkaserverless/replicated/multi_map.py
```python
from akkaserverless.replicated.data import ReplicatedData
from akkaserverless.replicated.set import ReplicatedSet
class ReplicatedMultiMap(ReplicatedData):
def __init__(self):
self.entries = {}
self.removed = set()
self.cleared = False
def get_or_create(self, key):
if key in self.entries:
return self.entries[key]
else:
set = ReplicatedSet()
self.entries[key] = set
return set
def get(self, key):
return self.entries[key].current_value if key in self.entries else None
def put(self, key, item):
self.get_or_create(key).add(item)
def remove(self, key, item):
self.get_or_create(key).remove(item)
self.removed.add(item)
def keys(self):
return self.entries.keys()
def size(self):
        return sum(len(x.current_value) for x in self.entries.values())
def is_empty(self):
return len(self.entries) == 0
def clear(self):
self.entries.clear()
self.removed.clear()
self.cleared = True
def delta(self):
#deltas = set(zip(map(lambda x: x.delta if x.has_delta() else None, self.entries.current_values())))
updated = {}
for key in self.entries:
current_value = self.entries[key]
if current_value.has_delta():
updated[key] = current_value.delta()
return self.cleared, self.removed, updated
#override def hasDelta: Boolean = cleared || removed.nonEmpty || counters.current_values.exists(_.hasDelta)
def has_delta(self):
        return self.cleared or len(self.removed) > 0 or any(x.has_delta() for x in self.entries.values())
def reset_delta(self):
        for entry in self.entries.values():
            entry.reset_delta()
        self.removed = set()
self.cleared = False
def apply_delta(self, delta):
cleared, removed, updated = delta
if cleared:
self.entries.clear()
        for key in removed:
            self.entries.pop(key, None)
for update in updated:
self.get_or_create(update).apply_delta(updated[update])
#self.current_current_value += delta.counter.change
d = ReplicatedMultiMap()
d.put('reqrw', 2)
d.put('reqrwasdasd', 2)
d.put('reqrwasdasd', 3)
d.put('reqrwasdasd', 34)
print(d.get('reqrwasdasd'))
#d.remove('reqrwasdasd', 34)
#print(d.has_delta())
#delta = d.delta()
#print(delta[0])
#print(d.entries)
#d.apply_delta(delta)
#print(d.entries)
##print(d.get('reqrw'))
#print(d.size())
#d.clear()
#print(d.has_delta())
```
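An illustrative local delta round trip for the multi-map (plain Python objects, not wired into the Akka Serverless proxy protocol; output assumes the set delta semantics defined above):
```python
local = ReplicatedMultiMap()
local.put("fruit", "apple")
local.put("fruit", "pear")

replica = ReplicatedMultiMap()
replica.apply_delta(local.delta())   # replays (cleared, removed, updated) on the copy
local.reset_delta()

print(replica.get("fruit"))          # -> {'apple', 'pear'}
```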
#### File: akkaserverless/replicated/set.py
```python
from akkaserverless.replicated.data import ReplicatedData
class ReplicatedSet(ReplicatedData):
def __init__(self):
self.current_value = set()
self.added = set()
self.removed = set()
self.cleared = False
def size(self):
return len(self.current_value)
def contains(self, item):
return item in self.current_value
def add(self, item):
if item in self.current_value:
return False
else:
self.current_value.add(item)
self.added.add(item) # encode - need todo
def remove(self, item):
if item not in self.current_value:
return False
else:
# encode - need todo
self.current_value.remove(item)
self.removed.add(item) # encode - need todo
def clear(self):
self.current_value.clear()
self.added.clear()
self.removed.clear()
self.cleared = True
def delta(self):
return self.cleared, self.removed, self.added
def has_delta(self):
        return self.cleared or len(self.removed) > 0 or len(self.added) > 0
def reset_delta(self):
self.added.clear()
self.removed.clear()
self.cleared = False
def apply_delta(self, delta):
        cleared, removed, added = delta
        if cleared:
            self.current_value.clear()
        self.current_value -= removed
        self.current_value.update(added)
'''
d = ReplicatedSet()
#print(isinstance(d, ReplicatedData))
d.add("dd")
d.add(1)
#ßd.add(ReplicatedData())
#print(d.current_value)
d.remove("dd")
print(d.current_value)
print(d.has_delta())
print(d.delta())
#d.apply_delta(d.delta())
#print(d.current_value)
'''
```
#### File: akkaserverless-python-sdk/bin/create_app.py
```python
import argparse
import git
import enum
class SomeEnum(enum.IntEnum):
ACTION = 1
VALUE_ENTITY = 2
EVENTSOURCED_ENTITY = 3
REPLICATED_ENTITY = 4
# magic methods for argparse compatibility
def __str__(self):
return self.name.lower()
def __repr__(self):
return str(self)
@staticmethod
def argparse(s):
try:
return SomeEnum[s.upper()]
except KeyError:
return s
repositories = {
SomeEnum.ACTION: '<EMAIL>:jpollock/akka-serverless-starter-python-action.git',
SomeEnum.VALUE_ENTITY: '<EMAIL>:jpollock/akka-serverless-starter-valueentity.git',
SomeEnum.EVENTSOURCED_ENTITY: '<EMAIL>:jpollock/akka-serverless-starter-python-eventsourcedentity.git',
SomeEnum.REPLICATED_ENTITY: '<EMAIL>:jpollock/akka-serverless-starter-python.git',
}
def main():
parser = argparse.ArgumentParser(description='Create a starter Python project for Akka Serverless')
parser.add_argument(
'--location', metavar='directory', type=str, required=True,
help='specify file of words to build the word cloud (default: stdin)')
parser.add_argument('--template', metavar='template type (action, value_entity, eventsourced_entity, replicated_entity)',
type=SomeEnum.argparse, choices=list(SomeEnum))
args = parser.parse_args()
template_repo = repositories[args.template]
git.Repo.clone_from(template_repo, args.location)
if __name__ == "__main__":
# execute only if run as a script
main()
```
#### File: jpollock/akkaserverless-python-sdk/setup.py
```python
import os
import pathlib
from setuptools import find_packages, setup
# Load version in akkaserverless package.
from setuptools.command.build_py import build_py
exec(open("akkaserverless/version.py").read())
PROTOBUF_VERSION = "master"
version = __version__ # noqa
name = "akkaserverless"
print(f"package name: {name}, version: {version}", flush=True)
proto_lib_roots = ["akkaserverless"]
#proto_roots = ["akkaserverless"]
class FetchBuildProtosCommand(build_py):
"""fetch libs and install the protocol buffer generated sources."""
def run(self):
os.system(f"scripts/prepare.sh {PROTOBUF_VERSION}")
for proto_root in proto_lib_roots:
for root, subdirs, files in os.walk(proto_root):
for file in [f for f in files if f.endswith(".proto")]:
file_path = pathlib.Path(root) / file
destination = "."
print(f"compiling {file_path} to {destination}")
command = f"python -m grpc_tools.protoc {' '.join([' -I ' + i for i in proto_lib_roots])} --python_out={proto_root} --grpc_python_out={proto_root} {file_path}" # noqa
os.system(command)
# the hacking to get files matched up
file_wo_ext = str(file_path).replace(".proto", "")
command = f"perl -i -pe 's/from akkaserverless/from akkaserverless.akkaserverless/g' {file_wo_ext}_pb2.py"
os.system(command)
command = f"perl -i -pe 's/from akkaserverless/from akkaserverless.akkaserverless/g' {file_wo_ext}_pb2_grpc.py"
os.system(command)
return super().run()
packages = find_packages(exclude=[])
print(f"packages: {packages}")
setup(
name=name,
version=version,
url="https://github.com/jpollock/akkaserverless-python-sdk",
license="Apache 2.0",
description="Akka Serverless Python Support Library",
packages=packages,
package_data={
"": ["*.proto"],
"": []
},
#long_description=open("Description.md", "r").read(),
#long_description_content_type="text/markdown",
zip_safe=False,
scripts=["bin/fetch-akkaserverless-pb.sh", "bin/compile.sh", "bin/prepare.sh", "bin/start.sh", "bin/docker_build.sh", "bin/docker_push.sh"],
install_requires=[
"attrs>=19.3.0",
"google-api>=0.1.12",
"googleapis-common-protos >= 1.51.0",
"grpcio>=1.31.0",
"grpcio-tools>=1.31.0",
"protobuf>=3.11.3",
"pytest>=6.2.4",
"six>=1.14.0",
"grpcio-reflection>=1.31.0",
"docker",
],
cmdclass={
"build_py": FetchBuildProtosCommand,
},
)
``` |
{
"source": "jpolton/DeeBore",
"score": 3
} |
#### File: jpolton/DeeBore/deebore.py
```python
import os
import sys
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import sklearn.metrics as metrics
import pytz
import pickle
if(0): # Use the COAsT files, in e.g. coast_dev
coastdir = os.path.dirname('/Users/jeff/GitHub/COAsT/coast')
sys.path.insert(0, coastdir)
from coast.tidegauge import Tidegauge
from coast.general_utils import day_of_week
from coast.stats_util import find_maxima
else: # Use the COAsT package in e.g. workshop_env
#from coast.tidegauge import Tidegauge
from shoothill_api.shoothill_api import GAUGE
from coast.general_utils import day_of_week
from coast.stats_util import find_maxima
import scipy.signal # find_peaks
import logging
logging.basicConfig(filename='bore.log', filemode='w+')
logging.getLogger().setLevel(logging.DEBUG)
################################################################################
class OpenWeather:
"""
Class to load in an export OpenWeather history file at Hawarden Airport into
an xarray dataset.
"""
def __init__(self):
self.dataset = None
#%% Load method
@classmethod
def read_openweather_to_xarray(cls, fn_openweather, date_start=None, date_end=None):
"""
For reading from a single OpenWeather csv history file into an
xarray dataset.
If no data lies between the specified dates, a dataset is still created
containing information on the gauge, but the time dimension will
be empty.
The data takes the form:
dt,dt_iso,timezone,city_name,lat,lon,temp,feels_like,temp_min,temp_max,pressure,sea_level,grnd_level,humidity,wind_speed,wind_deg,rain_1h,rain_3h,snow_1h,snow_3h,clouds_all,weather_id,weather_main,weather_description,weather_icon
1104537600,2005-01-01 00:00:00 +0000 UTC,0,hawarden airport,53.176908,-2.978784,7.63,6.95,7.54,7.74,1024,,,99,1.5,150,,,,,75,803,Clouds,broken clouds,04n
1104541200,2005-01-01 01:00:00 +0000 UTC,0,hawarden airport,53.176908,-2.978784,4.83,2.61,4.54,7.54,1023,,,99,2.6,170,,,,,28,802,Clouds,scattered clouds,03n
...
Parameters
----------
fn_openweather (str) : path to OpenWeather location file
date_start (datetime) : start date for returning data
date_end (datetime) : end date for returning data
Returns
-------
xarray.Dataset object.
E.g.
Coordinates:
* time (time) datetime64[ns] 2005-01-01 ... 2021-11-08T23:00:00
Data variables:
wind_speed (time) float64 1.5 2.6 4.6 4.1 5.1 ... 3.6 4.12 0.89 4.02 2.68
wind_deg (time) int64 150 170 200 220 210 200 ... 180 190 210 117 239 226
            longitude float64 -2.979
            latitude float64 53.18
site_name object 'hawarden airport'
"""
try:
dataset = cls.read_openweather_data(fn_openweather, date_start, date_end)
except:
raise Exception("Problem reading OpenWeather file: " + fn_openweather)
# Attributes
dataset["longitude"] = float(dataset["lat"][0])
dataset["latitude"] = float(dataset["lon"][0])
dataset["site_name"] = str(dataset["city_name"][0])
dataset = dataset.drop_vars(["lon", "lat", "city_name"])
return dataset
@classmethod
def read_openweather_data(cls, filnam, date_start=None, date_end=None):
"""
Reads NRW data from a csv file.
Parameters
----------
filnam (str) : path to OpenWeather file
date_start (np.datetime64) : start date for returning data.
date_end (np.datetime64) : end date for returning data.
Returns
-------
xarray.Dataset containing times, wind_speed, wind_deg, lat, lon, city_name
"""
import datetime
# Initialise empty dataset and lists
dataset = xr.Dataset()
# Define custom data parser
custom_date_parser = lambda x: datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S +0000 UTC")
data = pd.read_csv(filnam, delimiter=',', parse_dates=['dt_iso'], date_parser=custom_date_parser)
data.rename(columns={'dt_iso':'time'}, inplace=True)
data.set_index('time', inplace=True)
data.drop(columns=['dt', 'timezone', 'temp',
'feels_like', 'temp_min', 'temp_max', 'pressure', 'sea_level',
'grnd_level', 'humidity', 'rain_1h',
'rain_3h', 'snow_1h', 'snow_3h', 'clouds_all', 'weather_id',
'weather_main', 'weather_description', 'weather_icon'], inplace=True)
dataset = data.to_xarray()
if date_start != None:
dataset = dataset.where(dataset.time >= date_start)
if date_end != None:
dataset = dataset.where(dataset.time <= date_end)
# Assign local dataset to object-scope dataset
return dataset
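    # Hedged usage sketch (not called anywhere in this module; the file name and
    # dates below are illustrative only, not taken from the repository):
    #   met = OpenWeather.read_openweather_to_xarray("archive/hawarden.csv",
    #                                                date_start=np.datetime64("2020-01-01"),
    #                                                date_end=np.datetime64("2021-01-01"))
    #   print(met.wind_speed.mean(), met.site_name.values)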
#%% ############################################################################
class Controller():
"""
This is where the main things happen.
Where user input is managed and methods are launched
"""
############################################################################
#%% Initialising and Finishing methods
############################################################################
def __init__(self):
"""
Look for pickle file. If exists load it.
Initialise main controller.
"""
self.load_databucket()
logging.info("run interface")
self.load_bore_flag = False
self.run_interface()
def load_databucket(self):
"""
Auto load databucket from pickle file if it exists.
"""
#global DATABUCKET_FILE
#databucket = DataBucket()
logging.info("Auto load databucket from pickle file if it exists")
print("Add to pickle file, if it exists")
try:
if os.path.exists(DATABUCKET_FILE):
template = "...Loading (%s)"
print(template%DATABUCKET_FILE)
with open(DATABUCKET_FILE, 'rb') as file_object:
self.bore = pickle.load(file_object)
self.load_bore_flag = True
else:
print("... %s does not exist"%DATABUCKET_FILE)
except KeyError:
print('ErrorA ')
except (IOError, RuntimeError):
print('ErrorB ')
def pickle_bore(self):
""" save copy of self.bore into pickle file, if requested """
print('Pickle data.')
os.system('rm -f '+DATABUCKET_FILE)
if(1):
with open(DATABUCKET_FILE, 'wb') as file_object:
pickle.dump(self.bore, file_object)
else:
print("Don't save as pickle file")
return
def export_to_csv(self):
"""
Export the bore xr.Dataset to a CSV file for sharing
"""
print('Export data to csv. NOT IMPLEMENTED')
pass
def run_interface(self):
"""
Application's main loop
Get user input and respond
"""
print(INSTRUCTIONS)
while True:
command = input("What do you want to do? ")
if command == "q":
print("run_interface: quit")
logging.info("quit") # Function call.
ans = input('Save as pickle file?[Y/n]')
if ans == "n":
break
else:
self.pickle_bore()
break
elif command == "i":
print(INSTRUCTIONS)
elif command == "all":
print('load and process all data')
self.load_csv()
print('load and process measured (bodc) data')
self.load_and_process(source="bodc", HLW_list=["FW", "HW", "LW"])
#self.load_and_process(source="bodc", HLW="LW")
#self.load_and_process(source="bodc", HLW="FW")
print('load and process measured (API) data')
self.load_and_process(source="api", HLW_list=["HW", "LW", "FW"])
#self.load_and_process(source="api", HLW="LW")
#self.load_and_process(source="api", HLW="FW")
print('load and process CTR data. Obs + API')
self.get_river_data(HLW_list=["LW"])
print('load and process harmonic data')
self.load_and_process(source="harmonic", HLW_list=["HW", "LW"])
#self.load_and_process(source="harmonic", HLW="LW")
print('load and process harmonic reconstructed data')
self.load_and_process(source="harmonic_rec", HLW_list=["HW", "LW"])
#self.load_and_process(source="harmonic_rec", HLW="LW")
elif command == "0":
print('load bore observations')
self.load_csv()
elif command == "h":
print('load and process harmonic data')
if not self.load_bore_flag: self.load_csv()
self.load_and_process(source="harmonic")
elif command == "hrec":
print('load and process harmonic reconstructed data')
if not self.load_bore_flag: self.load_csv()
self.load_and_process(source="harmonic_rec")
elif command == "b":
print('load and process measured (bodc) data')
if not self.load_bore_flag: self.load_csv()
self.load_and_process(source="bodc")
elif command == "a":
print('load and process measured (API) data')
if not self.load_bore_flag: self.load_csv()
self.load_and_process(source="api")
elif command == "r":
print('load and process measured (API) river data')
if not self.load_bore_flag: self.load_csv()
self.get_river_data()
elif command == "m":
print("load and process met data")
if not self.load_bore_flag: self.load_csv()
self.get_met_data()
elif command == "2":
print('show bore dataset')
self.show()
elif command == "3":
print('plot bore data (lag vs tidal height')
plt.close('all');self.plot_lag_vs_height('bodc')
plt.close('all');self.plot_lag_vs_height('bodc', HLW="FW")
plt.close('all');self.plot_lag_vs_height('all')
plt.close('all');self.plot_lag_vs_height('harmonic')
plt.close('all');self.plot_lag_vs_height('harmonic_rec')
plt.close('all');self.plot_lag_vs_height('api')
plt.close('all');self.plot_lag_vs_height('api', HLW="FW")
elif command == "4":
print('plot difference between predicted and measured (lag vs tidal height)')
plt.close('all');self.plot_surge_effect('api')
plt.close('all');self.plot_surge_effect('bodc')
elif command == "d1":
print('load and plot HLW data')
self.load_and_plot_hlw_data()
elif command == "d2":
print("shoothill dev")
self.shoothill()
elif command == "d3":
print('Explore combinations of HLW times and heights for best fit')
self.fits_to_data(qc_flag=True)
self.fits_to_data(qc_flag=False)
elif command == "d4":
print('Plot combinations of HLW times, heights and rivers')
self.combinations_lag_hlw_river()
elif command == "d5":
print('Explore how rivers affect bore timing')
self.river_lag_timing()
elif command == "6":
self.predict_bore()
elif command == "x":
print('Export data')
self.export_to_csv()
elif command == "rm":
print('Remove pickle file')
if os.path.exists(DATABUCKET_FILE):
os.remove(DATABUCKET_FILE)
else:
print("Cannot delete the pickle file as it doesn't exist")
#self.load_databucket()
else:
template = "run_interface: I don't recognise (%s)"
print(template%command)
############################################################################
#%% Load and process methods
############################################################################
def load_and_process(self, source:str="harmonic", HLW_list=["HW"]):
"""
Performs sequential steps to build up the bore object.
1. Load Gladstone Dock data (though this might also be loaded from the obs logs)
2. Calculate the time lag between Gladstone and Saltney events.
3. Perform a linear fit to the time lag.
Inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'harmonic_rec' - reconstruct time series from harmonic constants
'bodc' - measured and processed data
'api' - load recent, unprocessed data from shoothill API
HLW: [LW/HW] - the data is either processed for High or Low water events
"""
print('loading '+source+' tide data')
self.get_Glad_data(source=source, HLW_list=HLW_list)
#self.compare_Glad_HLW()
print('Calculating the Gladstone to Saltney time difference')
self.calc_Glad_Saltney_time_lag(source=source, HLW_list=HLW_list)
print('Process linear fit. Calc and save')
self.process_fit(source=source, HLW_list=HLW_list)
def process_fit(self, source:str="harmonic", HLW_list=["HW"]):
for HLW in HLW_list:
# Get linear fit with rmse
self.bore.attrs['weights_'+HLW+'_'+source], self.bore.attrs['rmse_'+HLW+'_'+source] = self.linearfit(
self.bore['liv_height_'+HLW+'_'+source],
self.bore['Saltney_lag_'+HLW+'_'+source]
)
# Apply linear model
self.bore['linfit_lag_'+HLW+'_'+source] = self.bore.attrs['weights_'+HLW+'_'+source](self.bore['liv_height_'+HLW+'_'+source])
#self.bore['rmse_'+HLW+'_'+source] = '{:4.1f} mins'.format(self.stats(source=source, HLW=HLW))
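# Naming convention used throughout: derived variables and attributes are suffixed with
# the event type and data source, e.g. 'liv_height_HW_bodc', 'Saltney_lag_HW_bodc',
# 'linfit_lag_HW_bodc', and attrs 'weights_HW_bodc' / 'rmse_HW_bodc'.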
def load_csv(self):
"""
Load observed bore data from text file.
Load as a dataframe and save to bore:xr.DataSet
"""
logging.info('Load bore data from csv file')
self.load_bore_flag = True
df = pd.read_csv('data/master-Table 1.csv')
df.drop(columns=['date + logged time','Unnamed: 14', \
'Unnamed: 15','Unnamed: 16'], \
inplace=True)
df.rename(columns={"date + logged time (GMT)":"time"}, inplace=True)
df.rename(columns={"wind_deg (from)":"wind_deg"}, inplace=True)
df.rename(columns={"wind_speed (m/s)":"wind_speed"}, inplace=True)
df['time'] = pd.to_datetime(df['time'], format="%d/%m/%Y %H:%M")
#df['time'] = pd.to_datetime(df['time'], utc=True, format="%d/%m/%Y %H:%M")
#df.set_index(['time'], inplace=True)
for index, row in df.iterrows():
df.loc[index,'time'] = np.datetime64( df.at[index,'time'] ) # numpy.datetime64 in UTC
bore = xr.Dataset()
bore = df.to_xarray()
# Set the t_dim to be a dimension and 'time' to be a coordinate
bore = bore.rename_dims( {'index':'t_dim'} ).assign_coords( time=("t_dim", bore.time.data))
bore = bore.swap_dims( {'t_dim':'time'} )
self.bore = bore
logging.info('Bore data loaded')
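# The resulting self.bore dataset is indexed by 'time' (np.datetime64) with one entry
# per logged bore sighting; subsequent methods append derived variables to it.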
def get_river_data(self, HLW_list=["LW"]):
"""
Get Chester weir data. Consolidate CTR data.
Data from the table takes precedence. Gaps are filled by the API.
"""
if HLW_list != ["LW"]:
print('Not expecting that possibility here')
else:
# Obtain CTR data for LW for the observations times.
self.get_Glad_data(source='ctr',HLW_list=["LW"])
alph = self.bore['Chester Weir height: CHESTER WEIR 15 MIN SG'] *np.NaN
beta = self.bore['ctr_height_LW_ctr']
#print( self.bore['ctr_height_LW_ctr'][0:10] )
self.bore['ctr_height_LW'] = alph
self.bore['ctr_height_LW'].values = [alph[i].values if np.isfinite(alph[i].values) else beta[i].values for i in range(len(alph))]
# 2015-06-20T12:16:00 has a -ve value. Only keep +ve values
self.bore['ctr_height_LW'] = self.bore['ctr_height_LW'].where( self.bore['ctr_height_LW'].values>0)
#plt.plot( ctr_h_csv, 'b+' )
#plt.plot( self.bore['ctr_height_LW_ctr'], 'ro')
#plt.plot( self.bore['ctr_height_LW'], 'g.')
del self.bore['ctr_height_LW_ctr'], self.bore['ctr_time_LW_ctr']
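# Note: 'alph' is multiplied by np.NaN above, so as written the table column is
# effectively disabled and 'ctr_height_LW' is filled from the API values
# ('ctr_height_LW_ctr'); negative levels are then dropped.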
def get_met_data(self): #, HLW:str="HW"):
"""
Get the met data at times matching the observations.
Met data from OpenWeather history download.
This can then be exported into the obs table:
c.met.to_pandas().to_csv('met.csv')
"""
fn_openweather = "data/met/openweather_2005-01-01_2021-11-08.csv"
met = OpenWeather()
met.dataset = met.read_openweather_to_xarray(fn_openweather)
winsize = 6 #4h for HW, 6h for LW. +/- search distance for nearest extreme value
self.met = xr.Dataset()
for measure_var in ['wind_speed', 'wind_deg']:
met_var = []
met_time = []
for i in range(len(self.bore.time)):
try:
met_ds = None
obs_time = self.bore.time[i].values
# Find nearest met observation
dt = np.abs(met.dataset['time'] - obs_time)
index = np.argsort(dt).values
if winsize is not None: # if search window truncation exists
if np.timedelta64(dt[index[0]].values, "m").astype("int") <= 60 * winsize: # compare in minutes
#print(f"dt:{np.timedelta64(dt[index[0]].values, 'm').astype('int')}")
#print(f"winsize:{winsize}")
met_ds = met.dataset[measure_var][index[0]]
else:
# return a NaN in an xr.Dataset
# The rather odd trailing zero is to remove the array layer
# on both time and measurement, and to match the other
# alternative for a return
met_ds = xr.DataArray( [np.NaN], coords={'time': [obs_time]})
#met_ds = xr.Dataset({measure_var: ('time', [np.NaN])}, coords={'time': [obs_time]})
else: # give the closest without window search truncation
met_ds = met.dataset[measure_var][index[0]]
#print("time,HW:",obs_time, HW.values)
if type(met_ds) is xr.DataArray:
#print(f"met: {met_ds.values}")
met_var.append( float(met_ds.values) )
#print('len(met_var)', len(met_var))
met_time.append( met_ds.time.values )
#print('len(met_time)', len(met_time))
#self.bore['LT_h'][i] = HLW.dataset.sea_level[HLW.dataset['sea_level'].argmin()]
#self.bore['LT_t'][i] = HLW.dataset.time[HLW.dataset['sea_level'].argmin()]
#ind.append(i)
#print(f"i:{i}, {met_time[-1].astype('M8[ns]').astype('M8[ms]').item()}" )
#print(met_time[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
## Make timeseries plot around the highwater maxima to check
# values are being extracted as expected.
if (i % 12) == 0:
fig = plt.figure()
if measure_var == "wind_speed":
ymax = 15
if measure_var == "wind_deg":
ymax = 360
plt.subplot(3,4,(i%12)+1)
plt.plot(met.dataset.time, met.dataset[measure_var])
plt.plot( met_time[-1], met_var[-1], 'r+' )
plt.plot( [self.bore.time[i].values,self.bore.time[i].values],[0,ymax],'k')
plt.xlim([met_time[-1] - np.timedelta64(5,'h'),
met_time[-1] + np.timedelta64(5,'h')])
#plt.ylim([0,11])
plt.text( met_time[-1]-np.timedelta64(5,'h'),ymax*0.9, self.bore.location[i].values)
plt.text( met_time[-1]-np.timedelta64(5,'h'),ymax*0.1, met_time[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
# Turn off tick labels
plt.gca().axes.get_xaxis().set_visible(False)
#plt.xaxis_date()
#plt.autoscale_view()
if (i%12) == 12-1:
plt.savefig('figs/check_get_'+measure_var+'_times_'+str(i//12).zfill(2)+'.png')
plt.close('all')
else:
logging.info(f"Did not find a met time near this guess {obs_time}")
print(f"Did not find a met time near this guess {obs_time}")
except:
logging.warning('Issue with appending met data')
print('Issue with appending met data')
try: # Try and print the last observation timeseries
plt.savefig('figs/check_get_'+measure_var+'_times_'+str(i//12).zfill(2)+'.png')
plt.close('all')
except:
logging.info(f"Did not have any extra panels to plot")
print(f"Did not have any extra panels to plot")
# Save a xarray objects
coords = {'time': (('time'), self.bore.time.values)}
#print("number of obs:",len(self.bore.time))
#print("length of time", len(self.bore.time.values))
#print("length of data:", len(np.array(met_var)) )
self.met[measure_var] = xr.DataArray( np.array(met_var), coords=coords, dims=['time'])
def get_Glad_data(self, source:str='harmonic', HLW_list=["HW"]):
#def get_Glad_data(self, source:str='harmonic', HLW:str="HW"):
"""
Get Gladstone HLW data from external source
These data are reported in the bore.csv file but not consistently and it
is laborious to find old values.
It was considered a good idea to automate this step.
inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'harmonic_rec' - reconstruct time series from harmonic constants
'bodc' - measured and processed data
'api' - load recent, unprocessed data from shoothill API
HLW_list: ["LW","HW","FW","EW"] - the data is either processed for High or Low water
events, or Flood or Ebb (inflection) events
"""
loc = "liv" # default location - Liverpool
logging.info("Get Gladstone HLW data")
if source == "harmonic": # Load tidetable data from files
filnam1 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2005_2014_HLW.txt'
filnam2 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2015_2020_HLW.txt'
filnam3 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2021_2022_HLW.txt'
tg = GAUGE()
tg1 = GAUGE()
tg2 = GAUGE()
tg3 = GAUGE()
tg1.dataset = tg1.read_hlw_to_xarray(filnam1)#, self.bore.time.min().values, self.bore.time.max().values)
tg2.dataset = tg2.read_hlw_to_xarray(filnam2)#, self.bore.time.min().values, self.bore.time.max().values)
tg3.dataset = tg3.read_hlw_to_xarray(filnam3)#, self.bore.time.min().values, self.bore.time.max().values)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset, tg3.dataset], dim='time')
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
elif source == "bodc": # load full 15min data from BODC files, extract HLW
dir = '/Users/jeff/GitHub/DeeBore/data/BODC_processed/'
filelist = ['2005LIV.txt',
'2006LIV.txt', '2007LIV.txt',
'2008LIV.txt', '2009LIV.txt',
'2010LIV.txt', '2011LIV.txt',
'2012LIV.txt', '2013LIV.txt',
'2014LIV.txt', '2015LIV.txt',
'2016LIV.txt', '2017LIV.txt',
'2018LIV.txt', '2019LIV.txt',
'2020LIV.txt',
'LIV2101.txt', 'LIV2102.txt',
'LIV2103.txt', 'LIV2104.txt',
'LIV2105.txt', 'LIV2106.txt',
'LIV2107.txt', 'LIV2108.txt',
'LIV2109.txt', 'LIV2110.txt']
tg = GAUGE()
for file in filelist:
tg0=GAUGE()
tg0.dataset = tg0.read_bodc_to_xarray(dir+file)
if tg.dataset is None:
tg.dataset = tg0.dataset
else:
tg.dataset = xr.concat([ tg.dataset, tg0.dataset], dim='time')
# Use QC to drop null values
#tg.dataset['sea_level'] = tg.dataset.sea_level.where( np.logical_or(tg.dataset.qc_flags=='', tg.dataset.qc_flags=='T'), drop=True)
tg.dataset['sea_level'] = tg.dataset.sea_level.where( tg.dataset.qc_flags!='N', drop=True)
# Fix some attributes (others might not be correct for all data)
tg.dataset['start_date'] = tg.dataset.time.min().values
tg.dataset['end_date'] = tg.dataset.time.max().values
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
#tg_HLW = tg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
elif source == "api": # load full tidal signal from shoothill, extract HLW
date_start=np.datetime64('2005-04-01')
date_end=np.datetime64('now','D')
fn_archive = "liv" # File head for netcdf archive of api call
# Load timeseries from local file if it exists
try:
tg1 = GAUGE()
tg2 = GAUGE()
tg = GAUGE()
# Load local file. Created with archive_shoothill.py
dir = "archive_shoothill/"
tg1.dataset = xr.open_mfdataset(dir + fn_archive + "_????.nc") # Tidal port Gladstone Dock, Liverpool
tg1.dataset = tg1.dataset.sel(time=slice(date_start, date_end))
print(f"{len(tg1.dataset.time)} pts loaded from netcdf")
if (tg1.dataset.time[-1].values < date_end):
tg2 = GAUGE()
tg2.dataset = tg2.read_shoothill_to_xarray(date_start=tg1.dataset.time[-1].values, date_end=date_end)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset], dim='time')
print(f"{len(tg2.dataset.time)} pts loaded from API")
else:
tg = tg1
except:
tg.dataset = tg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
#tg_HLW = tg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
elif source == "ctr": # use api to load chester weir. Reset loc variable
loc = "ctr"
tg = GAUGE()
date_start=np.datetime64('2014-01-01')
date_end=np.datetime64('now','D')
#station_id = 7900 # below weir
station_id = 7899 # above weir
fn_archive = "ctr" # File head for netcdf archive of api call
station_id = 968
fn_archive = "iron"
# Load timeseries from local file if it exists
try:
tg1 = GAUGE()
tg2 = GAUGE()
tg = GAUGE()
# Load local file. Created with archive_shoothill.py
dir = "archive_shoothill/"
tg1.dataset = xr.open_mfdataset(dir + fn_archive + "_????.nc") # Local archive for the selected river gauge
tg1.dataset = tg1.dataset.sel(time=slice(date_start, date_end))
print(f"{len(tg1.dataset.time)} pts loaded from netcdf")
if (tg1.dataset.time[-1].values < date_end):
tg2 = GAUGE()
tg2.dataset = tg2.read_shoothill_to_xarray(station_id=station_id, date_start=tg1.dataset.time[-1].values, date_end=date_end)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset], dim='time')
print(f"{len(tg2.dataset.time)} pts loaded from API")
else:
tg = tg1
except:
tg.dataset = tg.read_shoothill_to_xarray(station_id=station_id ,date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
elif source == 'harmonic_rec': # load full tidal signal using anyTide code, extract HLW
tg = GAUGE()
#date_start=np.datetime64('now')
#ndays = 5
#tg.dataset = tg.anyTide_to_xarray(date_start=date_start, ndays=5)
date_start=np.datetime64('2005-04-01')
date_end=np.datetime64('now','D')
tg.dataset = tg.anyTide_to_xarray(date_start=date_start, date_end=date_end)
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
else:
logging.debug(f"Did not expect this eventuality...")
self.tg = tg
## Process the *_highs or *_lows
for HLW in HLW_list:
print(f"HLW: {HLW}")
#time_var = 'time_highs'
#measure_var = 'sea_level_highs'
#ind = [] # list of indices in the obs bore data where gladstone data is found
if HLW == 'HW':
time_var = 'time_highs'
measure_var = 'sea_level_highs'
elif HLW == 'LW':
time_var = 'time_lows'
measure_var = 'sea_level_lows'
elif HLW == 'FW':
time_var = 'time_flood'
measure_var = 'sea_level_flood'
elif HLW == 'EW':
time_var = 'time_ebb'
measure_var = 'sea_level_ebb'
else:
print('This should not have happened...')
HT_h = [] # Extrema - height
HT_t = [] # Extrema - time
winsize = 6 #4h for HW, 6h for LW. +/- search distance for nearest extreme value
for i in range(len(self.bore.time)):
if(1): #try:
HW = None
LW = None
obs_time = self.bore.time[i].values
# Extracting the highest and lowest value with a cubic spline is
# very memory costly. Only need to use the cubic method for the
# bodc and api sources, so compute the high and low waters in a
# piecewise approach around observations times.
if source == "bodc" or source == "api":
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
win = GAUGE()
win.dataset = tg.dataset.sel( time=slice(obs_time - np.timedelta64(winsize, "h"), obs_time + np.timedelta64(winsize, "h")) )
#if HLW == "LW":
# print(f"win.dataset {win.dataset}")
#print(i," win.dataset.time.size", win.dataset.time.size)
if win.dataset.time.size == 0:
tg_HLW = GAUGE()
tg_HLW.dataset = xr.Dataset({measure_var: (time_var, [np.NaN])}, coords={time_var: [obs_time]})
else:
if HLW == "FW" or HLW == "EW":
tg_HLW = win.find_flood_and_ebb_water(var_str='sea_level',method='cubic')
#print(f"inflection point time: {tg_HLW.dataset[time_var]}")
print(f"inflection points: {len(tg_HLW.dataset[time_var])}")
elif HLW == "HW" or HLW == "LW":
tg_HLW = win.find_high_and_low_water(var_str='sea_level',method='cubic')
print(f"max points: {len(tg_HLW.dataset[time_var])}")
else:
print(f"This should not have happened... HLW:{HLW}")
HW = tg_HLW.get_tide_table_times(
time_guess=obs_time,
time_var=time_var,
measure_var=measure_var,
method='nearest_1',
winsize=winsize ) #4h for HW, 6h for LW
#print("time,HW:",obs_time, HW.values)
if type(HW) is xr.DataArray: ## Actually I think they are always xr.DataArray with time, but the height can be nan.
#print(f"HW: {HW}")
HT_h.append( HW.values )
#print('len(HT_h)', len(HT_h))
HT_t.append( HW[time_var].values )
#print('len(HT_t)', len(HT_t))
#self.bore['LT_h'][i] = HLW.dataset.sea_level[HLW.dataset['sea_level'].argmin()]
#self.bore['LT_t'][i] = HLW.dataset.time[HLW.dataset['sea_level'].argmin()]
#ind.append(i)
#print(f"i:{i}, {HT_t[-1].astype('M8[ns]').astype('M8[ms]').item()}" )
#print(HT_t[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
## Make timeseries plot around the highwater maxima to check
# values are being extracted as expected.
if (i % 12) == 0:
fig = plt.figure()
plt.subplot(3,4,(i%12)+1)
plt.plot(self.tg.dataset.time, self.tg.dataset.sea_level)
plt.plot( HT_t[-1], HT_h[-1], 'r+' )
plt.plot( [self.bore.time[i].values,self.bore.time[i].values],[0,11],'k')
plt.xlim([HT_t[-1] - np.timedelta64(5,'h'),
HT_t[-1] + np.timedelta64(5,'h')])
plt.ylim([0,11])
plt.text( HT_t[-1]-np.timedelta64(5,'h'),10, self.bore.location[i].values)
plt.text( HT_t[-1]-np.timedelta64(5,'h'),1, HT_t[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
# Turn off tick labels
plt.gca().axes.get_xaxis().set_visible(False)
#plt.xaxis_date()
#plt.autoscale_view()
if (i%12) == 12-1:
plt.savefig('figs/check_get_tidetabletimes_'+str(i//12).zfill(2)+'_'+HLW+'_'+source+'.png')
plt.close('all')
else:
logging.info(f"Did not find a high water near this guess")
print(f"Did not find a high water near this guess")
if(0):#except:
logging.warning('Issue with appending HLW data')
print('Issue with appending HLW data')
try: # Try and print the last observation timeseries
plt.savefig('figs/check_get_tidetabletimes_'+str(i//12).zfill(2)+'_'+HLW+'_'+source+'.png')
plt.close('all')
except:
logging.info(f"Did not have any extra panels to plot")
print(f"Did not have any extra panels to plot")
# Save a xarray objects
coords = {'time': (('time'), self.bore.time.values)}
#print("number of obs:",len(self.bore.time))
#print("length of time", len(self.bore.time.values))
#print("length of data:", len(np.array(HT_h)) )
self.bore[loc+'_height_'+HLW+'_'+source] = xr.DataArray( np.array(HT_h), coords=coords, dims=['time'])
self.bore[loc+'_time_'+HLW+'_'+source] = xr.DataArray( np.array(HT_t), coords=coords, dims=['time'])
print('There is a suppressed plot.scatter here')
#self.bore.plot.scatter(x='liv_time', y='liv_height'); plt.show()
logging.debug(f"len(self.bore[loc+'_time_'{HLW}'_'{source}]): {len(self.bore[loc+'_time_'+HLW+'_'+source])}")
#logging.info(f'len(self.bore.liv_time)', len(self.bore.liv_time))
logging.debug(f"type(HT_t): {type(HT_t)}")
logging.debug(f"type(HT_h): {type(HT_h)}")
if loc=='liv':
logging.debug('log time, orig tide table, new tide table lookup')
for i in range(len(self.bore.time)):
logging.debug( f"{self.bore.time[i].values}, {self.bore['Liv (Gladstone Dock) HT time (GMT)'][i].values}, {self.bore['liv_time_'+HLW+'_'+source][i].values}")
#print('log time, orig tide table, new tide table lookup')
#for i in range(len(self.bore.time)):
# print( self.bore.time[i].values, self.bore['Liv (Gladstone Dock) HT time (GMT)'][i].values, self.bore['liv_time'][i].values)
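# Each call adds two variables per event type and source, e.g. for loc='liv':
# 'liv_height_HW_bodc' (extreme water level, m) and 'liv_time_HW_bodc' (its time),
# both indexed by the bore observation times.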
def calc_Glad_Saltney_time_lag(self, source:str="harmonic", HLW_list=["HW"]):
"""
Compute lag (obs - tide) for arrival at Saltney relative to Gladstone HT
Store lags as integer (minutes) since np.datetime64 and
np.timedelta64 objects are problematic with polyfitting.
inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'bodc' - measured and processed data
'api' - load recent, unprocessed data from shoothill API
HLW: [LW/HW] - the data is either processed for High or Low water events
"""
for HLW in HLW_list:
logging.info('calc_Glad_Saltney_time_diff')
nt = len(self.bore.time)
lag = (self.bore['time'].values - self.bore['liv_time_'+HLW+'_'+source].values).astype('timedelta64[m]')
# convert to integers so nans can be applied
lag = [ lag[i].astype('int') if np.isfinite(self.bore['liv_height_'+HLW+'_'+source].values)[i] else np.NaN for i in range(nt) ]
# Pick out FB and Blue bridge
Saltney_lag = [ lag[i] if self.bore.location.values[i] == 'bridge' else np.NaN for i in range(nt) ]
bluebridge_lag = [ lag[i] if self.bore.location.values[i] == 'blue bridge' else np.NaN for i in range(nt) ]
#Saltney_lag = [ lag[i].astype('int') if self.bore.location.values[i] == 'bridge' else np.NaN for i in range(nt) ]
#bluebridge_lag = [ lag[i].astype('int') if self.bore.location.values[i] == 'blue bridge' else np.NaN for i in range(nt) ]
# Save a xarray objects
coords = {'time': (('time'), self.bore.time.values)}
self.bore['lag_'+HLW+'_'+source] = xr.DataArray( lag, coords=coords, dims=['time'])
self.bore['Saltney_lag_'+HLW+'_'+source] = xr.DataArray( Saltney_lag, coords=coords, dims=['time'])
self.bore['bluebridge_lag_'+HLW+'_'+source] = xr.DataArray( bluebridge_lag, coords=coords, dims=['time'])
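# Lags are stored as integer minutes (observed arrival time minus Gladstone tide time),
# NaN where no matching tide event was found. 'Saltney_lag_*' keeps only 'bridge'
# sightings and 'bluebridge_lag_*' only 'blue bridge' sightings.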
def linearfit(self, X, Y):
"""
Linear regression. Calculates linear fit weights and RMSE
Is used after computing the lag between Gladstone and Saltney events,
during load_and_process(), to find a fit between Liverpool heights
and Saltney arrival lag.
Returns polynomial function for linear fit that can be used:
E.g.
X=range(10)
np.poly1d(weights)( range(10) )
Also returns RMSE
"""
idx = np.isfinite(X).values & np.isfinite(Y).values
weights = np.polyfit( X[idx], Y[idx], 1)
logging.debug(f"weights: {weights}")
#self.linfit = np.poly1d(weights)
#self.bore['linfit_lag'] = self.linfit(X)
#self.bore.attrs['weights'] = np.poly1d(weights)
#self.bore.attrs['weights'](range(10))
Y_fit = np.poly1d(weights)(X)
rmse = '{:4.1f} mins'.format( np.sqrt(np.nanmean((Y.values - Y_fit)**2)) )
return np.poly1d(weights), rmse
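# Minimal usage sketch of the returned np.poly1d object (illustrative values only,
# not data from this project):
#   import numpy as np
#   heights = np.array([8.5, 9.0, 9.5, 10.0])      # Gladstone HW heights (m)
#   lags = np.array([-80.0, -72.0, -66.0, -58.0])  # Saltney lags (mins)
#   weights = np.poly1d(np.polyfit(heights, lags, 1))
#   weights(9.2)  # predicted lag (mins) for a 9.2 m high water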
############################################################################
#%% Presenting data
############################################################################
def show(self):
""" Show xarray dataset """
print( self.bore )
def plot_lag_vs_height(self, source:str="harmonic", HLW:str="HW"):
"""
Plot bore lag (obs time - Gladstone tide time) against
Gladstone extreme water level (m).
Separate colours for Saltney, Bluebridge, Chester.
inputs:
source: 'harmonic' [default] - load HLW from harmonic prediction
'harmonic_rec' - data from harmonic reconstruction
'bodc' - measured and processed data
'api' - load recent, unprocessed data from shoothill API
'all' - Use bodc + api data
HLW: [LW/HW] - the data is either processed for High or Low water events
"""
I = self.bore['Quality'] == "A"
if source == "all":
Yliv = self.bore['liv_height_'+HLW+'_bodc']
Xsalt = self.bore['Saltney_lag_'+HLW+'_bodc']
Xblue = self.bore['bluebridge_lag_'+HLW+'_bodc']
Yliv_api = self.bore['liv_height_'+HLW+'_api'].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
Xsalt_api = self.bore['Saltney_lag_'+HLW+'_api'].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
Xblue_api = self.bore['bluebridge_lag_'+HLW+'_api'].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
Xfit = self.bore['linfit_lag_'+HLW+'_bodc']
Xsalt_api_latest = Xsalt_api.where( xr.ufuncs.isfinite(Xsalt_api), drop=True)[0]
Yliv_api_latest = Yliv_api.where( xr.ufuncs.isfinite(Xsalt_api), drop=True)[0]
plt.plot( Xsalt,Yliv, 'r.', label='Saltney: rmse '+'{:4.1f}'.format(self.stats('bodc'))+'mins')
plt.plot( Xsalt[I],Yliv[I], 'k+', label='1st hand')
plt.plot( Xblue,Yliv, 'b.', label='Bluebridge')
plt.plot( Xfit,Yliv, 'k-')
plt.plot( Xsalt_api,Yliv_api, 'ro', label='Saltney API')
plt.plot( Xblue_api,Yliv_api, 'bo', label='Bluebridge API')
plt.plot( Xsalt_api_latest,Yliv_api_latest, 'go', label='Saltney latest')
plt.plot( Xsalt_api[I],Yliv_api[I], 'k+')
else:
Yliv = self.bore['liv_height_'+HLW+'_'+source]
Xsalt = self.bore['Saltney_lag_'+HLW+'_'+source]
Xblue = self.bore['bluebridge_lag_'+HLW+'_'+source]
Xfit = self.bore['linfit_lag_'+HLW+'_'+source]
plt.plot( Xsalt,Yliv, 'r.', label='Saltney: rmse '+'{:4.1f}'.format(self.stats(source,HLW))+'mins')
plt.plot( Xsalt[I],Yliv[I], 'k+', label='1st hand')
plt.plot( Xblue,Yliv, 'b.', label='Bluebridge')
plt.plot( Xfit,Yliv, 'k-')
Xsalt_latest = Xsalt.where( xr.ufuncs.isfinite(Xsalt), drop=True)[0]
Yliv_latest = Yliv.where( xr.ufuncs.isfinite(Xsalt), drop=True)[0]
# Highlight recent data
Yliv = self.bore['liv_height_'+HLW+'_'+source].where( self.bore.time > np.datetime64('2021-01-01') )
Xsalt = self.bore['Saltney_lag_'+HLW+'_'+source].where( self.bore.time > np.datetime64('2021-01-01') )
Xblue = self.bore['bluebridge_lag_'+HLW+'_'+source].where( self.bore.time > np.datetime64('2021-01-01') )
#Yliv = self.bore['liv_height_'+HLW+'_'+source].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
#Xsalt = self.bore['Saltney_lag_'+HLW+'_'+source].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
#Xblue = self.bore['bluebridge_lag_'+HLW+'_'+source].where( np.isnan(self.bore['liv_height_'+HLW+'_bodc']))
plt.plot( Xsalt,Yliv, 'ro', label='Saltney 2021')
plt.plot( Xblue,Yliv, 'bo', label='Bluebridge 2021')
plt.plot( Xsalt_latest,Yliv_latest, 'go', label='Saltney latest')
plt.plot( Xsalt[I],Yliv[I], 'k+')
#plt.plot( Xblue[0],Yliv[0], 'b+', label='Bluebridge recent')
plt.ylabel('Liv (Gladstone Dock) '+HLW+' (m)')
plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
if source == 'harmonic': label_str = 'tide table predicted'
if source == 'harmonic_rec': label_str = 'harmonic reconstructed'
if source == 'all': label_str = 'all measured'
if source == 'bodc': label_str = 'measured only QCd'
if source == 'api': label_str = 'measured w/o QC'
plt.title(f"Bore arrival time at Saltney Ferry ({label_str} data)")
#plt.xlim([-125, -40]) # minutes
#plt.ylim([8.2, 10.9]) # metres
plt.legend()
#plt.show()
plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_'+HLW+'_'+source+'.png')
def plot_surge_effect(self, source:str='bodc', HLW:str="HW"):
"""
Compare harmonic predicted HLW+lag with measured HLW+lag
Plot quiver between harmonic and measured values.
NB should probably have linfit predicted lag instead of
Saltney_lag_*_harmonic for the predicted value.
inputs:
source:
'bodc' - measured and processed data
'api' - load recent, unprocessed data from shoothill API
HLW: [LW/HW] - the data is either processed for High or Low water events
"""
# Example plot
from matplotlib.collections import LineCollection
from matplotlib import colors as mcolors
import matplotlib.dates as mdates
if source=='api':
last_bodc_time = self.bore['liv_time_'+HLW+'_bodc']\
.where(np.isfinite(self.bore['liv_height_'+HLW+'_bodc'].values))\
.dropna('time')\
.max().values
I = self.bore['liv_time_'+HLW+'_api'] > last_bodc_time + np.timedelta64(1,'D') #np.datetime64('2020-09-01')
nval = sum(I).values
else:
nval = min( len(self.bore['linfit_lag_'+HLW+'_harmonic']), len(self.bore['linfit_lag_'+HLW+'_bodc']) )
I = np.arange(nval)
segs_h = np.zeros((nval,2,2)) # line, pointA/B, t/z
# Endpoint order: [:,0,:] = measured source values, [:,1,:] = harmonic predictions (x = lag, y = height)
segs_h[:,0,1] = self.bore['liv_height_'+HLW+'_'+source][I]
segs_h[:,1,1] = self.bore['liv_height_'+HLW+'_harmonic'][I]
segs_h[:,0,0] = self.bore['Saltney_lag_'+HLW+'_'+source][I]
segs_h[:,1,0] = self.bore['Saltney_lag_'+HLW+'_harmonic'][I]
if source=='api':
print('liv_height_'+HLW+'_'+source, segs_h[:,0,1])
print('liv_height_'+HLW+'_harmonic', segs_h[:,1,1])
print('Saltney_lag_'+HLW+'_'+source, segs_h[:,0,0])
print('Saltney_lag_'+HLW+'_harmonic', segs_h[:,1,0])
II = self.bore['Quality'][I] == "A"
#segs_h[:,0,0] = self.bore.liv_height_bodc[:nval]
#segs_h[:,1,0] = self.bore.liv_height_harmonic[:nval]
#segs_h[:,0,1] = self.bore.Saltney_lag_bodc[:nval]
#segs_h[:,1,1] = self.bore.Saltney_lag_harmonic[:nval]
fig, ax = plt.subplots()
ax.set_ylim(np.nanmin(segs_h[:,:,1]), np.nanmax(segs_h[:,:,1]))
line_segments_HW = LineCollection(segs_h, cmap='plasma', linewidth=1)
ax.add_collection(line_segments_HW)
ax.scatter(segs_h[:,1,0],segs_h[:,1,1], c='red', s=4, label='predicted') # harmonic predictions
ax.scatter(segs_h[:,0,0],segs_h[:,0,1], c='green', s=4, label='measured') # measured values
ax.scatter(segs_h[II,0,0],segs_h[II,0,1], c='green', s=16) # 1st hand
ax.set_title('Harmonic prediction with quiver to measured high waters')
plt.ylabel('Liv (Gladstone Dock) '+HLW+' (m)')
plt.xlabel('Arrival time (mins relative to Liv '+HLW+')')
plt.title('Bore arrival time at Saltney Ferry. Harmonic prediction cf measured')
plt.legend()
#plt.xlim([-125, -40]) # minutes
#plt.ylim([8.2, 10.9]) # metres
plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_shift_'+HLW+'_'+source+'.png')
plt.close('all')
def plot_scatter_river(self, source:str='bodc', HLW:str="HW"):
"""
Scatter plot of Saltney arrival lag vs Gladstone height, coloured by the Chester weir (river) LW height; class "A" observations are enlarged and labelled.
"""
plt.close('all')
fig = plt.figure(figsize=(8, 6), dpi=120)
if HLW=="dLW":
X = self.bore['Saltney_lag_LW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="dHW":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="XX":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_LW_'+source]
else:
X = self.bore['Saltney_lag_'+HLW+'_'+source]
Y = self.bore['liv_height_'+HLW+'_'+source]
S = [40 if self.bore['Quality'][i] == "A" else 5 for i in range(len(self.bore['Quality']))]
lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%d%b%y') if self.bore['Quality'][i] == "A" else "" for i in range(len(self.bore['Quality']))]
ss= plt.scatter( X, Y, \
c=self.bore['ctr_height_LW'],
s=S,
#cmap='magma',
cmap='jet',
vmin=4.4,
vmax=5.5, # 4.6
label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
)
cbar = plt.colorbar(ss)
for ind in range(len(self.bore['Quality'])):
# zip joins x and y coordinates in pairs
plt.annotate(lab[ind], # this is the text
(X[ind],Y[ind]), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,6), # distance from text to points (x,y)
ha='center', # horizontal alignment can be left, right or center
fontsize=4)
plt.legend()
# Linear fit
#x = self.df['Liv (Gladstone Dock) HT height (m)']
#plt.plot( x, self.df['linfit_lag'], '-' )
cbar.set_label('River height (m)')
plt.title('Bore arrival time at Saltney Ferry')
plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
plt.ylabel('Liv (Gladstone Dock) '+HLW+' height (m)')
plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_river_'+HLW+'_'+source+'.png')
############################################################################
#%% DIAGNOSTICS
############################################################################
def predict_bore(self, source:str='harmonic', HLW:str="HW"):
"""
Predict the bore timing at Saltney for a requested input date (given in
days relative to now).
Implements a linear fit model to predicted tides.
Can select which linear fit model (weights) to use with by specifying
'source' and 'HLW'
INPUTS: which define the weights used.
-------
source: 'harmonic' [default] - from harmonic prediction
'bodc' - from measured and processed data
'api' - from recent, unprocessed data from shoothill API
HLW: [LW/HW] - processed from either High or Low water events
Requested parameters
--------------------
day : day
DESCRIPTION.
"""
print('Predict bore event for date')
filnam = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2021_2022_HLW.txt'
nd = input('Make predictions for N days from hence (int):?')
day = np.datetime64('now', 'D') + np.timedelta64(int(nd), 'D')
dayp1 = day + np.timedelta64(24, 'h')
if(1): # np.datetime64('now', 'Y') < np.datetime64('2021'): # year 2020
print("predict_bore(): should check if table data is available. If not, use harmonic reconstructed data")
tg = GAUGE()
tg.dataset = tg.read_hlw_to_xarray(filnam, day, dayp1)
HT = tg.dataset['sea_level'].where(tg.dataset['sea_level']\
.values > 7).dropna('time') #, drop=True)
else: # year 2021 (no tide table data)
source = 'harmonic_rec'
print('source=',source)
tg = GAUGE()
tg_tmp = GAUGE()
tg_tmp.dataset = tg_tmp.anyTide_to_xarray(date_start=day, date_end=dayp1)
tg = tg_tmp.find_high_and_low_water(var_str='sea_level')
#tg.dataset = tg.get_Glad_data(source='harmonic_rec',date_start=day, date_end=dayp1)
HT = tg.dataset['sea_level_highs'].where(tg.dataset['sea_level_highs']\
.values > 7).dropna('time_highs')\
.rename({'time_highs':'time'})
#plt.plot( HT.time, HT,'.' );plt.show()
#lag_pred = self.linfit(HT)
lag_pred = self.bore.attrs['weights_'+HLW+'_'+source](HT)
#lag_pred = lag_pred[np.isfinite(lag_pred)] # drop nans
Saltney_time_pred = [HT.time[i].values
+ np.timedelta64(int(round(lag_pred[i])), 'm')
for i in range(len(lag_pred))]
# Iterate over high tide events to print useful information
print(f"Predictions based on fit to {source} {HLW} data")
for i in range(len(lag_pred)):
#print( "Gladstone HT", np.datetime_as_string(HT.time[i], unit='m',timezone=pytz.timezone('UTC')),"(GMT). Height: {:.2f} m".format( HT.values[i]))
#print(" Saltney arrival", np.datetime_as_string(Saltney_time_pred[i], unit='m', timezone=pytz.timezone('Europe/London')),"(GMT/BST). Lag: {:.0f} mins".format( lag_pred[i] ))
print("Predictions for ", day_of_week(Saltney_time_pred[i]), Saltney_time_pred[i].astype('datetime64[s]').astype(datetime.datetime).strftime('%Y/%m/%d') )
print("Saltney FB:", np.datetime_as_string(Saltney_time_pred[i], unit='m', timezone=pytz.timezone('Europe/London')) )
try:
Glad_HLW = tg.get_tide_table_times( Saltney_time_pred[i], method='nearest_2' )
# Extract the High Tide value
print('Liv HT: ', np.datetime_as_string(Glad_HLW[ np.argmax(Glad_HLW.values) ].time.values, unit='m', timezone=pytz.timezone('Europe/London')), Glad_HLW[ np.argmax(Glad_HLW.values) ].values, 'm' )
# Extract the Low Tide value
print('Liv LT: ', np.datetime_as_string(Glad_HLW[ np.argmin(Glad_HLW.values) ].time.values, unit='m', timezone=pytz.timezone('Europe/London')), Glad_HLW[ np.argmin(Glad_HLW.values) ].values, 'm' )
except:
pass
print("")
#plt.scatter( Saltney_time_pred, HT ,'.');plt.show()
# problem with time stamp
def stats(self, source:str='harmonic', HLW:str="HW"):
"""
Root mean square error (mins) between the observed Saltney lag and the linear-fit lag.
"""
rmse = np.sqrt(np.nanmean((self.bore['Saltney_lag_'+HLW+'_'+source].values - self.bore['linfit_lag_'+HLW+'_'+source].values)**2))
print(f"{source}: Root mean square error = {rmse}")
return rmse
############################################################################
#%% SECTION
############################################################################
def load_timeseries(self):
fn_tidegauge = '../COAsT/example_files/tide_gauges/lowestoft-p024-uk-bodc'
date0 = datetime.datetime(2007,1,10)
date1 = datetime.datetime(2007,1,12)
tidegauge = GAUGE(fn_tidegauge, date_start = date0, date_end = date1)
print(tidegauge.dataset)
############################################################################
#%% Development / Misc methods
############################################################################
def load_and_plot_hlw_data(self):
""" Simply load HLW file and plot """
filnam = 'data/Liverpool_2015_2020_HLW.txt'
date_start = datetime.datetime(2020, 1, 1)
date_end = datetime.datetime(2020, 12, 31)
tg = GAUGE()
tg.dataset = tg.read_hlw_to_xarray(filnam, date_start, date_end)
# Example plot
plt.figure()
tg.dataset.plot.scatter(x="time", y="sea_level")
plt.savefig('figs/Liverpool_HLW.png')
plt.close('all')
print(f"stats: mean {tg.time_mean('sea_level')}")
print(f"stats: std {tg.time_std('sea_level')}")
def shoothill(self):
"""
Extract the timeseries for a period.
Extract the extrema.
Plot timeseries. Overlay highs and lows
"""
date_start = np.datetime64('2020-09-01')
date_end = np.datetime64('2020-09-30')
# E.g. Liverpool (Gladstone Dock), station_id="13482", which is read by default.
# Load in data from the Shoothill API
sg = GAUGE()
sg.dataset = sg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
#sg = GAUGE(startday=date_start, endday=date_end) # create modified Tidegauge object
sg_HLW = sg.find_high_and_low_water(var_str='sea_level', method='cubic')
#g.dataset
#g_HLW.dataset
plt.figure()
sg.dataset.plot.scatter(x="time", y="sea_level")
sg_HLW.dataset.plot.scatter(x="time_highs", y="sea_level_highs")
sg_HLW.dataset.plot.scatter(x="time_lows", y="sea_level_lows")
plt.savefig('figs/Liverpool_shoothill.png')
plt.close('all')
"""
Compare harmonic predicted highs with measured highs
"""
# Compare tide predictions with measured HLW
filnam = 'data/Liverpool_2015_2020_HLW.txt'
tg = GAUGE()
tg.dataset = tg.read_hlw_to_xarray(filnam, date_start, date_end)
tg_HLW = tg.find_high_and_low_water(var_str='sea_level')
sg = GAUGE()
sg.dataset = sg.read_shoothill_to_xarray(date_start=date_start, date_end=date_end)
sg_HW = sg.find_nearby_high_and_low_water(var_str='sea_level', target_times=tg_HLW.dataset.time_highs, method='cubic', extrema="max")
# Example plot
from matplotlib.collections import LineCollection
from matplotlib import colors as mcolors
import matplotlib.dates as mdates
nval = min( len(sg_HLW.dataset.time_highs), len(tg_HLW.dataset.time_highs) )
segs_h = np.zeros((nval,2,2)) # line, pointA/B, t/z
#convert dates to numbers first
segs_h[:,0,0] = mdates.date2num( tg_HLW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
segs_h[:,1,0] = mdates.date2num( sg_HW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
segs_h[:,0,1] = tg_HLW.dataset.sea_level_highs[:nval]
segs_h[:,1,1] = sg_HW.dataset.sea_level_highs[:nval]
fig, ax = plt.subplots()
ax.set_ylim(segs_h[:,:,1].min(), segs_h[:,:,1].max())
line_segments_HW = LineCollection(segs_h, cmap='plasma', linewidth=1)
ax.add_collection(line_segments_HW)
ax.scatter(segs_h[:,0,0],segs_h[:,0,1], c='green', s=2) # harmonic predictions
ax.set_title('Harmonic prediction with quiver to measured high waters')
ax.xaxis_date()
ax.autoscale_view()
plt.savefig('figs/Liverpool_shoothill_vs_table.png')
plt.close('all')
"""
Compare QC'd BODC measured highs with API highs (check reference levels)
"""
bg=GAUGE()
bg.dataset = bg.read_bodc_to_xarray("data/BODC_processed/2020LIV.txt")
# Use QC to drop null values
bg.dataset['sea_level'] = bg.dataset.sea_level.where( bg.dataset.qc_flags!='N', drop=True)
# Trim dataset
bg.dataset = bg.dataset.sel(time=slice(date_start, date_end))
# Fix some attributes (others might not be correct for all data)
bg.dataset['start_date'] = bg.dataset.time.min().values
bg.dataset['end_date'] = bg.dataset.time.max().values
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
bg_HW = bg.find_nearby_high_and_low_water(var_str='sea_level', target_times=tg_HLW.dataset.time_highs, method='cubic', extrema="max")
#bg_HLW = bg.find_high_and_low_water(var_str='sea_level',method='cubic') #'cubic')
nval = min( len(sg_HW.dataset.time_highs), len(bg_HW.dataset.time_highs) )
segs_h = np.zeros((nval,2,2)) # line, pointA/B, t/z
#convert dates to numbers first
segs_h[:,0,0] = mdates.date2num( bg_HW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
segs_h[:,1,0] = mdates.date2num( sg_HW.dataset.time_highs[:nval].astype('M8[ns]').astype('M8[ms]') )
segs_h[:,0,1] = bg_HW.dataset.sea_level_highs[:nval]
segs_h[:,1,1] = sg_HW.dataset.sea_level_highs[:nval]
fig, ax = plt.subplots()
ax.set_ylim(segs_h[:,:,1].min(), segs_h[:,:,1].max())
line_segments_HW = LineCollection(segs_h, cmap='plasma', linewidth=1)
ax.add_collection(line_segments_HW)
ax.scatter(segs_h[:,0,0],segs_h[:,0,1], c='green', s=2) # BODC QC'd high waters
ax.set_title('BODC QCd quiver to API measured high waters')
ax.xaxis_date()
ax.autoscale_view()
plt.savefig('figs/Liverpool_shoothill_vs_bodc.png')
plt.close('all')
def fits_to_data(self, source:str="bodc", qc_flag:bool=False):
"""
Explore different combinations of HW and LW times and heights to
find the best fit to the data
qc_flag: if True, only fit bore['Quality'] == "A" data, else fit all data
"""
args_list = []
self.bore.attrs['weights_HW_'+source] = []
self.bore.attrs['rmse_HW_'+source] = []
args_list.append( {"HLW":"HW",
"source":source,
'xvar':self.bore['liv_height_HW_'+source],
'yvar':self.bore['Saltney_lag_HW_'+source],
'label':'height(HW), time(HW)',
'wvar':'weights_HW'+'_'+source,
'rvar':'rmse_HW'+'_'+source}
)
self.bore.attrs['weights_dHW_'+source] = []
self.bore.attrs['rmse_dHW_'+source] = []
args_list.append( {"HLW":"dHW",
"source":source,
'xvar':self.bore['liv_height_HW_'+source]-self.bore['liv_height_LW_'+source],
'yvar':self.bore['Saltney_lag_HW_'+source],
'label':'height(HW-LW), time(HW)',
'wvar':'weights_dHW_'+source,
'rvar':'rmse_dHW'+'_'+source}
)
self.bore.attrs['weights_dLW_'+source] = []
self.bore.attrs['rmse_dLW_'+source] = []
args_list.append( {"HLW":"dLW",
"source":source,
'xvar':self.bore['liv_height_HW_'+source]-self.bore['liv_height_LW_'+source],
'yvar':self.bore['Saltney_lag_LW_'+source],
'label':'height(HW-LW), time(LW)',
'wvar':'weights_dLW'+'_'+source,
'rvar':'rmse_dLW'+'_'+source}
)
self.bore.attrs['weights_LW_'+source] = []
self.bore.attrs['rmse_LW_'+source] = []
args_list.append( {"HLW":"LW",
"source":source,
'xvar':self.bore['liv_height_LW_'+source],
'yvar':self.bore['Saltney_lag_LW_'+source],
'label':'height(LW), time(LW)',
'wvar':'weights_LW'+'_'+source,
'rvar':'rmse_LW'+'_'+source}
)
#self.bore.attrs['weights_XX_'+source] = []
#self.bore.attrs['rmse_XX_'+source] = []
args_list.append( {"HLW":"XX",
"source":source,
'xvar':self.bore['liv_height_LW_'+source],
'yvar':self.bore['Saltney_lag_HW_'+source],
'label':'height(LW), time(HW)',
'wvar':'weights_XX'+'_'+source,
'rvar':'rmse_XX'+'_'+source}
)
for args in args_list:
self.bore.attrs[args['wvar']] = []
self.bore.attrs[args['rvar']] = []
if qc_flag:
weights,rmse = self.linearfit( args['xvar'].where( self.bore['Quality'].values=="A"),
args['yvar'].where( self.bore['Quality'].values=="A" ) )
print(f"{source} class A| {args['label']}: {rmse}")
self.bore.attrs[args['wvar']] = weights
self.bore.attrs[args['rvar']] = rmse
else:
weights,rmse = self.linearfit( args['xvar'], args['yvar'] )
print(f"{source}| {args['label']}: {rmse}")
self.bore.attrs[args['wvar']] = weights
self.bore.attrs[args['rvar']] = rmse
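# Each combination leaves its fit in self.bore.attrs (e.g. 'weights_dHW_bodc',
# 'rmse_dHW_bodc'); these are later read by the plot_scatter_*() methods and
# river_lag_timing().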
###
def combinations_lag_hlw_river(self):
"""
Plot different combinations of Lag,HLW w/ rivers
"""
self.plot_scatter_river(source='harmonic', HLW="HW")
self.plot_scatter_river(source='bodc', HLW="HW")
self.plot_scatter_river(source='bodc', HLW="LW")
self.plot_scatter_river(source='bodc', HLW="dLW")
self.plot_scatter_river(source='bodc', HLW="dHW")
self.plot_scatter_river(source='bodc', HLW="XX")
self.plot_scatter_river(source='bodc', HLW="FW")
self.plot_scatter_river(source='api', HLW="HW")
self.plot_scatter_river(source='api', HLW="FW")
self.plot_scatter_date(source='api', HLW="HW")
self.plot_scatter_date(source='bodc', HLW="HW")
self.plot_scatter_date(source='bodc', HLW="FW")
self.plot_scatter_date(source='harmonic', HLW="HW")
self.plot_scatter_wind(source='api', HLW="HW")
self.plot_scatter_wind(source='bodc', HLW="HW")
self.plot_scatter_wind(source='bodc', HLW="FW")
self.plot_scatter_wind(source='harmonic', HLW="HW")
def river_lag_timing(self, HLW="HW", source="api"):
"""
Explore how rivers affect bore timing
"""
plt.close('all')
fig = plt.figure(figsize=(8, 6), dpi=120)
if HLW=="dLW":
X = self.bore['Saltney_lag_LW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="dHW":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="XX":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_LW_'+source]
else:
Y = self.bore['ctr_height_LW']
lag_pred = self.bore.attrs['weights_'+HLW+'_'+source](self.bore['liv_height_HW_'+source])
X = lag_pred - self.bore['Saltney_lag_'+HLW+'_'+source]
S = [40 if self.bore['Quality'][i] == "A" else 5 for i in range(len(self.bore['Quality']))]
lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%d%b%y') if self.bore['Quality'][i] == "A" else "" for i in range(len(self.bore['Quality']))]
ss= plt.scatter( X, Y, \
c=self.bore['liv_height_HW_'+source], # - self.bore['liv_height_HW_harmonic'],
s=S,
#cmap='magma',
cmap='jet',
#vmin=8.5,
#vmax=10.5,
label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
)
cbar = plt.colorbar(ss)
for ind in range(len(self.bore['Quality'])):
# zip joins x and y coordinates in pairs
plt.annotate(lab[ind], # this is the text
(X[ind],Y[ind]), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,6), # distance from text to points (x,y)
ha='center', # horizontal alignment can be left, right or center
fontsize=4)
plt.legend()
# Linear fit
#x = self.df['Liv (Gladstone Dock) HT height (m)']
#plt.plot( x, self.df['linfit_lag'], '-' )
cbar.set_label('Liv (Gladstone Dock) '+HLW+' height (m)')
plt.title('Bore arrival time at Saltney Ferry')
plt.xlabel('Timing error (mins) on prediction relative to '+HLW)
plt.ylabel('River height (m)')
plt.savefig('figs/SaltneyArrivalLag_vs_river_LivHeight'+HLW+'_'+source+'.png')
def plot_scatter_date(self, source:str='bodc', HLW:str="HW"):
"""
Scatter plot of Saltney arrival lag vs Gladstone height, coloured by observation date.
"""
plt.close('all')
fig = plt.figure(figsize=(8, 6), dpi=120)
if HLW=="dLW":
X = self.bore['Saltney_lag_LW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="dHW":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="XX":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_LW_'+source]
else:
X = self.bore['Saltney_lag_'+HLW+'_'+source]
Y = self.bore['liv_height_'+HLW+'_'+source]
S = [40 if self.bore['Quality'][i] == "A" else 10 for i in range(len(self.bore['Quality']))]
lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%b%y') for i in range(len(self.bore['Quality']))]
ss= plt.scatter( X, Y, \
c=self.bore.time, #self.bore['ctr_height_LW'],
s=S,
#cmap='magma',
cmap='jet',
#vmin=4.4,
#vmax=5.5, # 4.6
label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
)
cbar = plt.colorbar(ss)
for ind in range(len(self.bore['Quality'])):
# zip joins x and y coordinates in pairs
plt.annotate(lab[ind], # this is the text
(X[ind],Y[ind]), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,6), # distance from text to points (x,y)
ha='center', # horizontal alignment can be left, right or center
fontsize=4)
plt.legend()
# Linear fit
#x = self.df['Liv (Gladstone Dock) HT height (m)']
#plt.plot( x, self.df['linfit_lag'], '-' )
cbar.set_label('Date')
plt.title('Bore arrival time at Saltney Ferry')
plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
plt.ylabel('Liv (Gladstone Dock) '+HLW+' height (m)')
plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_date_'+HLW+'_'+source+'.png')
def plot_scatter_wind(self, source:str='bodc', HLW:str="HW"):
"""
Plots both the along- and across-estuary wind speed components (loops over ["along", "across"]).
"""
for dirn in ["along", "across"]:
plt.close('all')
fig = plt.figure(figsize=(8, 6), dpi=120)
if HLW=="dLW":
X = self.bore['Saltney_lag_LW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="dHW":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_HW_'+source] - self.bore['liv_height_LW_'+source]
elif HLW=="XX":
X = self.bore['Saltney_lag_HW_'+source]
Y = self.bore['liv_height_LW_'+source]
else:
X = self.bore['Saltney_lag_'+HLW+'_'+source]
Y = self.bore['liv_height_'+HLW+'_'+source]
S = [40 if self.bore['Quality'][i] == "A" else 10 for i in range(len(self.bore['Quality']))]
lab = [ self.bore.time[i].values.astype('datetime64[D]').astype(object).strftime('%b%y') for i in range(len(self.bore['Quality']))]
if dirn == "along":
spd = self.bore.wind_speed * np.cos((315 - self.bore.wind_deg)*np.pi/180.)
elif dirn == "across":
spd = self.bore.wind_speed * np.sin((315 - self.bore.wind_deg)*np.pi/180.)
else:
print(f"{dirn}: did not expect that direction option")
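# Decomposition note: 315 deg appears to be taken as the along-estuary bearing of the
# ("from") wind direction, so cos() of (315 - wind_deg) gives the along-estuary
# component of the wind speed and sin() the across-estuary component.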
ss= plt.scatter( X, Y, \
c=spd, #self.bore['ctr_height_LW'],
s=S,
cmap='Spectral',
vmin=-7,
vmax=7, # 4.6
label="RMSE:"+self.bore.attrs['rmse_'+HLW+'_'+source]
)
cbar = plt.colorbar(ss)
for ind in range(len(self.bore['Quality'])):
# zip joins x and y coordinates in pairs
plt.annotate(lab[ind], # this is the text
(X[ind],Y[ind]), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,6), # distance from text to points (x,y)
ha='center', # horizontal alignment can be left, right or center
fontsize=4)
plt.legend()
# Linear fit
#x = self.df['Liv (Gladstone Dock) HT height (m)']
#plt.plot( x, self.df['linfit_lag'], '-' )
cbar.set_label(dirn+" estuary wind (m/s), from Hawarden/Connah's Quay")
plt.title('Bore arrival time at Saltney Ferry')
plt.xlabel('Arrival time (mins) relative to Liv '+HLW)
plt.ylabel('Liv (Gladstone Dock) '+HLW+' height (m)')
plt.savefig('figs/SaltneyArrivalLag_vs_LivHeight_'+dirn+'_wind_'+HLW+'_'+source+'.png')
################################################################################
################################################################################
#%% Main Routine
################################################################################
################################################################################
if __name__ == "__main__":
#### Initialise logging
now_str = datetime.datetime.now().strftime("%d%b%y %H:%M")
logging.info(f"-----{now_str}-----")
#### Constants
DATABUCKET_FILE = "deebore.pkl"
INSTRUCTIONS = """
Choose Action:
all load and process all data
0 load bore observations
h load and process harmonic data
hrec load and process harmonic reconstructed data
b load and process measured (bodc) data
a load and process measured (API) data
r load and process measured (API) river data
m load and process met data
2 show bore dataset
3 plot bore data (lag vs tidal height)
4 plot difference between predicted and measured (lag vs tidal height)
6 Predict bore event for date
x Export data to csv. NOT IMPLEMENTED
rm Remove pickle file
i to show these instructions
q to quit (and pickle bore)
---
DEV:
d1 load and plot HLW data
d2 shoothill dev
d3 Explore different RMSE fits to the data
d4 Plot different combinations of Lag,HLW w/ rivers
d5 Explore how rivers affect bore timing
"""
## Do the main program
c = Controller()
```
#### File: DeeBore/utils/archive_NRW_csv.py
```python
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import xarray as xr
from shoothill_api.shoothill_api import GAUGE
class NRWGauge:
"""
"""
def __init__(self):
self.dataset = None
#%% Load method
@classmethod
def read_nrw_to_xarray(cls, fn_nrw, date_start=None, date_end=None):
"""
For reading from a single NRW csv file into an
xarray dataset.
If no data lies between the specified dates, a dataset is still created
containing information on the gauge, but the time dimension will
be empty.
The data takes the form:
Station Site: Northern [CY]
Station Name: <NAME>
Station Number: 067020
LocalX: ---
LocalY: ---
Datum: ---
Parameter Name: SG [Stage]
Parameter Type: S
Parameter Type Name: Stage
Time series Name: 067020.SG.15.P
Time series Unit: m
GlobalX: 340846
GlobalY: 365842
Longitude: -2.88535
Latitude: 53.186092
Date,Time,Value[m],State of value,Interpolation,Absolute value[m],State of absolute value,Interpolation of absolute value,Tags,Comments
01/01/2021,09:00:00,4.800,G,102,---,255,101
01/01/2021,09:15:00,4.800,G,102,---,255,101
01/01/2021,09:30:00,4.800,G,102,---,255,101
01/01/2021,09:45:00,4.799,G,102,---,255,101
...
Parameters
----------
fn_nrw (str) : path to NRW gauge file
date_start (datetime) : start date for returning data
date_end (datetime) : end date for returning data
Returns
-------
xarray.Dataset object.
"""
#debug(f'Reading "{fn_nrw}" as a NRW file with {get_slug(cls)}') # TODO Maybe include start/end dates
try:
header_dict = cls.read_nrw_header(fn_nrw)
dataset = cls.read_nrw_data(fn_nrw, date_start, date_end)
except:
raise Exception("Problem reading NRW file: " + fn_nrw)
# Attributes
dataset["longitude"] = float(header_dict["Longitude"])
dataset["latitude"] = float(header_dict["Latitude"])
dataset["site_name"] = header_dict["Station Name"]
del header_dict["Longitude"]
del header_dict["Latitude"]
del header_dict["Station Name"]
dataset.attrs = header_dict
return dataset
@classmethod
def read_nrw_header(cls, filnam):
"""
Reads header from a NRW csv file.
Parameters
----------
filnam (str) : path to file
Returns
-------
dictionary of attributes
"""
#debug(f'Reading NRW header from "{filnam}" ')
my_dict = pd.read_csv(filnam, nrows=15, delimiter=':', header=None, index_col=0).to_dict()
# Strip out special characters and remove nesting from dict
header_dict = {key.strip(): item.strip() for key, item in my_dict[1].items()} # my_dict is a nested dict; we only want the inner dict
return header_dict
@classmethod
def read_nrw_data(cls, filnam, date_start=None, date_end=None, header_length: int = 14):
"""
Reads NRW data from a csv file.
Parameters
----------
filnam (str) : path to HLW tide gauge file
date_start (np.datetime64) : start date for returning data.
date_end (np.datetime64) : end date for returning data.
header_length (int) : number of lines in header (to skip when reading).
Not including column titles
Returns
-------
xarray.Dataset containing times, water level values, other data
"""
import datetime
# Initialise empty dataset and lists
#debug(f'Reading NRW data from "{filnam}"')
dataset = xr.Dataset()
#time = []
#sea_level = []
data = pd.read_csv(filnam, parse_dates=[['Date', 'Time']], dayfirst=True, skiprows=header_length, delimiter=',', header=1, index_col=0)
my_dict = {data.filter(regex='Value*').columns[0]: 'sea_level'} # Captures different string endings
data.rename(columns=my_dict, inplace=True)
data.index.names=['time']
dataset = data.to_xarray()
if date_start is not None:
dataset = dataset.where(dataset.time >= date_start)
if date_end is not None:
dataset = dataset.where(dataset.time <= date_end)
# Assign local dataset to object-scope dataset
return dataset
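# The returned xarray Dataset has the 'Value[m]' column renamed to 'sea_level' and a
# 'time' coordinate parsed day-first from the combined Date/Time columns.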
#%% Save method
def save_method(loc, ofile=None):
"""
Parameters
----------
loc : gauge object
object whose .dataset (xarray.Dataset) is written to the archive.
ofile : STR
filename head if different from "loc_year"
Returns
-------
None.
"""
# Check the exported file is as you expect.
# Load the file and check that the xarray structure is preserved.
ofile = "../archive_shoothill/" + ofile + ".nc"
try:
object = xr.open_dataset(ofile)
object.close() # close file associated with this object
file_flag = True
loc.dataset = xr.concat([loc.dataset, object], "time").compute()
#os.remove(ofile)
print('loaded old file')
except:
print(f'{ofile} does not exist or does not load. Write a fresh file')
file_flag = False
try:
loc.dataset.to_netcdf( ofile, mode="w", format="NETCDF4" )
print(f'{ofile} saved.')
except:
print(f'{ofile} would not save. Create _2.nc file')
loc.dataset.to_netcdf( ofile.replace('.nc','_2.nc'), mode="w", format="NETCDF4" )
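# Behaviour note: if a yearly archive file already exists it is loaded and concatenated
# with the new data along 'time' before rewriting; if the write fails, a sibling
# '*_2.nc' file is written instead of raising.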
#%% plot functions
def line_plot(ax, time, y, color, size, label=None ):
ax.plot(time, y, color=color, linewidth=size, label=label)
return ax
def scatter_plot(ax, time, y, color, size, label=None ):
ax.scatter(time, y, color=color, s=size, label=label)
return ax
#%%
#%% Load and export NRW csv files to xarray and netcdf
######################################################
dir = "data/ATI 22356a - River level & flow data at Chester on the river Dee/"
try:
fn_nrw = dir + "067020.SG[Stage].15min.csv"
ctr067020SG = NRWGauge()
ctr067020SG.dataset = ctr067020SG.read_nrw_to_xarray(fn_nrw, date_start=None, date_end=None)
save_method(ctr067020SG, ofile="ctr067020SG_2021")
#liv.plot_timeseries()
except:
print('failed for ctr067020SG')
try:
fn_nrw = dir + "067033.SG[Stage].15min.csv"
ctr067033SG = NRWGauge()
ctr067033SG.dataset = ctr067033SG.read_nrw_to_xarray(fn_nrw, date_start=None, date_end=None)
save_method(ctr067033SG, ofile="ctr067033SG_2021")
#liv.plot_timeseries()
except:
print('failed for ctr067033SG')
try:
fn_nrw = dir + "067033.FL[FlowLogged].15min.csv"
ctr067033FL = NRWGauge()
ctr067033FL.dataset = ctr067033FL.read_nrw_to_xarray(fn_nrw, date_start=None, date_end=None)
save_method(ctr067033FL, ofile="ctr067033FL_2021")
#liv.plot_timeseries()
except:
print('failed for ctr067033FL')
#%% Load from csv and plot
######################################################
if(1):
date_end = np.datetime64('2021-11-08T23:59:59')
date_start = np.datetime64('2021-11-08')
fn_nrw = dir + "067033.SG[Stage].15min.csv"
ctr = NRWGauge()
ctr.dataset = ctr.read_nrw_to_xarray(fn_nrw, date_start=date_start, date_end=date_end)
fn_nrw = dir + "067020.SG[Stage].15min.csv"
ctr2 = NRWGauge()
ctr2.dataset = ctr2.read_nrw_to_xarray(fn_nrw, date_start=date_start, date_end=date_end)
fn_nrw = dir + "067033.FL[FlowLogged].15min.csv"
ctrf = NRWGauge()
ctrf.dataset = ctrf.read_nrw_to_xarray(fn_nrw, date_start=date_start, date_end=date_end)
#%% Plot data
# CTR height + flow
line_flag = True
today_only_flag = True
try: date_start
except NameError: date_start = np.datetime64(ctr2.dataset.time[0].values,'ms')
try: date_end
except NameError: date_end = np.datetime64(ctr2.dataset.time[-1].values, 'ms')
plt.close('all')
fig, ax1 = plt.subplots(1, sharex=True)
## Only get tides over the weir with 8.75m at Liverpool
fig.suptitle('Dee River heights and flow')
#ax1.scatter(liv.dataset.time, liv.dataset.sea_level, color='k', s=1, label=liv.dataset.site_name)
ax1 = scatter_plot(ax1, ctr.dataset.time, ctr.dataset.sea_level, 'k', 1, ctr.dataset.site_name.values)
if line_flag:
ax1 = line_plot(ax1, ctr.dataset.time, ctr.dataset.sea_level, 'k', 1)
ax1 = scatter_plot(ax1, ctr2.dataset.time, ctr2.dataset.sea_level, 'b', 1, ctr2.dataset.site_name.values)
if line_flag:
ax1 = line_plot(ax1, ctr2.dataset.time, ctr2.dataset.sea_level, 'b', 1)
ax1b = ax1.twinx()
ax1b = scatter_plot(ax1b, ctrf.dataset.time, ctrf.dataset.sea_level, 'g', 1, ctrf.dataset.site_name.values)
if line_flag:
ax1b = line_plot(ax1b, ctrf.dataset.time, ctrf.dataset.sea_level, 'g', 1)
ax1b.set_ylabel('flow rate (m3/s)', color='g')
for tl in ax1b.get_yticklabels():
tl.set_color('g')
ax1.set_ylabel('water level (m)')
myFmt = mdates.DateFormatter('%H:%M') #('%d-%a')
ax1.xaxis.set_major_formatter(myFmt)
#ax1.set_xlabel( date_start.astype(datetime.datetime).strftime('%d%b%y') + \
# '-' + date_end.astype(datetime.datetime).strftime('%d%b%y') )
ax1.set_xlabel(date_end.astype(datetime.datetime).strftime('%d%b%y'))
# Add empty data to ax1 to get "green flow data" in the legend
ax1 = scatter_plot(ax1, [], [], 'g', 1, "Flow, above weir")
# plot the legend
ax1.legend(markerscale=6, loc='lower left')
plt.savefig('Chester_river_NRW_levels.png')
```
#### File: DeeBore/utils/CTR_tide_times.py
```python
import os
import sys
import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import sklearn.metrics as metrics
import pytz
import pickle
import scipy.signal # find_peaks
#from coast.tidegauge import Tidegauge
from shoothill_api.shoothill_api import GAUGE
from coast.general_utils import day_of_week
from coast.stats_util import find_maxima
import logging
logging.basicConfig(filename='ctr.log', filemode='w+')
logging.getLogger().setLevel(logging.DEBUG)
from deebore import GAUGE
from deebore import Controller
#%% ################################################################################
class Databucket():
"""
This is where the main things happen.
Where user input is managed and methods are launched
"""
############################################################################
#%% Initialising and Finishing methods
############################################################################
def __init__(self):
pass
def process(self, tg:GAUGE=None, HLW:str="HW"):
"""
Save into a dataset which is indexed against tide table HW times.
tg: dataset to process. E.g. the full timeseries from Chester.
return xr.Dataset of tide events and variables indexed by Liv HT time
"""
loc = "ctr"
HLW = "HW"
print(f"loc: {loc} {HLW}")
tt = GAUGE()
print( tg.dataset.time.min() )
# TideTable dataset truncated to relevant period for both highs and lows
tt.dataset = self.glad_HLW.dataset.sel(
time_highs=slice(tg.dataset.time.min(), tg.dataset.time.max()),
time_lows =slice(tg.dataset.time.min(), tg.dataset.time.max()) )
if HLW == 'HW':
time_var = 'time_highs'
measure_var = 'sea_level_highs'
winsize = [3,3] #4h for HW, 6h for LW. +/- search distance for nearest extreme value
elif HLW == 'LW':
time_var = 'time_lows'
measure_var = 'sea_level_lows'
# TideTable dataset truncated to relevant period
winsize = [-3,9] #4h for HW, 6h for LW. +/- search distance for nearest extreme value
else:
print('This should not have happened...')
# Truncate tide table data if necessary, for speed
# Iterate of tide table HW times (even for LT analysis)
HT_h = [] # Extrema - height
HT_t = [] # Extrema - time
HT_lag = [] # lag between liv HT and tg_HT
LT_h = [] # Extrema low tide - height
LT_t = [] # Extrema low tide - time
LT_lag = [] # lag between Liv HT and tg_LT
ref_HT_t = [] # store index HT times. Input guess_time
ref_HT_h = [] # store index HT height. Input height(guess_time)
ref_LT_t = [] # store index LT times. Input guess_time
ref_LT_h = [] # store index LT height.
for i in range(len(tt.dataset[time_var])):
if(1):#try:
time_var = 'time_highs'
measure_var = 'sea_level_highs'
HH = None
guess_time = tt.dataset[time_var][i].values
print(f"guess: {guess_time}")
# Extracting the highest and lowest value with a cubic spline is
# very memory costly. Only need to use the cubic method for the
# bodc and api sources, so compute the high and low waters in a
# piecewise approach around observations times.
"""
INPUT:
xr.dataset of river data.
guess_time : liv_HW time
2 part window for time clipping
RETURNS:
xr.dataset single values for river HW height, time and lag, using cubic fit
xr.dataset NaN, not enough data
"""
if(1):
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
win = GAUGE()
win.dataset = tg.dataset.sel( time=slice(guess_time - np.timedelta64(winsize[0], "h"), guess_time + np.timedelta64(winsize[1], "h")) )
#if HLW == "LW":
# print(f"win.dataset {win.dataset}")
print(i," win.dataset.time.size", win.dataset.time.size)
if win.dataset.time.size <= 3:
tg_HW = GAUGE()
tg_HW.dataset = xr.Dataset({measure_var: (time_var, [np.NaN])}, coords={time_var: [guess_time]})
else:
if HLW == "HW" or HLW == "LW":
#win.dataset['sea_level_trend'] = win.dataset.sea_level.differentiate("time")
tg_HW = win.find_high_and_low_water(var_str='sea_level',method='cubic')
#tg_inf = win.find_high_and_low_water(var_str='sea_level_trend',method='cubic')
print(f"max points: {len(tg_HW.dataset[time_var])}")
else:
print(f"This should not have happened... HLW:{HW}")
# Save the largest
try:
#print("tg_HLW.dataset[measure_var]",i, tg_HLW.dataset[measure_var])
HH = tg_HW.dataset[measure_var][tg_HW.dataset[measure_var].argmax()]
event_time = tg_HW.dataset[time_var][tg_HW.dataset[measure_var].argmax()]
HH_lag = (event_time - guess_time).astype('timedelta64[m]')
except:
HH = xr.DataArray([np.NaN], dims=(time_var), coords={time_var: [guess_time]})[0]
HH_lag = xr.DataArray([np.datetime64('NaT').astype('timedelta64[m]')], dims=(time_var), coords={time_var: [guess_time]})[0]
""" Append HW event data [floats, np.datetime64] """
#print("time,HH,HH_lag:",i, guess_time, HH.values, HH_lag.values)
if type(HH) is xr.DataArray: ## Actually I think they are always xr.DataArray with time, but the height can be nan.
print(f"HH: {HH}")
HT_h.append( HH.values )
#print('len(HT_h)', len(HT_h))
HT_t.append( HH[time_var].values )
HT_lag.append( HH_lag.values )
ref_HT_t.append( tt.dataset[time_var][i].values ) # guess_time
ref_HT_h.append( tt.dataset[measure_var][i].values )
##################
# Find the turning/shock point before HT.
# Remove a linear trend from HT-3 : HT. Find minimum.
"""
INPUT:
xr.dataset of river data.
guess_time : liv_HW time
2 part window for time clipping [window[0] : rivHW_t]
RETURNS:
xr.dataset single values for river LW height, time and lag, using cubic fit
xr.dataset NaN, not enough data
"""
time_var = 'time_lows'
measure_var = 'sea_level_lows'
win_mod = GAUGE()
win_mod.dataset = tg.dataset.sel( time=slice(guess_time - np.timedelta64(winsize[0], "h"), HH.time_highs.values) )
if win_mod.dataset.time.size == 0:
tg_LW = GAUGE()
tg_LW.dataset = xr.Dataset({measure_var: (time_var, [np.NaN])}, coords={time_var: [guess_time]})
else:
print(f"win_mod.dataset.time.size : {win_mod.dataset.time.size}")
nt = len(win_mod.dataset.sea_level)
y0 = win_mod.dataset.sea_level[0].values
y1 = win_mod.dataset.sea_level[-1].values
win_mod.dataset['sea_level'] = win_mod.dataset.sea_level - [(y0*(nt-1-kk) + y1*kk)/(nt-1) for kk in range(nt)]
tg_LW = win_mod.find_high_and_low_water(var_str='sea_level',method='comp')
if(0):
plt.close('all')
plt.figure()
plt.plot( win_mod.dataset.time, win_mod.dataset.sea_level, 'g.' )
plt.plot( win_mod.dataset.time, win_mod.dataset.sea_level, 'g' )
plt.plot( tg_LW.dataset.time_lows, tg_LW.dataset.sea_level_lows, 'r+')
plt.plot( tg_LW.dataset.time_lows, tg_LW.dataset.sea_level_lows, 'r')
plt.xlim([guess_time - np.timedelta64(winsize[0],'h'),
guess_time + np.timedelta64(winsize[1],'h')])
plt.show()
try:
# Find time. Interpolate time onto original timeseries
#print(f"tg_LW.dataset:{tg_LW.dataset}")
#print(f"---")
#print(f"tg_LW.dataset[measure_var].argmin():{tg_LW.dataset[measure_var].argmin().values}")
event_time = tg_LW.dataset[time_var][tg_LW.dataset[measure_var].argmin().values]
#print(f"event_time: {event_time}")
# interpolate back onto original sea_level timeseries (not needed for method="comp")
LL = win.dataset.sea_level.interp(time=event_time, method='cubic') # two coords: {time_lows, time} inherited from {event_time, win_mod.dataset}
#print(f"LL.values: {LL.values}")
#print("tg_LW.dataset[measure_var]",i, tg_LW.dataset[measure_var])
#LL = tg_HLW.dataset[measure_var][tg_inf.dataset[measure_trend_var].argmax()] # Units: (m), not (m/s)
LL_lag = (event_time - guess_time).astype('timedelta64[m]')
except:
LL = xr.DataArray([np.NaN], dims=(time_var), coords={time_var: [guess_time]})[0]
LL_lag = xr.DataArray([np.datetime64('NaT').astype('timedelta64[m]')], dims=(time_var), coords={time_var: [guess_time]})[0]
# Find the preceding minima
""" Append LW event data, being careful to get the appropriate liv LT [floats, np.datetime64] """
#print("time,LL,LL_lag:",i, guess_time, LL.values, LL_lag.values)
if type(LL) is xr.DataArray: ## Actually I think they are always xr.DataArray with time, but the height can be nan.
LT_h.append( LL.values )
#print('len(HT_h)', len(HT_h))
LT_t.append( LL[time_var].values )
LT_lag.append( LL_lag.values )
print(f"Check guess: {tt.dataset.time_highs[i].values}")
try: #if(1):
if (tt.dataset.time_lows[i].values < tt.dataset.time_highs[i].values) and \
(tt.dataset.time_lows[i].values > (tt.dataset.time_highs[i].values - np.timedelta64(12, 'h'))):
print('HT_t(i)-12 < LT_t(i) < HT_t(i)')
ref_LT_t.append( tt.dataset[time_var][i].values )
ref_LT_h.append( tt.dataset[measure_var][i].values )
elif (tt.dataset.time_lows[i-1].values < tt.dataset.time_highs[i].values) and \
(tt.dataset.time_lows[i-1].values > (tt.dataset.time_highs[i].values - np.timedelta64(12, 'h'))):
print('HT_t(i)-12 < LT_t(i-1) < HT_t(i)')
ref_LT_t.append( tt.dataset[time_var][i-1].values )
ref_LT_h.append( tt.dataset[measure_var][i-1].values )
elif (tt.dataset.time_lows[i+1].values < tt.dataset.time_highs[i].values) and \
(tt.dataset.time_lows[i+1].values > (tt.dataset.time_highs[i].values - np.timedelta64(12, 'h'))):
print('HT_t(i)-12 < LT_t(i+1) < HT_t(i)')
ref_LT_t.append( tt.dataset[time_var][i+1].values )
ref_LT_h.append( tt.dataset[measure_var][i+1].values )
else:
#print('LT_t(i) !< HT_t(i)')
print(f"LT:{tt.dataset.time_lows[i].values}. HT:{tt.dataset.time_highs[i].values}")
ref_LT_t.append( np.datetime64('NaT').astype('timedelta64[m]') )
ref_LT_h.append( np.nan )
except:
ref_LT_t.append( np.datetime64('NaT').astype('timedelta64[m]') )
ref_LT_h.append( np.nan )
#print('len(HT_t)', len(HT_t))
#print(f"i:{i}, {HT_t[-1].astype('M8[ns]').astype('M8[ms]').item()}" )
#print(HT_t[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
## Make timeseries plot around the highwater maxima to check
# values are being extracted as expected.
if (i % 12) == 0:
fig = plt.figure()
if HLW == "HW":
xlim = [HT_t[-1] - np.timedelta64(winsize[0],'h'),
HT_t[-1] + np.timedelta64(winsize[1],'h')]
elif HLW == "LW":
xlim = [guess_time - np.timedelta64(winsize[0],'h'),
guess_time + np.timedelta64(winsize[1],'h')]
else:
print(f"Not expecting HLW:{HLW}")
if loc == 'ctr':
ylim = [2,7]
elif loc == 'liv':
ylim = [0,11]
else:
ylim = [0,11]
plt.subplot(3,4,(i%12)+1)
plt.plot(tg.dataset.time, tg.dataset.sea_level,'b')
plt.plot(tg.dataset.time, tg.dataset.sea_level,'b.')
#plt.plot(tg.dataset.time, ylim[0]+1e13*tg.dataset.sea_level.differentiate("time"),'g')
print(f"LT_h[-1]: {LT_h[-1]}")
print(f"LT_t[-1]: {LT_t[-1]}")
plt.plot( HT_t[-1], HT_h[-1], 'r+' )
plt.plot( LT_t[-1], LT_h[-1], 'g+' )
plt.plot( [guess_time, guess_time],[0,11],'k')
plt.xlim(xlim)
plt.ylim(ylim) #[0,11])
plt.text( HT_t[-1]-np.timedelta64(winsize[0],'h'),ylim[0]+ 0.05*(ylim[1]-ylim[0]), HT_t[-1].astype('M8[ns]').astype('M8[ms]').item().strftime('%Y-%m-%d'))
# Turn off tick labels
plt.gca().axes.get_xaxis().set_visible(False)
#plt.xaxis_date()
#plt.autoscale_view()
if (i%12) == 12-1:
plt.savefig('figs/LIV_CTR_get_tidetabletimes_'+str(i//12).zfill(2)+'_'+HLW+'.png')
plt.close('all')
else:
logging.info(f"Did not find a high water near this guess")
print(f"Did not find a high water near this guess")
if(0):#except:
logging.warning('Issue with appending HLW data')
print('Issue with appending HLW data')
try: # Try and print the last observation timeseries
plt.savefig('figs/LIV_CTR_get_tidetabletimes_'+str(i//12).zfill(2)+'_'+HLW+'.png')
plt.close('all')
except:
logging.info(f"Did not have any extra panels to plot")
print(f"Did not have any extra panels to plot")
# Save as xarray objects
coords = {'time': (('time'), ref_HT_t)}
#print("length of data:", len(np.array(HT_h)) )
HT_height_xr = xr.DataArray( np.array(HT_h), coords=coords, dims=['time'])
HT_time_xr = xr.DataArray( np.array(HT_t), coords=coords, dims=['time'])
HT_lag_xr = xr.DataArray( np.array(HT_lag), coords=coords, dims=['time'])
HT_ref_h_xr = xr.DataArray( np.array(ref_HT_h), coords=coords, dims=['time'])
LT_height_xr = xr.DataArray( np.array(LT_h), coords=coords, dims=['time'])
LT_time_xr = xr.DataArray( np.array(LT_t), coords=coords, dims=['time'])
LT_lag_xr = xr.DataArray( np.array(LT_lag), coords=coords, dims=['time'])
LT_ref_h_xr = xr.DataArray( np.array(ref_LT_h), coords=coords, dims=['time'])
LT_ref_t_xr = xr.DataArray( np.array(ref_LT_t), coords=coords, dims=['time'])
#logging.debug(f"len(self.bore[loc+'_time_'{HLW}]): {len(self.bore[loc+'_time_'+HLW])}")
#logging.info(f'len(self.bore.liv_time)', len(self.bore.liv_time))
logging.debug(f"type(HT_t): {type(HT_t)}")
logging.debug(f"type(HT_h): {type(HT_h)}")
#return HT_height_xr, HT_time_xr, HT_lag_xr, HT_ref_h_xr, LT_height_xr, LT_time_xr, LT_lag_xr, LT_ref_h_xr, LT_ref_t_xr
# lags are referenced to liv_HT_t, which is also the index variable
return xr.Dataset(data_vars={
"ctr_HT_h": HT_height_xr, "ctr_HT_t": HT_time_xr, "ctr_HT_dt": HT_lag_xr,
"liv_HT_h" : HT_ref_h_xr, "liv_HT_t" : HT_ref_h_xr.time,
"ctr_LT_h" : LT_height_xr, "ctr_LT_t": LT_time_xr, "ctr_LT_dt": LT_lag_xr,
"liv_LT_h" : LT_ref_h_xr, "liv_LT_t" : LT_ref_t_xr
})
def load_tidetable(self):
"""
load gladstone data
save HT values in xarray:
times_highs
sea_level_highs
"""
logging.info("Get Gladstone HLW data")
# Load tidetable data from files
filnam1 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2005_2014_HLW.txt'
filnam2 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2015_2020_HLW.txt'
filnam3 = '/Users/jeff/GitHub/DeeBore/data/Liverpool_2021_2022_HLW.txt'
tg = GAUGE()
tg1 = GAUGE()
tg2 = GAUGE()
tg3 = GAUGE()
tg1.dataset = tg1.read_hlw_to_xarray(filnam1)#, self.bore.time.min().values, self.bore.time.max().values)
tg2.dataset = tg2.read_hlw_to_xarray(filnam2)#, self.bore.time.min().values, self.bore.time.max().values)
tg3.dataset = tg3.read_hlw_to_xarray(filnam3)#, self.bore.time.min().values, self.bore.time.max().values)
tg.dataset = xr.concat([ tg1.dataset, tg2.dataset, tg3.dataset], dim='time')
# This produces an xr.dataset with sea_level_highs and sea_level_lows
# with time variables time_highs and time_lows.
self.glad_HLW = tg.find_high_and_low_water(var_str='sea_level')
def load_ctr(self):
"""
load timeseries data.
store as xr.dataArray
"""
ctr = GAUGE()
#ctr.dataset = xr.open_dataset("archive_shoothill/ctr_2021.nc")
#ctr.dataset = ctr.dataset.sel( time=slice(np.datetime64('2021-03-31T06:00:00'), np.datetime64('2021-03-31T18:00:00')) )
ctr.dataset = xr.open_mfdataset("archive_shoothill/ctr2_2020.nc")
#ctr.dataset = ctr.dataset.sel( time=slice(np.datetime64('2020-04-14T04:00:00'), np.datetime64('2020-04-16T18:00:00')) )
#ctr.dataset = ctr.dataset.sel( time=slice(np.datetime64('2020-01-01T04:00:00'), np.datetime64('2020-04-16T18:00:00')) )
#ctr.dataset = xr.open_mfdataset("archive_shoothill/ctr2_202*.nc")
#ctr_HLW = ctr.find_high_and_low_water(var_str='sea_level', method="cubic")
self.ctr = ctr
#self.ctr_HLW = ctr_HLW
def load_liv(self):
"""
load timeseries data.
store as xr.dataArray
"""
liv = GAUGE()
liv.dataset = xr.open_dataset("archive_shoothill/liv_2021.nc")
#liv.dataset = xr.open_mfdataset("archive_shoothill/liv_20*.nc")
#liv_HLW = liv.find_high_and_low_water(var_str='sea_level', method="cubic")
self.liv = liv
#self.liv_HLW = liv_HLW
class PickleJar():
""" Class to handle pickle methods """
def __init__(self, pickle_file:str=""):
print(f"pickle file: {pickle_file}")
self.pickle_file = pickle_file
pass
def load(self):
"""
Auto load databucket from pickle file if it exists.
Return:
self.dataset
self.load_pickle_flag [True/False]
"""
print("Add to pickle file, if it exists")
self.load_pickle_flag = False
self.dataset = []
try:
if os.path.exists(self.pickle_file):
template = "...Loading (%s)"
print(template%self.pickle_file)
with open(self.pickle_file, 'rb') as file_object:
self.dataset = pickle.load(file_object)
self.load_pickle_flag = True
else:
print("... %s does not exist"%pickle_file)
except KeyError:
print('ErrorA ')
except (IOError, RuntimeError):
print('ErrorB ')
def to_pickle(self):
"""
Save copy of self.dataset into pickle file, if requested
Inputs:
self.dataset [xr.dataset]
pickle_file [str]
Returns:
pkl file
"""
print('Pickle data.')
os.system('rm -f '+self.pickle_file)
try:
with open(self.pickle_file, 'wb') as file_object:
pickle.dump(self.dataset, file_object)
except:
print(f"Problem saving pickle file {self.pickle_file}")
class PostProcess():
"""
Test the hypothesis that the data can collapse to a shallow water propagation
problem, with a reference height to be determined. Ignoring effects of variable
river depth
"""
############################################################################
#%% Initialising and Finishing methods
############################################################################
def __init__(self):
pass
def ref_height_from_ds(self, ds):
""" Compute a reference height from xr.dataset
dt_LW = dt(ctr_LW_t:Glad_LW_t) = ctr_t - Glad_HW_t + Glad_HW_t - Glad_LW_t
= LT_lag + HT_ref_t - LT_ref_t
"""
dt_LW_sq = ( (ds.ctr_LT_dt + ds.liv_HT_t.time - ds.liv_LT_t)/np.timedelta64(1, 's') )**2
dt_HW_sq = ( ds.ctr_HT_dt/np.timedelta64(1, 's') )**2
den = dt_HW_sq - dt_LW_sq
a = (ds.liv_LT_h*dt_LW_sq - ds.liv_HT_h*dt_HW_sq) / den
ds['a'] = a
return ds
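# Note: a sketch of the algebra behind ref_height_from_ds, assuming the
# linear shallow-water wave speed c = sqrt(g*(a + h)) also used in
# ref_L_from_ds below. If both the HW and the LW signal travel the same
# distance L, then
#   L = sqrt(g*(a + h_HW)) * dt_HW = sqrt(g*(a + h_LW)) * dt_LW
# Squaring and eliminating L gives
#   (a + h_HW) * dt_HW^2 = (a + h_LW) * dt_LW^2
#   a = (h_LW * dt_LW^2 - h_HW * dt_HW^2) / (dt_HW^2 - dt_LW^2)
# which is the expression evaluated above with h_HW = liv_HT_h and
# h_LW = liv_LT_h.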
def ref_L_from_ds(self, ds):
""" Compute hyperthetical distance that linear wave travels, given reference height a"""
dt_HW_sq = ( ds.ctr_HT_dt/np.timedelta64(1, 's') )**2
L = np.sqrt( (ds.a + ds.liv_HT_h)*9.81 * dt_HW_sq )/1000. # in km
ds['L'] = L # in km
return ds
############################################################################
## Bespoke methods
############################################################################
def histogram_CTR_LIV_lag():
tt = Databucket()
tt.load_tidetable()
tt.load_ctr()
HLW = "HW"
ds = tt.process(tg = tt.ctr, HLW=HLW)
plt.figure()
plt.plot( ds.ctr_HT_dt / np.timedelta64(1, 'm'),ds.liv_HT_h, '+')
plt.xlim([0,100])
plt.xlabel(f"Timing CTR {HLW}, minutes after LIV")
plt.ylabel(f"Liverpool {HLW} (m)")
plt.plot([0,100],[8.05, 8.05]) # 13/10/2021 04:39 BST 8.05
plt.savefig("tt.png")
lag = ds.ctr_HT_dt.where(ds.liv_HT_h > 7.9).where(ds.liv_HT_h < 8.2) / np.timedelta64(1, 'm')
fig, ax = plt.subplots(figsize =(10, 7))
ax.hist(lag, bins = np.linspace(40,100,10))
plt.xlabel(f"Timing CTR {HLW}, minutes after LIV")
plt.ylabel('bin count. Liv HT: 7.9 - 8.2m')
plt.title(f"Histogram of CTR {HLW} timing 2020-21")
plt.savefig('hh.png')
def main1():
""" Read and process timeseries. Create xarray dataset. Export and pickly dataframe
Plot graphs """
data_bucket = Databucket()
data_bucket.load_tidetable()
data_bucket.load_ctr()
#HT_height_xr, HT_time_xr, HT_lag_xr, HT_ref_h_xr, LT_height_xr, LT_time_xr, LT_lag_xr, LT_ref_h_xr, LT_ref_t_xr = data_bucket.process(tg = tt.ctr, HLW="HW")
ds = data_bucket.process(tg = data_bucket.ctr, HLW="HW")
#data_bucket.ds = ds
#data_bucket.to_pickle()
pickle_jar = PickleJar(pickle_file="CTR_tide_times.pkl")
pickle_jar.dataset = ds
pickle_jar.to_pickle()
# Make some plots
plt.figure()
#plt.plot( tt.ctr_lag / np.timedelta64(1, 'm'), tt.liv_height-tt.ctr_height, '+')
plt.plot( ds.ctr_HT_dt / np.timedelta64(1, 'm'), ds.liv_HT_h-ds.ctr_HT_h, '+')
plt.xlim([0,100])
plt.ylim([3,5.5])
plt.xlabel('Timing CTR HT, minutes after LIV')
plt.ylabel('Liverpool-Chester HT (m)')
plt.savefig("dd.png")
plt.figure()
#plt.plot( tt.ctr_lag / np.timedelta64(1, 'm'), tt.liv_height-tt.ctr_height, '+')
plt.scatter( (ds.ctr_HT_dt - ds.ctr_LT_dt) / np.timedelta64(1, 'm'),
ds.ctr_HT_h - ds.ctr_LT_h,
c=ds.liv_HT_h, marker='+')
#plt.xlim([0,100])
#plt.ylim([3,5.5])
#legend
cbar = plt.colorbar()
cbar.set_label('High Water at Liverpool (m)', rotation=270)
plt.xlabel('time(LT:HT) at CTR, mins')
plt.ylabel('height(HT-LT) at Chester (m)')
plt.title('Magnitude and duration of rising tide at CTR')
plt.savefig("deltaH_deltaT_CTR.png")
################################################################################
################################################################################
#%% Main Routine
################################################################################
################################################################################
if __name__ == "__main__":
#### Constants
DATABUCKET_FILE = "CTR_tide_times.pkl"
#### Initialise logging
now_str = datetime.datetime.now().strftime("%d%b%y %H:%M")
logging.info(f"-----{now_str}-----")
## Plot lag vs Gladstone heights for Chester HT
## Plot the histogram of CTR lags for a window of Liv heights.
#histogram_CTR_LIV_lag()
## Read and process timeseries. Create xarray dataset. Export and pickle dataframe
## Plot graphs
#main1()
if(0):
aa = PostProcess()
#ds = aa.load_databucket()
pickle_jar = PickleJar(pickle_file="CTR_tide_times.pkl")
pickle_jar.load()
ds = pickle_jar.dataset
ds = aa.ref_height_from_ds(ds)
# For a given river height (LT_height), is 'a' about constant? Well it does depend on the Glad HT_h...
#ax1 = df.plot.scatter(x='a', y='LT_height', c='HT_ref_h') #; plt.show()
plt.scatter( ds.a , ds.ctr_LT_h, c=ds.liv_HT_h )
plt.xlabel('Estimated displacement depth (m)')
plt.ylabel('CTR LT waterlevel (m)')
clb=plt.colorbar()
clb.ax.set_ylabel('Liv HT (m)')
plt.show()
ds = aa.ref_L_from_ds(ds)
#ax1 = df.plot.scatter(x='L', y='LT_height', c='HT_ref_h'); plt.show()
plt.scatter( ds.L , ds.ctr_LT_h, c=ds.liv_HT_h )
plt.xlabel('Estimated separation distance (km)')
plt.ylabel('CTR LT waterlevel (m)')
clb=plt.colorbar()
clb.ax.set_ylabel('Liv HT (m)')
plt.show()
```
#### File: DeeBore/utils/plot_CTR_river_event.py
```python
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr # There are many ways to read netCDF files, including this one!
#%% plot functions
def line_plot(ax, time, y, color, size, label=None ):
ax.plot(time, y, color=color, linewidth=size, label=label)
return ax
def scatter_plot(ax, time, y, color, size, label=None ):
ax.scatter(time, y, color=color, s=size, label=label)
return ax
#%%
# Choose some arbitrary dates
start_date = np.datetime64('2019-02-17')
end_date = np.datetime64('2019-02-23')
#start_date = np.datetime64('2021-11-02')
#end_date = np.datetime64('2021-11-04')
# location of files
dir = "archive_shoothill/" #
# load data by location.
liv = xr.open_mfdataset(dir+"liv_????.nc") # Tidal port Gladstone Dock, Liverpool
ctr_dn = xr.open_mfdataset(dir+"ctr2_????.nc") # below the Chester weir
ctr_up = xr.open_mfdataset(dir+"ctr_????.nc") # above the Chester weir
ctr_fl = xr.open_mfdataset(dir+"ctrf_????.nc") # flow rate above (and at) the weir
iron= xr.open_mfdataset(dir+"iron_????.nc") # upstream river at Ironbridge
farn= xr.open_mfdataset(dir+"farn_????.nc") # upstream river at Farndon.
#%% Plot data
# Top: Gladstone + Ironbridge
line_flag = True
today_only_flag = True
plt.close('all')
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
## Only get tides over the weir with about 8.75m at Liverpool
fig.suptitle('Dee River heights and flow')
ax1 = scatter_plot(ax1, liv.time, liv.sea_level, 'k', 1, liv.site_name)
if line_flag:
ax1 = line_plot(ax1, liv.time, liv.sea_level, 'k', 1)
ax1.plot( [start_date - np.timedelta64(1,'D'), end_date], [8.75,8.75], 'k--')
ax1b = ax1.twinx()
ax1b = scatter_plot(ax1b, iron.time, iron.sea_level, 'b', 1, iron.site_name)
if line_flag:
ax1b = line_plot(ax1b, iron.time, iron.sea_level, 'b', 1)
ax1.set_ylabel('water level (m)', color='k')
ax1b.set_ylabel('water level (m)', color='b')
for tl in ax1b.get_yticklabels():
tl.set_color('b')
# plot legend. sort y limits
ax1.set_ylim([0, 12]) # Liverpool range
ax1.legend(markerscale=6)
ax1b.legend(markerscale=6)
# Lower: CTR height + flow
ax2 = scatter_plot(ax2, ctr_up.time, ctr_up.sea_level, 'k', 1, "Chester, above weir")
if line_flag:
ax2 = line_plot(ax2, ctr_up.time, ctr_up.sea_level, 'k', 1)
ax2 = scatter_plot(ax2, ctr_dn.time, ctr_dn.sea_level, 'b', 1, "Chester, below weir")
if line_flag:
ax2 = line_plot(ax2, ctr_dn.time, ctr_dn.sea_level, 'b', 1)
ax2b = ax2.twinx()
ax2b = scatter_plot(ax2b, ctr_fl.time, ctr_fl.sea_level, 'g', 1, ctr_fl.site_name)
if line_flag:
ax2b = line_plot(ax2b, ctr_fl.time, ctr_fl.sea_level, 'g', 1)
ax2b.set_ylabel('flow rate (m3/s)', color='g')
for tl in ax2b.get_yticklabels():
tl.set_color('g')
ax2.set_ylabel('water level (m)')
# Add empty data to ax1 to get "green flow data" in the legend
ax2 = scatter_plot(ax2, [], [], 'g', 1, "Flow, above weir")
# plot the legend. sort y limits
ax2.set_ylim([2, 7]) # weir range
ax2b.set_ylim([-400, 400]) # flow range
ax2.set_xlim([start_date, end_date])
ax2.legend(markerscale=6, loc='lower left')
#plt.show()
plt.savefig('Chester_river_levels.png')
``` |
{
"source": "jpolz/pycomlink",
"score": 2
} |
#### File: pycomlink/core/comlink.py
```python
from __future__ import absolute_import
from __future__ import division
# ----------------------------------------------------------------------------
# Name: comlink
# Purpose: Class that represents one CML, which consists of several
# ComlinkChannels and CML-specific metadata like coordinates
# of the TX- and RX-sites
#
# Authors: <NAME>
#
# Created: 21.04.2016
# Copyright: (c) <NAME> 2016
# Licence: The MIT License
# ----------------------------------------------------------------------------
from builtins import zip
from builtins import str
from builtins import object
import warnings
from copy import deepcopy
from collections import namedtuple, OrderedDict
import matplotlib.pyplot as plt
import pandas as pd
import folium
from .comlink_channel import ComlinkChannel
from ..processing import Processor
from ..spatial.helper import distance
# Assure that the pandas matplotlib converters are registered,
# as long as a new matplotlib release does not handle pandas
# time data (or np.datetime64) automatically
# TODO: Remove this when solved via new matplotlib, maybe in 2.2.something...
# here: https://github.com/matplotlib/matplotlib/pull/9779
import pandas.plotting
pandas.plotting.register_matplotlib_converters()
Coords = namedtuple('coords', ['lon_a', 'lon_b', 'lat_a', 'lat_b'])
class Comlink(object):
""" A class representing a CML with its channels and metadata"""
def __init__(self, channels=None, metadata=None, **kwargs):
"""
Comlink object representing one physical (commercial) microwave link,
abbreviated as CML. One CML can contain several CommlinkChannels,
typically two, for both directions of communication.
The preferred way to initialize a Comlink object is to initialize the
CommlinkChannels first and pass them as argument here.
Parameters
----------
channels : ComlinkChannel or list of those
metadata : dict
Dictionary with basic CML metadata of the form
{'site_a_latitude': 12.34,
'site_a_longitude': 12.34,
'site_b_latitude': 56.78,
'site_b_longitude': 56.78,
'cml_id': 'XY1234'}
"""
# If no channels are supplied, there must be at least `t`, `rx` and
# the necessary channel metadata to automatically build a ComlinkChannel
if channels is None:
t = kwargs.pop('t')
rx = kwargs.pop('rx')
tx = kwargs.pop('tx')
elif type(channels) == ComlinkChannel:
channels = [channels]
elif type(channels) == list:
for channel in channels:
# Duck-type to see if it behaves like a ComlinkChannel
try:
channel.data
except Exception:
raise AttributeError('`channels` must behave like a '
'ComlinkChannel object')
else:
raise AttributeError('`channels` is %s must be either a '
'ComlinkChannel or a list of ComlinkChannels' %
type(channels))
# if channels are supplied, channel metadata or separate data for
# `t`, `tx` or `rx` should not be supplied since they will have no
# effect, because they are already part of the individual
# ComlinkChannels
if channels is not None:
if (('t' in kwargs) or
('rx' in kwargs) or
('tx' in kwargs) or
('f_GHz' in kwargs) or
('pol' in kwargs)):
warnings.warn('All supplied channel metadata (e.g. f_GHz) '
'has no effect, since they are already '
'contained in the supplied ComlinkChannel')
self.channels = _channels_list_to_ordered_dict(channels)
self.metadata = {'site_a_latitude': metadata['site_a_latitude'],
'site_a_longitude': metadata['site_a_longitude'],
'site_b_latitude': metadata['site_b_latitude'],
'site_b_longitude': metadata['site_b_longitude'],
'cml_id': metadata['cml_id']}
calculated_length = self.calc_length()
if 'length' in list(metadata.keys()):
length_diff = calculated_length - metadata['length']
if abs(length_diff) > 0.5:
warnings.warn('Calculated length = %2.2f and supplied length '
'= %2.2f differ by more than 0.5 km' %
(calculated_length, metadata['length']))
if kwargs.pop('calculate_length', True):
self.metadata['length'] = calculated_length
self.process = Processor(self)
def __getattr__(self, item):
""" Makes channels available via, e.g. `comlink.channel_1` """
if ((item.split('_')[0] == 'channel') and
(type(int(item.split('_')[1])) == int)):
channel_n = int(item.split('_')[1])-1
if channel_n < 0:
raise AttributeError('The channel number must be >= 1')
return self.channels[item]
else:
raise AttributeError('`Comlink` has no attribute %s' % item)
def _repr_html_(self):
html_str = '<table> <tr> '
for channel_name in self.channels:
cml_ch = self.channels[channel_name]
html_str = (html_str + '<td> ' +
'<b>' + channel_name + '</b><br/>' +
cml_ch._repr_html_() + '</td>')
html_str = html_str + '</tr>' + '</table>'
return html_str
def __dir__(self):
attr_list = (list(Comlink.__dict__.keys()) +
list(self.__dict__.keys()) +
list(self.channels.keys()))
return attr_list
def __copy__(self):
cls = self.__class__
new_cml = cls.__new__(cls)
new_cml.__dict__.update(self.__dict__)
return new_cml
def __deepcopy__(self, memo=None):
new_cml = self.__copy__()
if memo is None:
memo = {}
memo[id(self)] = new_cml
#for name, channel in self.channels.iteritems():
# new_cml.channels[name] = deepcopy(channel, memo)
new_cml.metadata = deepcopy(self.metadata, memo)
new_cml.channels = deepcopy(self.channels, memo)
new_cml.process = deepcopy(self.process, memo)
return new_cml
def get_coordinates(self):
""" Return the coordinates of site_a and site_b
Returns
-------
coords : namedtuple
Named tuple of coordinates with the names 'lon_a', 'lon_b',
'lat_a', 'lat_b'.
"""
coords = Coords(lon_a=self.metadata['site_a_longitude'],
lon_b=self.metadata['site_b_longitude'],
lat_a=self.metadata['site_a_latitude'],
lat_b=self.metadata['site_b_latitude'])
return coords
def calc_length(self):
""" Calculate and return length of CML km """
coords = self.get_coordinates()
d_km = distance((coords.lat_a, coords.lon_a),
(coords.lat_b, coords.lon_b))
return d_km
def get_length(self):
""" Return length of CML in km """
return self.metadata['length']
def plot_map(self, tiles='OpenStreetMap', fol_map=None):
""" Plot a dynamic map in Jupyter notebook using folium
Parameters
----------
tiles: str
Name of tile to be used by folium, default is 'OpenStreetMap'
fol_map: folium map instance
An existing folium map instance can be passed here
Returns
-------
fol_map : folium map object
"""
coords = self.get_coordinates()
if fol_map is None:
fol_map = folium.Map(location=[(coords.lat_a + coords.lat_b) / 2,
(coords.lon_a + coords.lon_b) / 2],
tiles=tiles,
zoom_start=11)
fol_map.add_children(folium.PolyLine([(coords.lat_a, coords.lon_a),
(coords.lat_b, coords.lon_b)]))
return fol_map
def plot_line(self, ax=None, *args, **kwargs):
""" Plot the CML path using matplotlib
`args` and `kwargs` will be passed to `matplotlib.pyplot.plot`
Parameters
----------
ax : matplotlib.axes
Matplotlib axes handle, defaults to None. A figure is created in
the default case
Returns
-------
ax : matplotib.axes
"""
if ax is None:
fig, ax = plt.subplots()
coords = self.get_coordinates()
ax.plot([coords.lon_a, coords.lon_b],
[coords.lat_a, coords.lat_b],
*args, **kwargs)
return ax
def plot_data(self, columns=['rx', ], channels=None, ax=None):
""" Plot time series of data from the different channels
Linked subplots will be created for the different specified columns
of the DataFrames of the different channels.
Parameters
----------
columns : list, optional
List of DataFrame columns to plot for each channel.
Defaults to ['rx', ]
channels : list, optional
List of channel names, i.e. the keys of the Comlink.channels
dictionary, to specify which channel to plot. Defaults to None,
which plots for all channels
ax : matplotlib.axes, optional
Axes handle, defaults to None, which plots into a new figure
Returns
-------
ax : matplotlib.axes
"""
if ax is None:
fig, ax = plt.subplots(len(columns),
1,
figsize=(10, 1.5*len(columns) + 1),
sharex=True)
try:
ax[0].get_alpha()
except TypeError:
ax = [ax, ]
if channels is None:
channels_to_plot = self.channels
else:
channels_to_plot = {ch_key: self.channels[ch_key]
for ch_key in channels}
for ax_i, column in zip(ax, columns):
for i, (name, cml_ch) in enumerate(channels_to_plot.items()):
if column == 'wet':
ax_i.fill_between(
cml_ch.data[column].index,
i,
i + cml_ch.data[column].values,
alpha=0.9,
linewidth=0.0,
label=name)
else:
ax_i.plot(
cml_ch.data[column].index,
cml_ch.data[column].values,
label=name)
ax_i.set_ylabel(column)
return ax
def get_center_lon_lat(self):
""" Calculate and return longitude and latitude of the CML path center
Returns
-------
(center_lon, center_lat)
"""
coords = self.get_coordinates()
center_lon = (coords.lon_a + coords.lon_b) / 2
center_lat = (coords.lat_a + coords.lat_b) / 2
return center_lon, center_lat
def append_data(self, cml, max_length=None, max_age=None):
""" Append the data from the same CML stored in another Comlink object
Parameters
----------
cml : Comlink
Comlink object for the same physical CML; its channel data will be appended
max_length : passed through to each channel's `append_data()`
max_age : passed through to each channel's `append_data()`
Returns
-------
"""
for key in self.metadata.keys():
if self.metadata[key] != cml.metadata[key]:
raise ValueError('Comlink metadata `%s` is different'
'for the two CMLs: %s vs. %s' %
(key, self.metadata[key], cml.metadata[key]))
for ch_name in self.channels.keys():
self.channels[ch_name].append_data(
cml_ch=cml.channels[ch_name],
max_length=max_length,
max_age=max_age)
def _channels_list_to_ordered_dict(channels):
""" Helper function to parse a list of channels to a dict of channels
The keys will be `channel_(i+1)`, where i is the index of the list of
channels. These keys will be used to make the different channels
available in the Comlink object via e.g. `comlink.channel_1`.
Parameters
----------
channels : list
List of ComlinkChannel objects
Returns
-------
channel_dict : dict
"""
channel_dict = OrderedDict()
for i, channel in enumerate(channels):
channel_dict['channel_' + str(i+1)] = channel
return channel_dict
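# Minimal usage sketch (not taken from the pycomlink docs; `ch1` and `ch2`
# stand for two already constructed ComlinkChannel objects, which are not
# shown here, and the coordinates are made up):
#
#   cml = Comlink(channels=[ch1, ch2],
#                 metadata={'site_a_latitude': 47.8, 'site_a_longitude': 11.0,
#                           'site_b_latitude': 47.9, 'site_b_longitude': 11.1,
#                           'cml_id': 'XY1234'})
#   cml.get_coordinates()          # namedtuple with lon_a, lon_b, lat_a, lat_b
#   cml.get_length()               # path length in km, calculated in __init__
#   cml.channel_1                  # channels are exposed via __getattr__
#   cml.plot_data(columns=['rx'])  # linked time series subplots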
```
#### File: pycomlink/spatial/helper.py
```python
from __future__ import division
from builtins import map
from math import radians, cos, sin, asin, sqrt
# TODO: Check if these functions are still needed
def distance(origin, destination):
"""Simple distance (in km) calculation between two locations
Parameters
----------
origin : tuple
Coordinates of first location in decimal format.
Required format (Latitude,Longitude)
destination : tuple
Coordinates of second location in decimal format.
Required format (Latitude,Longitude)
Returns
-------
Distance between origin and destination in kilometer
"""
lat1, lon1 = origin
lat2, lon2 = destination
return haversine(lon1, lat1, lon2, lat2)
def haversine(lon1, lat1, lon2, lat2):
"""Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
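# Minimal usage sketch (the coordinates are made up for illustration, not
# taken from any real CML):
#
#   distance((53.40, -3.00), (53.19, -2.89))   # (lat, lon) tuples
#
# returns the great-circle distance in km, roughly 24 km for these two points
# (using the Earth radius of 6367 km hard-coded in haversine above).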
def label_loc(lon_a,lat_a,lon_b,lat_b):
"""Helper function for method info_plot of class Comlink
"""
if lon_a < lon_b and lat_a < lat_b:
x_a,y_a = lon_a-0.025,lat_a-0.005
x_b,y_b = lon_b+0.01,lat_b+0.005
elif lon_a < lon_b and lat_a > lat_b:
x_a,y_a = lon_a-0.025,lat_a+0.005
x_b,y_b = lon_b+0.01,lat_b-0.005
elif lon_a > lon_b and lat_a > lat_b:
x_a,y_a = lon_a+0.01,lat_a+0.005
x_b,y_b = lon_b-0.025,lat_b-0.005
elif lon_a > lon_b and lat_a < lat_b:
x_a,y_a = lon_a+0.01,lat_a-0.005
x_b,y_b = lon_b-0.025,lat_b+0.005
xy = [x_a,y_a,x_b,y_b]
return xy
```
#### File: pycomlink/tests/test_processor.py
```python
import unittest
import numpy as np
from pycomlink.tests.utils import load_and_clean_example_cml
import pycomlink as pycml
class TestWetDryStdDev(unittest.TestCase):
def test_standard_processing(self):
cml = load_and_clean_example_cml()
cml.process.wet_dry.std_dev(window_length=60, threshold=0.8)
assert (cml.channel_1.data.wet.loc['2016-11-02 14:00'].values[0]
== True)
assert (cml.channel_1.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
assert (cml.channel_2.data.wet.loc['2016-11-02 14:00'].values[0]
== True)
assert (cml.channel_2.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
cml.process.wet_dry.std_dev(window_length=30, threshold=0.8)
assert (cml.channel_1.data.wet.loc['2016-11-02 14:00'].values[0]
== False)
assert (cml.channel_1.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
assert (cml.channel_1.data.wet.loc['2016-11-02 14:30'].values[0]
== True)
assert (cml.channel_2.data.wet.loc['2016-11-02 14:00'].values[0]
== False)
assert (cml.channel_2.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
assert (cml.channel_2.data.wet.loc['2016-11-02 14:30'].values[0]
== True)
# Test if the end result, the rain rate, is the same when using the
# Processor and the functions
# This test only works correctly if the CML uses vertical polarization
# since the default in the function is 'H'
assert cml.channel_1.metadata['polarization'] == 'V'
cml.process.wet_dry.std_dev(window_length=30, threshold=0.8)
cml.process.baseline.linear()
cml.process.baseline.calc_A()
cml.process.A_R.calc_R()
R_from_processor = cml.channel_1.data.R.copy()
R_from_function = (
pycml.processing.A_R_relation.A_R_relation
.calc_R_from_A(
A=cml.channel_1.data.A,
L=cml.metadata['length'],
f_GHz=cml.channel_1.metadata['frequency'] / 1e9,
pol=cml.channel_1.metadata['polarization']))
np.testing.assert_almost_equal(R_from_processor, R_from_function)
def test_processing_for_selected_time_period(self):
cml = load_and_clean_example_cml()
# First, as a comparison, the standard way
cml.process.wet_dry.std_dev(window_length=60, threshold=0.8)
assert (cml.channel_1.data.wet.loc['2016-11-02 18:30'].values[0]
== True)
assert (cml.channel_1.data.wet.loc['2016-11-02 14:00'].values[0]
== True)
assert (cml.channel_1.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
# Then for a period from a starting point in time
t_start = '2016-11-02 14:30'
cml.channel_1.data.wet = False
cml.process.wet_dry.std_dev(window_length=60,
threshold=0.8,
t_start=t_start)
assert (cml.channel_1.data.wet.loc['2016-11-02 18:30'].values[0]
== True)
assert (cml.channel_1.data.wet.loc['2016-11-02 14:00'].values[0]
== False)
assert (cml.channel_1.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
# Then for a period to a end point in time
t_stop = '2016-11-02 15:00'
cml.channel_1.data.wet = False
cml.process.wet_dry.std_dev(window_length=60,
threshold=0.8,
t_stop=t_stop)
assert (cml.channel_1.data.wet.loc['2016-11-02 18:30'].values[0]
== False)
assert (cml.channel_1.data.wet.loc['2016-11-02 14:00'].values[0]
== True)
assert (cml.channel_1.data.wet.loc['2016-11-02 13:00'].values[0]
== False)
```
#### File: pycomlink/tests/test_stats.py
```python
import unittest
import numpy as np
import collections
import pycomlink as pycml
class TestWetDryandRainErrorfunctions(unittest.TestCase):
def test_WetDryError_with_simple_arrays(self):
reference = np.array([True, False, True, True, False,
True, False, np.nan, np.nan, np.nan])
predicted = np.array([True, False, False, True, True,
True, True, True, False, np.nan])
wd_error = pycml.validation.stats.calc_wet_dry_performance_metrics(
reference,
predicted)
class_name = 'WetDryError_reference'
fields = 'false_wet_rate missed_wet_rate matthews_correlation ' \
'true_wet_rate true_dry_rate N_dry_reference N_wet_reference '\
'N_true_wet N_true_dry N_false_wet N_missed_wet ' \
'N_all_pairs N_nan_pairs N_nan_reference_only ' \
'N_nan_predicted_only'
WetDryError_reference = collections.namedtuple(class_name, fields)
ref = WetDryError_reference(0.66666667, 0.25, 0.09128709291752767, 0.75,
0.33333334, 3, 4, 3, 1, 2, 1, 10, 3, 3, 1)
np.testing.assert_array_almost_equal(
wd_error,
ref)
# the mcc should be zero, when predicted only contains false/zeros
def test_mcc_with_zero_wet_prediction(self):
reference = np.array([True, False, False])
predicted = np.array([False, False, False])
wd_error = pycml.validation.stats.calc_wet_dry_performance_metrics(
reference,
predicted)
np.testing.assert_array_almost_equal(
wd_error.matthews_correlation,
0)
# verify mcc against a mcc calculated with sklearn.metrics.matthews_corrcoef
# extra test because sklearn.metrics.matthews_corrcoef can't handle NaNs
def test_mcc_against_sklearns_mcc(self):
reference = np.array([0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1])
predicted = np.array([0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1])
wd_error = pycml.validation.stats.calc_wet_dry_performance_metrics(
reference,
predicted)
np.testing.assert_array_almost_equal(
wd_error.matthews_correlation,
0.3105295017040594)
def test_RainError_with_simple_arrays(self):
reference = np.array([1, 0, 1, 1, 1, 0, 1, 0, np.nan, np.nan, np.nan])
predicted = np.array([1, 0, 0, 0, 1, 1, 0.01, 1, 1, 0, np.nan])
rainerror = pycml.validation.stats.calc_rain_error_performance_metrics(
reference,
predicted,
rainfall_threshold_wet=0.1)
class_name = 'RainError_reference'
fields = 'pearson_correlation coefficient_of_variation ' \
'root_mean_square_error mean_absolute_error R_sum_reference ' \
'R_sum_predicted R_mean_reference R_mean_predicted ' \
'false_wet_rate missed_wet_rate ' \
'false_wet_precipitation_rate missed_wet_precipitation_rate ' \
'rainfall_threshold_wet N_all_pairs N_nan_pairs ' \
'N_nan_reference_only N_nan_predicted_only'
RainError_reference = collections.namedtuple(class_name, fields)
ref = RainError_reference(-0.256899818, 1.246767019, 0.788994613,
.62375, 5, 4.01, 0.625, 0.50125, 0.666666666,
0.6, 1, 1, 0.1, 11, 3, 3, 1)
np.testing.assert_almost_equal(
rainerror,
ref)
# Test that the calculation does not change the input arrays
np.testing.assert_almost_equal(
reference,
np.array([1, 0, 1, 1, 1, 0, 1, 0, np.nan, np.nan, np.nan]))
np.testing.assert_almost_equal(
predicted,
np.array([1, 0, 0, 0, 1, 1, 0.01, 1, 1, 0, np.nan]))
```
#### File: pycomlink/tests/utils.py
```python
import pkg_resources
import os.path
import pycomlink as pycml
def get_test_data_path():
return pkg_resources.resource_filename('pycomlink', 'tests/test_data')
def load_and_clean_example_cml():
cml = pycml.io.examples.read_one_cml()
cml.process.quality_control.set_to_nan_if('tx', '>=', 100)
cml.process.quality_control.set_to_nan_if('rx', '==', -99.9)
for cml_ch in cml.channels.values():
cml_ch.data.interpolate(limit=3)
return cml
def load_processed_cml_list():
data_path = get_test_data_path()
fn = '75_cmls_processed.h5'
return pycml.io.read_from_cmlh5(os.path.join(data_path, fn),
read_all_data=True)
```
#### File: pycomlink/validation/validator.py
```python
from __future__ import division
from builtins import zip
from builtins import object
from collections import namedtuple
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import LineString, Polygon
from pycomlink.util.maintenance import deprecated
class Validator(object):
def __init__(self):
pass
def calc_stats(self, cml, time_series):
cml_copy = cml
time_series_df = pd.DataFrame(data={'xr_ds': time_series},
index=time_series.time)
cml_copy.data.index = cml_copy.data.index.strftime("%Y-%m-%d %H:%M:%S")
# pd.DatetimeIndex(cml.data.index)
joined_df = time_series_df.join(cml_copy.data.txrx_nf)
pearson_r = joined_df.xr_ds.corr(joined_df.txrx_nf, method='pearson')
return pearson_r
class GridValidator(Validator):
def __init__(self, lats=None, lons=None, values=None, xr_ds=None):
if xr_ds is None:
# construct xr_ds from lats, lons & values here?
xr_ds = xr.Dataset()
self.xr_ds = xr_ds
self.intersect_weights = None
self.weighted_grid_sum = None
pass
def _get_cml_intersection_weights(self, cml):
self.cml = cml
cml_coords = cml.get_coordinates()
# get intersect weights
self.intersect_weights = calc_intersect_weights(
x1_line=cml_coords.lon_a,
y1_line=cml_coords.lat_a,
x2_line=cml_coords.lon_b,
y2_line=cml_coords.lat_b,
x_grid=self.xr_ds.longitudes.values,
y_grid=self.xr_ds.latitudes.values)
return self.intersect_weights
def get_time_series(self, cml, values):
intersect_weights = self._get_cml_intersection_weights(cml)
# Get start and end time of CML data set to constrain lookup in `xr_ds`
t_start = cml.channel_1.data.index.values[0]
t_stop = cml.channel_2.data.index.values[-1]
t_mask = (self.xr_ds.time > t_start) & (self.xr_ds.time < t_stop)
# Get bounding box where CML intersects with grid to constrain
# lookup in `xr_ds`
w_mask = intersect_weights > 0
# Since we cannot use a 2D mask in xarray, build the slices of
# indices for the x- and y-axis
w_ix_x = np.unique(np.where(w_mask)[1])
w_ix_y = np.unique(np.where(w_mask)[0])
slice_x = slice(w_ix_x.min(), w_ix_x.max()+1)
slice_y = slice(w_ix_y.min(), w_ix_y.max()+1)
self.weighted_grid_sum = (self.xr_ds[values][t_mask, slice_y, slice_x]
* intersect_weights[slice_y, slice_x]
).sum(dim=['x', 'y']).to_dataframe()
return self.weighted_grid_sum
def resample_to_grid_time_series(self,
df,
grid_time_index_label,
grid_time_zone=None):
df_temp = df.copy()
df_truth_t = pd.DataFrame(self.weighted_grid_sum.index,
self.weighted_grid_sum.index)
if grid_time_zone is not None:
df_truth_t.index = df_truth_t.index.tz_localize(grid_time_zone)
if grid_time_index_label == 'right':
method = 'bfill'
elif grid_time_index_label == 'left':
method = 'ffill'
else:
raise NotImplementedError('Only `left` and `right` are allowed up '
'to now for `grid_time_index_label`.')
df_truth_t = df_truth_t.reindex(df.index, method=method)
df_temp['truth_time_ix'] = df_truth_t.time
return df_temp.groupby('truth_time_ix').mean()
def plot_intersections(self, cml, ax=None):
if ax is None:
fig, ax = plt.subplots()
cml.plot_line(ax=ax)
# Generate lon-lat grid assuming the original coordinates represent
# the center of the grid
lons = np.zeros(self.xr_ds.longitudes.shape + np.array([1, 1]))
lats = np.zeros(self.xr_ds.latitudes.shape + np.array([1, 1]))
grid = np.stack([self.xr_ds.longitudes.values,
self.xr_ds.latitudes.values],
axis=2)
grid_corners = _calc_grid_corners_for_center_location(grid)
lons[:-1, :-1] = grid_corners.ll_grid[:, :, 0]
lons[-1, :-1] = grid_corners.ul_grid[-1, :, 0]
lons[:-1, -1] = grid_corners.lr_grid[:, -1, 0]
lons[-1, -1] = grid_corners.ur_grid[-1, -1, 0]
lats[:-1, :-1] = grid_corners.ll_grid[:, :, 1]
lats[-1, :-1] = grid_corners.ul_grid[-1, :, 1]
lats[:-1, -1] = grid_corners.lr_grid[:, -1, 1]
lats[-1, -1] = grid_corners.ur_grid[-1, -1, 1]
cml_coords = cml.get_coordinates()
# get intersect weights
intersect, pixel_poly_list = calc_intersect_weights(
x1_line=cml_coords.lon_a,
y1_line=cml_coords.lat_a,
x2_line=cml_coords.lon_b,
y2_line=cml_coords.lat_b,
x_grid=self.xr_ds.longitudes.values,
y_grid=self.xr_ds.latitudes.values,
return_pixel_poly_list=True)
ax.pcolormesh(lons, lats, intersect, cmap=plt.cm.gray_r)
ax.scatter(self.xr_ds.longitudes.values,
self.xr_ds.latitudes.values, s=1, c='k')
for pixel_poly in pixel_poly_list:
ax.plot(*pixel_poly.exterior.xy)
ax.set_ylim()
return ax
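# Minimal usage sketch for GridValidator (names are illustrative; `radar_ds`
# is assumed to be an xarray.Dataset that provides `longitudes`, `latitudes`
# and `time` exactly as accessed in the methods above):
#
#   gv = GridValidator(xr_ds=radar_ds)
#   df_grid = gv.get_time_series(cml, values='rainfall_amount')
#   df_cml_resampled = gv.resample_to_grid_time_series(
#       df_cml, grid_time_index_label='right', grid_time_zone='UTC')
#   gv.plot_intersections(cml)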
class PointValidator(Validator):
def __init__(self, lats, lons, values):
#self.truth_data = [lats, lons, time_series]
pass
def _get_cml_pair_indices(self, cml):
# get nearest point location
#return pair_indices
pass
def get_time_series(self, cml, values):
pass
def calc_intersect_weights(x1_line,
y1_line,
x2_line,
y2_line,
x_grid,
y_grid,
grid_point_location='center',
offset=None,
return_pixel_poly_list=False):
""" Calculate intersecting weights for a line and a grid
Calculate the intersecting weights for the line defined by `x1_line`,
`y1_line`, `x2_line` and `y2_line` and the grid defined by the x- and y-
grid points from `x_grid` and `y_grid`.
Parameters
----------
x1_line : float
y1_line : float
x2_line : float
y2_line : float
x_grid : 2D array
x-coordinates of grid points
y_grid : 2D array
y-coordinates of grid points
grid_point_location : str, optional
Either `center` (the default), which assumes that the supplied grid
coordinates represent the centers of the grid cells, or `lower_left`,
which assumes they are the lower-left corners of the cells
offset : float, optional
The offset in units of the coordinates to constrain the calculation
of intersection to a bounding box around the CML coordinates. The
offset specifies by how much this bounding box will be larger than
the width- and height-extent of the CML coordinates.
return_pixel_poly_list : bool, optional
If `True`, also return the list of shapely.Polygon objects which were
used to calculate the intersection weights. Defaults to `False`.
Returns
-------
intersect : array
2D array of intersection weights with the same shape as `x_grid` and
`y_grid`
pixel_poly_list : list, optional
List of shapely.Polygons which were used to calculate intersections
"""
x_grid = x_grid.astype('float64')
y_grid = y_grid.astype('float64')
#grid = np.stack([xr_ds.longitudes.values, xr_ds.latitudes.values], axis=2)
grid = np.stack([x_grid, y_grid], axis=2)
# Get link coordinates for easy access
#cml_coords = cml.get_coordinates()
# Convert CML to shapely line
link = LineString([(x1_line, y1_line),
(x2_line, y2_line)])
# Derive grid cell width to set bounding box offset
ll_cell = grid[0, 1, 0] - grid[0, 0, 0]
ul_cell = grid[-1, 1, 0] - grid[-1, 0, 0]
lr_cell = grid[0, -1, 0] - grid[0, -2, 0]
ur_cell = (grid[-1, -1, 0] - grid[-1, -2, 0])
offset_calc = max(ll_cell, ul_cell, lr_cell, ur_cell)
# Set bounding box offset
if offset is None:
offset = offset_calc
# Set bounding box
x_max = max([x1_line, x2_line])
x_min = min([x1_line, x2_line])
y_max = max([y1_line, y2_line])
y_min = min([y1_line, y2_line])
#lon_grid = grid[:, :, 0]
#lat_grid = grid[:, :, 1]
bounding_box = (
((x_grid > x_min - offset) & (x_grid < x_max + offset)) &
((y_grid > y_min - offset) & (y_grid < y_max + offset)))
# Calculate polygon corners assuming that `grid` defines the center
# of each grid cell
if grid_point_location == 'center':
grid_corners = _calc_grid_corners_for_center_location(grid)
elif grid_point_location == 'lower_left':
grid_corners = _calc_grid_corners_for_lower_left_location(grid)
else:
raise ValueError('`grid_point_location` = %s not implemented' %
grid_point_location)
# Find intersection
intersect = np.zeros([grid.shape[0], grid.shape[1]])
pixel_poly_list = []
# Iterate only over the indices within the bounding box and
# calculate the intersect weigh for each pixel
ix_in_bbox = np.where(bounding_box == True)
for i, j in zip(ix_in_bbox[0], ix_in_bbox[1]):
pixel_poly = Polygon(
[grid_corners.ll_grid[i, j],
grid_corners.lr_grid[i, j],
grid_corners.ur_grid[i, j],
grid_corners.ul_grid[i, j]])
pixel_poly_list.append(pixel_poly)
c = link.intersection(pixel_poly)
if not c.is_empty:
intersect[i][j] = (c.length / link.length)
if return_pixel_poly_list:
return intersect, pixel_poly_list
else:
return intersect
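# Minimal usage sketch on a synthetic grid (all numbers are illustrative):
#
#   x_grid, y_grid = np.meshgrid(np.arange(5.0), np.arange(5.0))
#   w = calc_intersect_weights(x1_line=0.5, y1_line=0.5,
#                              x2_line=3.5, y2_line=3.5,
#                              x_grid=x_grid, y_grid=y_grid)
#
# `w` has the same shape as the grid; each entry is the fraction of the line
# length falling into that grid cell, so w.sum() should be close to 1 when
# the line lies completely inside the grid.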
def _calc_grid_corners_for_center_location(grid):
"""
Parameters
----------
grid : array
3D matrix holding x and y grids. Shape of `grid` must be
(height, width, 2).
Returns
-------
namedtuple with the grids for the four corners of the grid defined
by points at the lower left corner
"""
grid = grid.astype('float64')
# Upper right
ur_grid = np.zeros_like(grid)
ur_grid[0:-1, 0:-1, :] = (grid[0:-1, 0:-1, :] + grid[1:, 1:, :]) / 2.0
ur_grid[-1, :, :] = (ur_grid[-2, :, :]
+ (ur_grid[-2, :, :] - ur_grid[-3, :, :]))
ur_grid[:, -1, :] = (ur_grid[:, -2, :]
+ (ur_grid[:, -2, :] - ur_grid[:, -3, :]))
# Upper left
ul_grid = np.zeros_like(grid)
ul_grid[0:-1, 1:, :] = (grid[0:-1, 1:, :] + grid[1:, :-1, :]) / 2.0
ul_grid[-1, :, :] = (ul_grid[-2, :, :]
+ (ul_grid[-2, :, :] - ul_grid[-3, :, :]))
ul_grid[:, 0, :] = (ul_grid[:, 1, :]
- (ul_grid[:, 2, :] - ul_grid[:, 1, :]))
# Lower right
lr_grid = np.zeros_like(grid)
lr_grid[1:, 0:-1, :] = (grid[1:, 0:-1, :] + grid[:-1, 1:, :]) / 2.0
lr_grid[0, :, :] = (lr_grid[1, :, :]
- (lr_grid[2, :, :] - lr_grid[1, :, :]))
lr_grid[:, -1, :] = (lr_grid[:, -2, :]
+ (lr_grid[:, -2, :] - lr_grid[:, -3, :]))
# Lower left
ll_grid = np.zeros_like(grid)
ll_grid[1:, 1:, :] = (grid[1:, 1:, :] + grid[:-1, :-1, :]) / 2.0
ll_grid[0, :, :] = (ll_grid[1, :, :]
- (ll_grid[2, :, :] - ll_grid[1, :, :]))
ll_grid[:, 0, :] = (ll_grid[:, 1, :]
- (ll_grid[:, 2, :] - ll_grid[:, 1, :]))
GridCorners = namedtuple('GridCorners',
['ur_grid', 'ul_grid', 'lr_grid', 'll_grid'])
return GridCorners(ur_grid=ur_grid,
ul_grid=ul_grid,
lr_grid=lr_grid,
ll_grid=ll_grid)
def _calc_grid_corners_for_lower_left_location(grid):
"""
Parameters
----------
grid : array
3D matrix holding x and y grids. Shape of `grid` must be
(height, width, 2).
Returns
-------
namedtuple with the grids for the four corners around the
central grid points
"""
grid = grid.astype('float64')
if (np.diff(grid[:, :, 0], axis=1) < 0).any():
raise ValueError("x values must be ascending along axis 1")
if (np.diff(grid[:, :, 1], axis=0) < 0).any():
raise ValueError("y values must be ascending along axis 0")
# Upper right
ur_grid = np.zeros_like(grid)
ur_grid[0:-1, 0:-1, :] = grid[1:, 1:, :]
ur_grid[-1, :, :] = (ur_grid[-2, :, :]
+ (ur_grid[-2, :, :] - ur_grid[-3, :, :]))
ur_grid[:, -1, :] = (ur_grid[:, -2, :]
+ (ur_grid[:, -2, :] - ur_grid[:, -3, :]))
# Upper left
ul_grid = np.zeros_like(grid)
ul_grid[0:-1, 0:-1, :] = grid[1:, 0:-1, :]
ul_grid[-1, :, :] = (ul_grid[-2, :, :]
+ (ul_grid[-2, :, :] - ul_grid[-3, :, :]))
ul_grid[:, -1, :] = (ul_grid[:, -2, :]
+ (ul_grid[:, -2, :] - ul_grid[:, -3, :]))
# Lower right
lr_grid = np.zeros_like(grid)
lr_grid[0:-1, 0:-1, :] = grid[0:-1, 1:, :]
lr_grid[-1, :, :] = (lr_grid[-2, :, :]
+ (lr_grid[-2, :, :] - lr_grid[-3, :, :]))
lr_grid[:, -1, :] = (lr_grid[:, -2, :]
+ (lr_grid[:, -2, :] - lr_grid[:, -3, :]))
# Lower left
ll_grid = grid.copy()
GridCorners = namedtuple('GridCorners',
['ur_grid', 'ul_grid', 'lr_grid', 'll_grid'])
return GridCorners(ur_grid=ur_grid,
ul_grid=ul_grid,
lr_grid=lr_grid,
ll_grid=ll_grid)
@deprecated('Use `pycomlink.validation.stats.calc_wet_error_rates()` '
'instead since the `dry_error` here makes no sense.')
def calc_wet_dry_error(df_wet_truth, df_wet):
dry_error = (((df_wet_truth == False) & (df_wet == True)).sum() /
float((df_wet_truth == False).sum()))
wet_error = (((df_wet_truth == True) & (df_wet == False)).sum() /
float((df_wet_truth == True).sum()))
return wet_error, dry_error
``` |
{
"source": "jpolz/radolan_to_netcdf",
"score": 2
} |
#### File: radolan_to_netcdf/tests/test_write_to_netcdf.py
```python
import unittest
import os
import pkg_resources
import glob
import netCDF4
import numpy as np
from numpy.testing import assert_almost_equal
from radolan_to_netcdf import radolan_to_netcdf
from radolan_to_netcdf.tests.tools import get_test_data_for_product
def parse_and_validate_test_data(product_name):
fn = 'test.nc'
radolan_to_netcdf.create_empty_netcdf(fn, product_name=product_name)
data_list, metadata_list = [], []
for fn_radolan_file in get_test_data_for_product(product_name):
data, metadata = radolan_to_netcdf.read_in_one_bin_file(
fn_radolan_file)
data_list.append(data)
metadata_list.append(metadata)
radolan_to_netcdf.create_empty_netcdf(fn=fn, product_name=product_name)
radolan_to_netcdf.append_to_netcdf(
fn=fn,
data_list=data_list,
metadata_list=metadata_list)
with netCDF4.Dataset(fn, mode='r') as ds:
actual = ds['rainfall_amount'][:].filled(np.nan).sum(axis=0)
reference = np.stack(data_list, axis=2).sum(axis=2)
assert_almost_equal(actual, reference)
os.remove(fn)
def test_RW():
parse_and_validate_test_data(product_name='RW')
def test_YW():
parse_and_validate_test_data(product_name='YW')
def test_flagged_pixels():
fn_radolan_files = get_test_data_for_product(product_name='RW')
fn_bin = fn_radolan_files[0]
data, metadata = radolan_to_netcdf.read_in_one_bin_file(fn_bin)
# Write file to NetCDF
fn = 'test.nc'
radolan_to_netcdf.create_empty_netcdf(fn, product_name='RW')
radolan_to_netcdf.append_to_netcdf(fn, [data, ], [metadata, ])
for flag_name in ['secondary', 'nodatamask', 'cluttermask']:
# Read back and check flagged pixels
with netCDF4.Dataset(fn, mode='r') as ds:
# Get data as matrix from NetCDF and derive the non-zero indices
# because this is how they are stored in RADOLAN bin files and
# wradlib returns them that way
actual = np.nonzero(ds[flag_name][0, :, :].flatten())[0]
reference = metadata[flag_name]
np.testing.assert_almost_equal(actual, reference)
os.remove(fn)
```
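The tests above exercise the full RADOLAN-to-NetCDF conversion round trip. As a quick orientation, the sketch below chains the same `radolan_to_netcdf` calls used in the tests outside the test suite; the file paths are placeholders and only functions already appearing above are assumed to exist.
```python
import glob

from radolan_to_netcdf import radolan_to_netcdf

# Hypothetical input/output locations -- adjust to your own data layout.
fn_netcdf = "rw_composite.nc"
fn_radolan_files = sorted(glob.glob("radolan_bin_files/RW*"))

# One empty NetCDF file for the product, then append all parsed composites.
radolan_to_netcdf.create_empty_netcdf(fn_netcdf, product_name="RW")

data_list, metadata_list = [], []
for fn_bin in fn_radolan_files:
    data, metadata = radolan_to_netcdf.read_in_one_bin_file(fn_bin)
    data_list.append(data)
    metadata_list.append(metadata)

radolan_to_netcdf.append_to_netcdf(
    fn=fn_netcdf, data_list=data_list, metadata_list=metadata_list)
```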
#### File: radolan_to_netcdf/tests/test_write_to_radolan_bin.py
```python
import unittest
import wradlib as wrl
import numpy as np
from radolan_to_netcdf import radolan_to_netcdf
from radolan_to_netcdf import wradlib_to_radolan_bin
from radolan_to_netcdf.tests.tools import get_test_data_for_product
class TestWradlibMetadataToHeader(unittest.TestCase):
def test_RW(self):
for fn_radolan_file in get_test_data_for_product('RW'):
data, metadata = radolan_to_netcdf.read_in_one_bin_file(
fn_radolan_file)
with wrl.io.radolan.get_radolan_filehandle(fn_radolan_file) as f:
reference = wrl.io.radolan.read_radolan_header(f)
actual = wradlib_to_radolan_bin.metadata_to_header(metadata)
assert actual == reference
def test_not_RW_error(self):
with self.assertRaises(NotImplementedError) as context:
fn_radolan_files = get_test_data_for_product('YW')
data, metadata = radolan_to_netcdf.read_in_one_bin_file(
fn_radolan_files[0])
wradlib_to_radolan_bin.metadata_to_header(metadata)
self.assertTrue(
'Currently only RADOALN-RW is supported' in str(context.exception)
)
class TestWradlibDataToByteArray(unittest.TestCase):
def test_RW(self):
for fn_radolan_file in get_test_data_for_product('RW'):
data, metadata = radolan_to_netcdf.read_in_one_bin_file(
fn_radolan_file)
with wrl.io.radolan.get_radolan_filehandle(fn_radolan_file) as f:
header = wrl.io.radolan.read_radolan_header(f)
attrs = wrl.io.radolan.parse_dwd_composite_header(header)
reference = wrl.io.read_radolan_binary_array(f, attrs['datasize'])
actual = wradlib_to_radolan_bin.data_to_byte_array(data, metadata)
assert actual == reference
def test_not_RW_error(self):
with self.assertRaises(NotImplementedError) as context:
fn_radolan_files = get_test_data_for_product('YW')
data, metadata = radolan_to_netcdf.read_in_one_bin_file(
fn_radolan_files[0])
wradlib_to_radolan_bin.data_to_byte_array(data, metadata)
self.assertTrue(
'Currently only RADOALN-RW is supported' in str(context.exception)
)
class TestWradlibToRadolanBinaryRoundtrip(unittest.TestCase):
def test_RW(self):
for fn_radolan_file in get_test_data_for_product('RW'):
data_reference, metadata_reference = radolan_to_netcdf.read_in_one_bin_file(
fn_radolan_file)
wradlib_to_radolan_bin.write_to_radolan_bin_file(
fn='test_radolan.bin',
data=data_reference,
metadata=metadata_reference,
)
data_actual, metadata_actual = radolan_to_netcdf.read_in_one_bin_file(
'test_radolan.bin')
np.testing.assert_almost_equal(data_actual, data_reference)
assert list(metadata_actual.keys()) == list(metadata_reference.keys())
for key in metadata_reference.keys():
try:
np.testing.assert_almost_equal(
metadata_actual[key], metadata_reference[key]
)
except TypeError:
assert metadata_actual[key] == metadata_reference[key]
``` |
{
"source": "JPompeus/Stone-Soup",
"score": 2
} |
#### File: dataassociator/tests/test_neighbour.py
```python
import datetime
import pytest
import numpy as np
from ..neighbour import NearestNeighbour, GlobalNearestNeighbour
from ...types.detection import Detection
from ...types.state import GaussianState
from ...types.track import Track
@pytest.fixture(params=[NearestNeighbour, GlobalNearestNeighbour])
def associator(request, hypothesiser):
return request.param(hypothesiser)
def test_nearest_neighbour(associator):
timestamp = datetime.datetime.now()
t1 = Track([GaussianState(np.array([[0]]), np.array([[1]]), timestamp)])
t2 = Track([GaussianState(np.array([[3]]), np.array([[1]]), timestamp)])
d1 = Detection(np.array([[2]]))
d2 = Detection(np.array([[5]]))
tracks = {t1, t2}
detections = {d1, d2}
associations = associator.associate(tracks, detections, timestamp)
# There should be 2 associations
assert len(associations) == 2
# Each track should associate with a unique detection
associated_measurements = [hypothesis.measurement
for hypothesis in associations.values()
if hypothesis.measurement]
assert len(associated_measurements) == len(set(associated_measurements))
def test_missed_detection_nearest_neighbour(associator):
timestamp = datetime.datetime.now()
t1 = Track([GaussianState(np.array([[0]]), np.array([[1]]), timestamp)])
t2 = Track([GaussianState(np.array([[3]]), np.array([[1]]), timestamp)])
d1 = Detection(np.array([[20]]))
tracks = {t1, t2}
detections = {d1}
associations = associator.associate(tracks, detections, timestamp)
# Best hypothesis should be missed detection hypothesis
assert all(not hypothesis.measurement
for hypothesis in associations.values())
```
#### File: stonesoup/deleter/error.py
```python
import numpy as np
from ..base import Property
from .base import Deleter
class CovarianceBasedDeleter(Deleter):
""" Track deleter based on covariance matrix size.
Deletes tracks whose state covariance matrix (more specifically its trace)
exceeds a given threshold.
"""
covar_trace_thresh = Property(
float, doc="Covariance matrix trace threshold")
def check_for_deletion(self, track, **kwargs):
"""Check if a given track should be deleted
A track is flagged for deletion if the trace of its state covariance
matrix is higher than :py:attr:`~covar_trace_thresh`.
Parameters
----------
track : :class:`stonesoup.types.Track`
A track object to be checked for deletion.
Returns
-------
: :class:`bool`
``True`` if track should be deleted, ``False`` otherwise.
"""
track_covar_trace = np.trace(track.state.covar)
if(track_covar_trace > self.covar_trace_thresh):
return True
return False
```
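To make the trace-threshold check in the docstring concrete, here is a minimal sketch of how the deleter could be driven. It assumes the keyword constructor generated by Stone-Soup's `Base`/`Property` machinery and the absolute `stonesoup.` import paths; it is not part of the original module.
```python
import datetime

import numpy as np

from stonesoup.deleter.error import CovarianceBasedDeleter
from stonesoup.types.state import GaussianState
from stonesoup.types.track import Track

timestamp = datetime.datetime.now()
# One track whose covariance is still small, one whose covariance has grown.
tight_track = Track([GaussianState(np.array([[0.]]), np.array([[1.]]), timestamp)])
loose_track = Track([GaussianState(np.array([[0.]]), np.array([[10.]]), timestamp)])

deleter = CovarianceBasedDeleter(covar_trace_thresh=5.0)
assert not deleter.check_for_deletion(tight_track)  # trace 1 <= 5, keep
assert deleter.check_for_deletion(loose_track)      # trace 10 > 5, delete
```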
#### File: feeder/tests/conftest.py
```python
import datetime
import pytest
from ...buffered_generator import BufferedGenerator
from ...reader import DetectionReader
from ...types.detection import Detection
@pytest.fixture()
def detector():
class Detector(DetectionReader):
@BufferedGenerator.generator_method
def detections_gen(self):
time = datetime.datetime(2019, 4, 1, 14)
time_step = datetime.timedelta(seconds=1)
yield time, {
Detection([[50], [0]], timestamp=time,
metadata={'colour': 'red',
'score': 0}),
Detection([[20], [5]], timestamp=time,
metadata={'colour': 'green',
'score': 0.5}),
Detection([[1], [1]], timestamp=time,
metadata={'colour': 'blue',
'score': 0.1}),
}
time += time_step
yield time, {
Detection([[-5], [4]], timestamp=time,
metadata={'colour': 'red',
'score': 0.4}),
Detection([[11], [200]], timestamp=time,
metadata={'colour': 'green'}),
Detection([[0], [0]], timestamp=time,
metadata={'colour': 'green',
'score': 0.2}),
Detection([[-43], [-10]], timestamp=time,
metadata={'colour': 'blue',
'score': 0.326}),
}
time += time_step
yield time, {
Detection([[561], [10]], timestamp=time,
metadata={'colour': 'red',
'score': 0.745}),
Detection([[1], [-10]], timestamp=time - time_step/2,
metadata={'colour': 'red',
'score': 0}),
Detection([[-11], [-50]], timestamp=time,
metadata={'colour': 'blue',
'score': 2}),
}
time += time_step
yield time, {
Detection([[1], [-5]], timestamp=time,
metadata={'colour': 'red',
'score': 0.3412}),
Detection([[1], [-5]], timestamp=time,
metadata={'colour': 'blue',
'score': 0.214}),
}
time += time_step
yield time, {
Detection([[-11], [5]], timestamp=time,
metadata={'colour': 'red',
'score': 0.5}),
Detection([[13], [654]], timestamp=time,
metadata={'colour': 'blue',
'score': 0}),
Detection([[-3], [6]], timestamp=time,
metadata={}),
}
time += time_step*2
yield time, {
Detection([[0], [0]], timestamp=time,
metadata={'colour': 'red',
'score': 1}),
Detection([[0], [0]], timestamp=time,
metadata={'colour': 'blue',
'score': 0.612}),
Detection([[0], [0]], timestamp=time,
metadata={'score': 0}),
Detection([[0], [0]], timestamp=time,
metadata={}),
}
time -= time_step
yield time, {
Detection([[5], [-6]], timestamp=time,
metadata={'colour': 'red',
'score': 0.2}),
Detection([[10], [0]], timestamp=time,
metadata={'colour': 'blue'}),
}
return Detector()
```
#### File: feeder/tests/test_filter.py
```python
import datetime
import numpy as np
import pytest
from ..filter import (MetadataReducer,
MetadataValueFilter,
BoundingBoxDetectionReducer)
def test_metadata_reducer(detector):
feeder = MetadataReducer(detector, metadata_field="colour")
multi_none = False
for time, detections in feeder:
all_colours = [detection.metadata.get('colour')
for detection in detections]
if not multi_none:
multi_none = len(
[colour for colour in all_colours if colour is None]) > 1
colours = [colour for colour in all_colours if colour is not None]
assert len(colours) == len(set(colours))
assert "red" in colours
assert "blue" in colours
if time < datetime.datetime(2019, 4, 1, 14, 0, 2):
assert "green" in colours
else:
assert "green" not in colours
assert all(time == detection.timestamp for detection in detections)
assert multi_none
def test_metadata_value_filter(detector):
feeder = MetadataValueFilter(detector,
metadata_field="score",
operator=lambda x: x >= 0.1)
# Discard unmatched
nones = False
for time, detections in feeder:
all_scores = [detection.metadata.get('score')
for detection in detections]
nones = nones | (len([score for score in all_scores
if score is None]) > 0)
scores = [score for score in all_scores if score is not None]
assert len(scores) == len(set(scores))
assert 0 not in scores
if time < datetime.datetime(2019, 4, 1, 14, 0, 2):
assert all([score <= 0.5 for score in scores])
assert all(time == detection.timestamp for detection in detections)
assert not nones
# Keep unmatched
feeder.keep_unmatched = True
nones = False
for time, detections in feeder:
all_scores = [detection.metadata.get('score')
for detection in detections]
nones = nones | (len([score for score in all_scores
if score is None]) > 0)
scores = [score for score in all_scores if score is not None]
assert len(scores) == len(set(scores))
assert 0 not in scores
if time < datetime.datetime(2019, 4, 1, 14, 0, 2):
assert all([score <= 0.5 for score in scores])
assert all(time == detection.timestamp for detection in detections)
assert nones
def test_boundingbox_reducer(detector):
# Simple 2D rectangle/bounding box
limits = np.array([[-1, 1],
[-2, 2]])
mapping = [1, 0]
# Confirm errors raised on improper instantiation attempt
with pytest.raises(TypeError):
BoundingBoxDetectionReducer()
with pytest.raises(TypeError):
BoundingBoxDetectionReducer(detector)
feeder = BoundingBoxDetectionReducer(detector, limits, mapping)
# Assert correct constructor assignments
assert np.array_equal(limits, feeder.limits)
assert np.array_equal(mapping, feeder.mapping)
# Ensure only measurements within box are returned
multi_check = True
for time, detections in feeder:
for detection in detections:
num_dims = len(limits)
for i in range(num_dims):
min = limits[i, 0]
max = limits[i, 1]
value = detection.state_vector[mapping[i]]
multi_check &= not (value < min or value > max)
if time < datetime.datetime(2019, 4, 1, 14, 0, 2):
assert len(detections) == 1
elif time == datetime.datetime(2019, 4, 1, 14, 0, 7):
assert not (len(detections))
assert all(time == detection.timestamp for detection in detections)
assert multi_check
def test_boundingbox_reducer_default_mapping(detector):
# Simple 2D rectangle/bounding box
limits = np.array([[-1, 1],
[-2, 2]])
feeder = BoundingBoxDetectionReducer(detector, limits)
assert feeder.mapping == (0, 1)
```
#### File: transition/tests/test_rw.py
```python
import datetime
from pytest import approx
import scipy as sp
from scipy.stats import multivariate_normal
from ..linear import RandomWalk
def test_rw_model():
""" RandomWalk Transition Model test """
# State related variables
state_vec = sp.array([[3.0]])
old_timestamp = datetime.datetime.now()
timediff = 1 # 1sec
new_timestamp = old_timestamp + datetime.timedelta(seconds=timediff)
time_interval = new_timestamp - old_timestamp
# Model-related components
noise_diff_coeff = 0.001 # m/s^2
F = sp.array([[1]])
Q = sp.array([[timediff]]) * noise_diff_coeff
# Create and a Random Walk model object
rw = RandomWalk(noise_diff_coeff=noise_diff_coeff)
    # Ensure ```rw.matrix(timestamp, time_interval)``` returns F
assert sp.array_equal(F, rw.matrix(
timestamp=new_timestamp, time_interval=time_interval))
# Ensure ```rw.covar(time_interval)``` returns Q
assert sp.array_equal(Q, rw.covar(
timestamp=new_timestamp, time_interval=time_interval))
# Propagate a state vector through the model
# (without noise)
new_state_vec_wo_noise = rw.function(
state_vec,
timestamp=new_timestamp,
time_interval=time_interval,
noise=0)
assert sp.array_equal(new_state_vec_wo_noise, F@state_vec)
# Evaluate the likelihood of the predicted state, given the prior
# (without noise)
prob = rw.pdf(new_state_vec_wo_noise,
state_vec,
timestamp=new_timestamp,
time_interval=time_interval)
assert approx(prob) == multivariate_normal.pdf(
new_state_vec_wo_noise.T,
mean=sp.array(F@state_vec).ravel(),
cov=Q)
    # Propagate a state vector through the model
# (with internal noise)
new_state_vec_w_inoise = rw.function(
state_vec,
timestamp=new_timestamp,
time_interval=time_interval)
assert not sp.array_equal(new_state_vec_w_inoise, F@state_vec)
# Evaluate the likelihood of the predicted state, given the prior
# (with noise)
prob = rw.pdf(new_state_vec_w_inoise,
state_vec,
timestamp=new_timestamp,
time_interval=time_interval)
assert approx(prob) == multivariate_normal.pdf(
new_state_vec_w_inoise.T,
mean=sp.array(F@state_vec).ravel(),
cov=Q)
    # Propagate a state vector through the model
# (with external noise)
noise = rw.rvs(timestamp=new_timestamp, time_interval=time_interval)
new_state_vec_w_enoise = rw.function(
state_vec,
timestamp=new_timestamp,
time_interval=time_interval,
noise=noise)
assert sp.array_equal(new_state_vec_w_enoise, F@state_vec+noise)
# Evaluate the likelihood of the predicted state, given the prior
# (with noise)
prob = rw.pdf(new_state_vec_w_enoise, state_vec,
timestamp=new_timestamp, time_interval=time_interval)
assert approx(prob) == multivariate_normal.pdf(
new_state_vec_w_enoise.T,
mean=sp.array(F@state_vec).ravel(),
cov=Q)
```
#### File: stonesoup/predictor/particle.py
```python
from functools import lru_cache
from .base import Predictor
from ..types.particle import Particle
from ..types.prediction import ParticleStatePrediction
class ParticlePredictor(Predictor):
"""ParticlePredictor class
An implementation of a Particle Filter predictor.
"""
@lru_cache()
def predict(self, prior, control_input=None, timestamp=None, **kwargs):
"""Particle Filter prediction step
Parameters
----------
prior : :class:`~.ParticleState`
A prior state object
control_input : :class:`~.State`, optional
The control input. It will only have an effect if
:attr:`control_model` is not `None` (the default is `None`)
timestamp: :class:`datetime.datetime`, optional
A timestamp signifying when the prediction is performed
(the default is `None`)
Returns
-------
: :class:`~.ParticleStatePrediction`
The predicted state
"""
# Compute time_interval
try:
time_interval = timestamp - prior.timestamp
except TypeError:
# TypeError: (timestamp or prior.timestamp) is None
time_interval = None
new_particles = []
for particle in prior.particles:
new_state_vector = self.transition_model.function(
particle.state_vector,
time_interval=time_interval,
**kwargs)
new_particles.append(
Particle(new_state_vector,
weight=particle.weight,
parent=particle.parent))
return ParticleStatePrediction(new_particles, timestamp=timestamp)
```
#### File: simulator/tests/test_groundtruth.py
```python
import datetime
import pytest
import numpy as np
from ...types.state import GaussianState, State
from ..simple import (
SingleTargetGroundTruthSimulator, MultiTargetGroundTruthSimulator,
SwitchOneTargetGroundTruthSimulator, SwitchMultiTargetGroundTruthSimulator)
@pytest.fixture(params=[datetime.timedelta(seconds=1),
datetime.timedelta(seconds=10),
datetime.timedelta(minutes=1)])
def timestep(request):
return request.param
def test_single_target_ground_truth_simulator(transition_model1, timestep):
initial_state = State(np.array([[1]]), timestamp=datetime.datetime.now())
simulator = SingleTargetGroundTruthSimulator(transition_model1,
initial_state, timestep)
for step, (time, groundtruth_paths) in enumerate(simulator):
# Check single ground truth track
assert len(groundtruth_paths) == 1
# Check length of path is equal to number of steps
gt_path = groundtruth_paths.pop()
assert len(gt_path) == step + 1
# Check time is now + steps
timedelta = simulator.timestep * step
assert gt_path[-1].timestamp == initial_state.timestamp + timedelta
# Check ground truth object has moved
assert gt_path[-1].state_vector == initial_state.state_vector +\
timedelta.total_seconds()
# Check that the number of steps is equal to the simulation steps
assert step + 1 == simulator.number_steps
def test_multitarget_ground_truth_simulator(transition_model1, timestep):
initial_state = GaussianState(np.array([[1]]), np.array([[0]]),
timestamp=datetime.datetime.now())
simulator = MultiTargetGroundTruthSimulator(transition_model1,
initial_state, timestep)
total_paths = set()
for step, (time, groundtruth_paths) in enumerate(simulator):
total_paths |= groundtruth_paths
# Check time is now + steps
assert time == initial_state.timestamp + simulator.timestep * step
# Check number of steps is equal to simulation steps
assert step + 1 == simulator.number_steps
# Check that there are multiple ground truth paths
assert len(total_paths) > 1
# Check that ground truth paths die
assert len(groundtruth_paths) < len(total_paths)
# Check that ground truth paths vary in length
assert len({len(path) for path in total_paths}) > 1
def test_one_target_ground_truth_simulator_switch(transition_model1,
transition_model2,
timestep):
initial_state = State(np.array([[1]]), timestamp=datetime.datetime.now())
model_probs = [[0.5, 0.5], [0.5, 0.5]]
simulator = SwitchOneTargetGroundTruthSimulator(
transition_models=[transition_model1, transition_model2],
model_probs=model_probs,
initial_state=initial_state,
timestep=timestep)
for step, (time, groundtruth_paths) in enumerate(simulator):
# Check single ground truth track
assert len(groundtruth_paths) == 1
# Check length of path is equal to number of steps
gt_path = groundtruth_paths.pop()
assert len(gt_path) == step + 1
# Check time is now + steps
timedelta = simulator.timestep * step
assert gt_path[-1].timestamp == initial_state.timestamp + timedelta
record = []
for state in gt_path:
record.append(state.metadata.get("index")+1)
total = sum(record[1:])
# Check ground truth object has moved
assert gt_path[-1].state_vector == initial_state.state_vector +\
timestep.total_seconds()*total
# Check that the number of steps is equal to the simulation steps
assert step + 1 == simulator.number_steps
def test_multitarget_ground_truth_simulator_switch(
        transition_model1, transition_model2, timestep):
initial_state = GaussianState(np.array([[1]]), np.array([[0]]),
timestamp=datetime.datetime.now())
model_probs = [[0.5, 0.5], [0.5, 0.5]]
simulator = SwitchMultiTargetGroundTruthSimulator(
transition_models=[transition_model1, transition_model2],
model_probs=model_probs,
initial_state=initial_state,
timestep=timestep)
total_paths = set()
for step, (time, groundtruth_paths) in enumerate(simulator):
total_paths |= groundtruth_paths
# Check time is now + steps
assert time == initial_state.timestamp + simulator.timestep * step
# Check number of steps is equal to simulation steps
assert step + 1 == simulator.number_steps
# Check that there are multiple ground truth paths
assert len(total_paths) > 1
# Check that ground truth paths die
assert len(groundtruth_paths) < len(total_paths)
# Check that ground truth paths vary in length
assert len({len(path) for path in total_paths}) > 1
for path in total_paths:
indices = []
for state in path:
indices.append(state.metadata.get("index"))
if len(path) > 9:
assert indices.count(1) < len(path)
```
#### File: stonesoup/tests/conftest.py
```python
import pytest
from ..base import Base, Property
@pytest.fixture(scope='session')
def base():
class _TestBase(Base):
property_a = Property(int)
property_b = Property(str)
property_c = Property(int, default=123)
return _TestBase
```
#### File: stonesoup/tests/test_functions.py
```python
import numpy as np
from numpy import deg2rad
from pytest import approx
from ..functions import (
jacobian, gm_reduce_single, mod_bearing, mod_elevation)
def test_jacobian():
""" jacobian function test """
# State related variables
state_mean = np.array([[3.0], [1.0]])
def f(x):
return np.array([[1, 1], [0, 1]])@x
jac = jacobian(f, state_mean)
jac = jac # Stop flake8 unused warning
def test_jacobian2():
""" jacobian function test """
# Sample functions to compute Jacobian on
def fun(x):
""" function for testing scalars i.e. scalar input, scalar output"""
return 2*x**2
def fun1d(vec):
""" test function with vector input, scalar output"""
out = 2*vec[0]+3*vec[1]
return out
def fun2d(vec):
""" test function with 2d input and 2d output"""
out = np.empty((2, 1))
out[0] = 2*vec[0]**2 + 3*vec[1]**2
out[1] = 2*vec[0]+3*vec[1]
return out
x = 3
jac = jacobian(fun, x)
assert jac == 4*x
x = np.array([[1], [2]])
# Tolerance value to use to test if arrays are equal
tol = 1.0e-5
jac = jacobian(fun1d, x)
T = np.array([2.0, 3.0])
FOM = np.where(np.abs(jac-T) > tol)
# Check # of array elements bigger than tol
assert len(FOM[0]) == 0
jac = jacobian(fun2d, x)
T = np.array([[4.0*x[0], 6*x[1]],
[2, 3]])
FOM = np.where(np.abs(jac - T) > tol)
# Check # of array elements bigger than tol
assert len(FOM[0]) == 0
return
def test_gm_reduce_single():
means = np.array([[1, 2], [3, 4], [5, 6]])
covars = np.array([[[1, 1], [1, 0.7]],
[[1.2, 1.4], [1.3, 2]],
[[2, 1.4], [1.2, 1.2]]])
weights = np.array([1, 2, 5])
mean, covar = gm_reduce_single(means, covars, weights)
assert np.allclose(mean, np.array([[4], [5]]))
assert np.allclose(covar, np.array([[3.675, 3.35],
[3.2, 3.3375]]))
def test_bearing():
bearing_in = [10., 170., 190., 260., 280., 350., 705]
rad_in = deg2rad(bearing_in)
bearing_out = [10., 170., -170., -100., -80., -10., -15.]
rad_out = deg2rad(bearing_out)
for ind, val in enumerate(rad_in):
assert rad_out[ind] == approx(mod_bearing(val))
def test_elevation():
elev_in = [10., 80., 110., 170., 190., 260., 280]
rad_in = deg2rad(elev_in)
elev_out = [10., 80., 70., 10., -10., -80., -80.]
rad_out = deg2rad(elev_out)
for ind, val in enumerate(rad_in):
assert rad_out[ind] == approx(mod_elevation(val))
```
#### File: types/tests/test_association.py
```python
import datetime
import numpy as np
import pytest
from ..association import Association, AssociationPair, AssociationSet, \
SingleTimeAssociation, TimeRangeAssociation
from ..detection import Detection
from ..time import TimeRange
def test_association():
with pytest.raises(TypeError):
Association()
objects = {Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
Detection(np.array([[5], [6]]))}
assoc = Association(objects)
assert assoc.objects == objects
def test_associationpair():
with pytest.raises(TypeError):
AssociationPair()
objects = [Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
Detection(np.array([[5], [6]]))]
# Over 3 objects
with pytest.raises(ValueError):
AssociationPair(set(objects))
# Under 2 objects
with pytest.raises(ValueError):
AssociationPair({objects[0]})
# 2 objects
assoc = AssociationPair(set(objects[:2]))
    assert assoc.objects == set(objects[:2])
def test_singletimeassociation():
with pytest.raises(TypeError):
SingleTimeAssociation()
objects = {Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
Detection(np.array([[5], [6]]))}
timestamp = datetime.datetime(2018, 3, 1, 5, 3, 35)
assoc = SingleTimeAssociation(objects=objects, timestamp=timestamp)
assert assoc.objects == objects
assert assoc.timestamp == timestamp
def test_timerangeassociation():
with pytest.raises(TypeError):
TimeRangeAssociation()
objects = {Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
Detection(np.array([[5], [6]]))}
timestamp1 = datetime.datetime(2018, 3, 1, 5, 3, 35)
timestamp2 = datetime.datetime(2018, 3, 1, 5, 8, 35)
timerange = TimeRange(start_timestamp=timestamp1, end_timestamp=timestamp2)
assoc = TimeRangeAssociation(objects=objects, time_range=timerange)
assert assoc.objects == objects
assert assoc.time_range == timerange
def test_associationset():
# Set up some dummy variables
timestamp1 = datetime.datetime(2018, 3, 1, 5, 3, 35)
timestamp2 = datetime.datetime(2018, 3, 1, 5, 8, 35)
timerange = TimeRange(start_timestamp=timestamp1, end_timestamp=timestamp2)
objects_list = [Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
Detection(np.array([[5], [6]]))]
assoc1 = SingleTimeAssociation(objects=set(objects_list),
timestamp=timestamp1)
assoc2 = TimeRangeAssociation(objects=set(objects_list[1:]),
time_range=timerange)
assoc_set = AssociationSet({assoc1, assoc2})
assert assoc_set.associations == {assoc1, assoc2}
# Test associations including objects
# Object only present in object 1
assert assoc_set.associations_including_objects(objects_list[0]) \
== {assoc1}
# Object present in both
assert assoc_set.associations_including_objects(objects_list[1]) \
== {assoc1, assoc2}
# Object present in neither
assert not assoc_set.associations_including_objects(
Detection(np.array([[0], [0]])))
# Test associations including timestamp
# Timestamp present in one object
assert assoc_set.associations_at_timestamp(timestamp2) \
== {assoc2}
# Timestamp present in both
assert assoc_set.associations_at_timestamp(timestamp1) \
== {assoc1, assoc2}
# Timestamp not present in either
timestamp3 = datetime.datetime(2018, 3, 1, 6, 8, 35)
assert not assoc_set.associations_at_timestamp(timestamp3)
```
#### File: stonesoup/updater/base.py
```python
from abc import abstractmethod
from ..base import Base, Property
from ..models.measurement import MeasurementModel
class Updater(Base):
r"""Updater base class
An updater is used to update the predicted state, utilising a measurement
and a :class:`~.MeasurementModel`. The general observation model is
.. math::
\mathbf{z} = h(\mathbf{x}, \mathbf{\sigma})
where :math:`\mathbf{x}` is the state, :math:`\mathbf{\sigma}`, the
measurement noise and :math:`\mathbf{z}` the resulting measurement.
"""
measurement_model = Property(MeasurementModel, doc="measurement model")
@abstractmethod
def predict_measurement(
self, state_prediction, measurement_model=None, **kwargs):
"""Get measurement prediction from state prediction
Parameters
----------
state_prediction : :class:`~.StatePrediction`
The state prediction
measurement_model: :class:`~.MeasurementModel`, optional
The measurement model used to generate the measurement prediction.
Should be used in cases where the measurement model is dependent
on the received measurement. The default is `None`, in which case
the updater will use the measurement model specified on
initialisation
Returns
-------
: :class:`~.MeasurementPrediction`
The predicted measurement
"""
raise NotImplementedError
@abstractmethod
def update(self, hypothesis, **kwargs):
"""Update state using prediction and measurement.
Parameters
----------
hypothesis : :class:`~.Hypothesis`
Hypothesis with predicted state and associated detection used for
updating.
Returns
-------
: :class:`~.State`
The state posterior
"""
raise NotImplementedError
```
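The docstring above states the general observation model in the abstract. As a standalone numerical illustration (independent of Stone-Soup's classes), the linear-Gaussian special case reduces `h(x, sigma)` to `Hx + sigma`:
```python
import numpy as np

# State with position and velocity; only the position is observed.
x = np.array([[1.0],
              [0.5]])                  # state vector
H = np.array([[1.0, 0.0]])             # linear measurement matrix
sigma = np.random.normal(0.0, 0.1)     # measurement noise sample
z = H @ x + sigma                      # z = h(x, sigma) in the linear case
print(z)                               # e.g. [[1.03]]
```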
#### File: stonesoup/updater/particle.py
```python
from functools import lru_cache
from .base import Updater
from ..base import Property
from ..resampler import Resampler
from ..types.numeric import Probability
from ..types.particle import Particle
from ..types.prediction import ParticleMeasurementPrediction
from ..types.update import ParticleStateUpdate
class ParticleUpdater(Updater):
"""Simple Particle Updater
    Perform the measurement update step of a standard particle filter.
"""
resampler = Property(Resampler,
doc='Resampler to prevent particle degeneracy')
def update(self, hypothesis, **kwargs):
"""Particle Filter update step
Parameters
----------
hypothesis : :class:`~.Hypothesis`
Hypothesis with predicted state and associated detection used for
updating.
Returns
-------
: :class:`~.ParticleState`
The state posterior
"""
if hypothesis.measurement.measurement_model is None:
measurement_model = self.measurement_model
else:
measurement_model = hypothesis.measurement.measurement_model
for particle in hypothesis.prediction.particles:
particle.weight *= measurement_model.pdf(
hypothesis.measurement.state_vector, particle.state_vector,
**kwargs)
# Normalise the weights
sum_w = Probability.sum(
i.weight for i in hypothesis.prediction.particles)
for particle in hypothesis.prediction.particles:
particle.weight /= sum_w
# Resample
new_particles = self.resampler.resample(
hypothesis.prediction.particles)
return ParticleStateUpdate(new_particles,
hypothesis,
timestamp=hypothesis.measurement.timestamp)
@lru_cache()
def predict_measurement(self, state_prediction, measurement_model=None,
**kwargs):
if measurement_model is None:
measurement_model = self.measurement_model
new_particles = []
for particle in state_prediction.particles:
new_state_vector = measurement_model.function(
particle.state_vector, noise=0, **kwargs)
new_particles.append(
Particle(new_state_vector,
weight=particle.weight,
parent=particle.parent))
return ParticleMeasurementPrediction(
new_particles, timestamp=state_prediction.timestamp)
``` |
{
"source": "jponf/leetcode",
"score": 4
} |
#### File: leetcode/problem3/p3solution.py
```python
def length_of_longest_substring(text):
chars = {} # type: MutableMapping[str, int]
best, first = 0, 0
for i, c in enumerate(text):
if c in chars:
best = max(best, i - first)
first = max(first, chars.pop(c) + 1)
chars[c] = i
return max(best, len(text) - first)
if __name__ == '__main__':
text = "abcabcbb"
print("Length of longest substring in '%s'" % text)
print(length_of_longest_substring(text))
```
#### File: leetcode/problem4/p4solution.py
```python
def find_sorted_arrays_median(nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
if len(nums1) > len(nums2):
return _find_sorted_ararys_median(nums2, 0, len(nums2),
nums1, 0, len(nums1))
return _find_sorted_ararys_median(nums1, 0, len(nums1),
nums2, 0, len(nums2))
def find_sorted_array_median(nums):
"""
:type nums: List[int]
:rtype: float
"""
mid = len(nums) // 2
return nums[mid] if len(nums) % 2 else (nums[mid - 1] + nums[mid]) / 2.0
def _find_sorted_ararys_median(nums1, start1, len1, nums2, start2, len2):
mid2 = start2 + len2 // 2
if len1 == 0:
return nums2[mid2] if len2 % 2 else (nums2[mid2-1] + nums2[mid2]) / 2.0
if len1 == 1:
if len2 == 1:
return (nums1[start1] + nums2[start2]) / 2.0
elif len2 % 2: # is odd
return median_of_4(nums1[start1], nums2[mid2-1],
nums2[mid2], nums2[mid2+1])
else: # is even
return median_of_3(nums1[start1], nums2[mid2-1], nums2[mid2])
elif len1 == 2:
if len2 == 2:
return median_of_4(nums1[start1], nums1[start1+1],
nums2[start2], nums2[start2+1])
elif len2 % 2: # is odd
return median_of_3(max(nums1[start1], nums2[mid2-1]),
nums2[mid2],
min(nums1[start1+1], nums2[mid2+1]))
else: # is even
return median_of_4(max(nums1[start1], nums2[mid2-2]),
nums2[mid2-1], nums2[mid2],
min(nums1[start1+1], nums2[mid2+1]))
idx1, idx2 = start1 + (len1 - 1) // 2, start2 + (len2 - 1) // 2
if nums1[idx1] > nums2[idx2]:
return _find_sorted_ararys_median(nums1, start1, len1 // 2 + 1,
nums2, start2 + idx1 - start1,
len2 + start1 - idx1)
else:
return _find_sorted_ararys_median(nums1, idx1, len1 // 2 + 1,
nums2, start2, len2 + start1 - idx1)
def median_of_2(num1, num2):
"""Utility functions to compute the median of 3 numbers"""
return (num1 + num2) / 2.0
def median_of_3(num1, num2, num3):
"""Utility functions to compute the median of 3 numbers"""
return num1 + num2 + num3 - min(num1, num2, num3) - max(num1, num2, num3)
def median_of_4(num1, num2, num3, num4):
"""Utility functions to compute the median of 4 numbers"""
num_sum = num1 + num2 + num3 + num4
num_sum -= min(num1, num2, num3, num4)
num_sum -= max(num1, num2, num3, num4)
return num_sum / 2.0
if __name__ == '__main__':
NUMS1 = [1, 3]
NUMS2 = [2]
print("Median of", NUMS1, "and", NUMS2)
print(" > Expected:", find_sorted_array_median(sorted(NUMS1 + NUMS2)))
print(" > Result:", find_sorted_arrays_median(NUMS1, NUMS2))
NUMS3 = [1, 2]
NUMS4 = [3, 4]
print("Median of", NUMS3, "and", NUMS4)
print(" > Expected:", find_sorted_array_median(sorted(NUMS3 + NUMS4)))
print(" > Result:", find_sorted_arrays_median(NUMS3, NUMS4))
NUMS5 = [1, 2]
NUMS6 = [3, 4, 5]
print("Median of", NUMS5, "and", NUMS6)
print(" > Expected:", find_sorted_array_median(sorted(NUMS5 + NUMS6)))
print(" > Result:", find_sorted_arrays_median(NUMS5, NUMS6))
NUMS7 = [1, 2, 5, 8, 10]
NUMS8 = [3, 4, 6, 9, 12]
print("Median of", NUMS7, "and", NUMS8)
print(" > Expected:", find_sorted_array_median(sorted(NUMS7 + NUMS8)))
print(" > Result:", find_sorted_arrays_median(NUMS7, NUMS8))
NUMS9 = [1, 2, 5, 8, 10]
NUMS10 = [2, 3, 4, 6, 9, 12]
print("Median of", NUMS9, "and", NUMS10)
print(" > Expected:", find_sorted_array_median(sorted(NUMS9 + NUMS10)))
print(" > Result:", find_sorted_arrays_median(NUMS9, NUMS10))
NUMS11 = [1]
NUMS12 = [4, 7, 10, 15]
print("Median of", NUMS11, "and", NUMS12)
print(" > Expected:", find_sorted_array_median(sorted(NUMS11 + NUMS12)))
print(" > Result:", find_sorted_arrays_median(NUMS11, NUMS12))
NUMS13 = [1]
NUMS14 = [4, 7, 10]
print("Median of", NUMS13, "and", NUMS14)
print(" > Expected:", find_sorted_array_median(sorted(NUMS13 + NUMS14)))
print(" > Result:", find_sorted_arrays_median(NUMS13, NUMS14))
NUMS15 = [1, 5, 6]
NUMS16 = [2, 3, 4, 7, 8]
print("Median of", NUMS15, "and", NUMS16)
print(" > Expected:", find_sorted_array_median(sorted(NUMS15 + NUMS16)))
print(" > Result:", find_sorted_arrays_median(NUMS15, NUMS16))
NUMS17 = []
NUMS18 = [2, 3]
print("Median of", NUMS17, "and", NUMS18)
print(" > Expected:", find_sorted_array_median(sorted(NUMS17 + NUMS18)))
print(" > Result:", find_sorted_arrays_median(NUMS17, NUMS18))
``` |
{
"source": "jponf/pyrl",
"score": 2
} |
#### File: pyrl/agents/core.py
```python
import abc
import six
import tqdm.auto as tqdm
# ...
import pyrl.util.ugym
from pyrl.util.summary import BaseSummary, DummySummary
###############################################################################
@six.add_metaclass(abc.ABCMeta)
class BaseAgent:
"""Base Agent interface."""
def __init__(self, *args, **kwargs):
super(BaseAgent, self).__init__(*args, **kwargs)
self._summary: BaseSummary = DummySummary()
self._num_episodes: int = 0
self._train_steps: int = 0
self._train_mode: bool = True
@property
def num_train_steps(self) -> int:
"""Number of times the agent has been trained."""
return self._train_steps
def init_summary_writter(self, log_path):
"""Initializes a tensorboard summary writter to track the agent
evolution while trainig."""
if not isinstance(self._summary, pyrl.util.summary.DummySummary):
raise ValueError("summary writter can only be initialized once")
self._summary = pyrl.util.summary.Summary(log_dir=log_path)
def set_eval_mode(self):
"""Sets the agent in evaluation mode."""
self.set_train_mode(mode=False)
@abc.abstractmethod
def set_train_mode(self, mode: bool = True):
"""Sets the agent training mode."""
self._train_mode = mode
def begin_episode(self):
"""Prepares the agent to run a new training episode.
Some agents have to prepare to register a new training
episode, for example by emptying buffers, reseting noise, etc.
"""
def end_episode(self):
"""Indicates the agent that the episode that started in a previous
call to `begin_episode` has finished.
When `end_episode` is called the agent can use all the experience
        gathered in calls to `update` to compute metrics and move data
        from temporary to persistent buffers.
"""
self._num_episodes += 1
@abc.abstractmethod
def update(self, state, action, reward, next_state, terminal):
"""Registers the transition and updates any agent internal information
useful for training.
:param state: State from which the agent decided to take `action`.
:param action: Action taken to move from `state` to `next_state`.
:param reward: Reward received for taking `action` from `state`.
:param next_state: State reached after taking `action`.
:param terminal: Whether or not `next_state` is a terminal state.
"""
@abc.abstractmethod
def compute_action(self, state):
"""Computes the next action to take given the current `state` of
the environment.
This function may behave differently depending on the agent
mode, evaluation or training, for example by adding noise to
explore unknown states.
"""
raise NotImplementedError
def train(self, steps, progress: bool = False):
"""Trains an agent for the specified number of `steps`.
:param steps: The number of steps to train the agent for.
:param progress: If set the training progress is printed on the
standard output stream (using tqdm).
"""
if not self._train_mode:
raise ValueError("agent must be in train mode")
if steps <= 0:
raise ValueError("steps must be > 0")
if progress:
t_steps = tqdm.trange(steps, desc="Train step", dynamic_ncols=True)
else:
t_steps = six.moves.range(steps)
for _ in t_steps:
self._train()
self._train_steps += 1
@abc.abstractmethod
def _train(self):
"""Train the agent one step.
This method is called internally by `train()`.
"""
# Agent State
########################
@abc.abstractmethod
def state_dict(self):
"""Returns a dictionary containing the whole state of the agent.
The content depends on the type of agent and may include neural
nets, running averages, etc.
:return: A dictionary containing the whole state of the agent.
:rtype: dict
"""
@abc.abstractmethod
def load_state_dict(self, state):
"""Copies the state into this agent. Any additional key in the
dictionary is ignored.
Unless you know what you are doing you should only pass dictionaries
returned by `state_dict()`.
:param state: A dict containing a valid agent state.
:raise KeyError: If a required key is not in the dictionary.
"""
@abc.abstractmethod
def aggregate_state_dicts(self, states):
"""Aggregates the content of multiple states into this agent.
This method is mainly intended for distributed training.
:param states: A list of states (dictionaries) valid for this agent.
"""
@abc.abstractmethod
def save(self, path, replay_buffer=True):
"""Saves the agent in the given `path`.
Different agents may save their state using different formats
but the preferred way is using `path` as a root directory that
will contain all the agent components.
:param replay_buffer: Whether the replay buffer should be saved
or not.
"""
raise NotImplementedError
class Agent(BaseAgent):
"""Generic Base Agent Interface."""
def __init__(self, observation_space, action_space, *args, **kwargs):
super(Agent, self).__init__(*args, **kwargs)
self.observation_space = observation_space
self.action_space = action_space
@classmethod
def load(cls, path, *args, replay_buffer=True, **kwargs):
"""Loads the agent from the given path.
:param path: Path that contains the agent information.
:param replay_buffer: If set loads the replay buffer.
"""
raise NotImplementedError
class HerAgent(BaseAgent):
"""Generic Hindsight Experience Replay Agent interface."""
def __init__(self, env, *args, **kwargs):
super(HerAgent, self).__init__(*args, **kwargs)
if not pyrl.util.ugym.is_her_env(env):
raise ValueError("{} is not a valid HER environment".format(env))
self.env = env
@property
def max_episode_steps(self):
"""The `max_episode_steps` attribute from the environment's spec."""
return self.env.spec.max_episode_steps
@abc.abstractmethod
def load_demonstrations(self, demo_path):
"""Loads a .npz file with 3 components 'acs', 'obs' and 'info'.
- acs: are the actions taken by the agent as given to step(...).
- obs: are the states returned by reset() and step(...).
- info: are the info objects returne by step(...).
Note: There should always be one more 'obs' than 'acs' and 'info'.
:param demo_path: Path to the .npz file with the data to build the
demonstration replay buffer.
"""
@classmethod
def load(cls, path, env, *args, replay_buffer=True, **kwargs):
"""Loads the agent from the given path.
:param path: Path that contains the agent information.
:param env: Environment that the agent acts on.
:param replay_buffer: If set loads the replay buffer.
"""
raise NotImplementedError
```
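The lifecycle described by the interface above (begin_episode, compute_action/update per step, end_episode, then train) is easiest to read next to the loop that drives it. The sketch below is illustrative only: `agent` is assumed to be some concrete `Agent` subclass and `env` a Gym-style environment, both constructed elsewhere, and the episode and step counts are arbitrary.
```python
def run_training(agent, env, num_episodes=100, max_steps=200, train_steps=50):
    """Drive an Agent through the lifecycle defined by BaseAgent."""
    agent.set_train_mode()
    for _ in range(num_episodes):
        agent.begin_episode()
        state = env.reset()
        for _ in range(max_steps):
            action = agent.compute_action(state)
            next_state, reward, done, _ = env.step(action)
            agent.update(state, action, reward, next_state, terminal=done)
            state = next_state
            if done:
                break
        agent.end_episode()
        # Off-policy agents replay stored transitions to update their networks.
        agent.train(train_steps)
```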
#### File: pyrl/agents/sac.py
```python
import collections
import errno
import os
import pickle
# Scipy
# import numpy as np
# Torch
import torch
import torch.optim as optim
import torch.nn.functional as F
# ...
import pyrl.util.logging
import pyrl.util.umath as umath
from pyrl.models.models_utils import soft_update
from .agents_utils import create_normalizer, create_actor, create_critic, dicts_mean
from .core import Agent
from .replay_buffer import FlatReplayBuffer
###############################################################################
_DEVICE = "cpu"
_LOG = pyrl.util.logging.get_logger()
###############################################################################
class SAC(Agent):
"""Soft Actor Critic.
Introduced in the paper: Off-Policy Maximum Entropy Deep Reinforcement
Learning with a Stochastic Actor.
"""
def __init__(
self,
observation_space,
action_space,
alpha=0.2,
gamma=0.95,
tau=0.005,
batch_size=128,
reward_scale=1.0,
replay_buffer_size=1000000,
random_steps=1000,
actor_cls=None,
actor_kwargs=None,
actor_lr=0.001,
critic_cls=None,
critic_kwargs=None,
critic_lr=0.001,
tune_alpha=True,
observation_normalizer="none",
observation_clip=float("inf"),
):
"""
:param observation_space: Structure of the observations returned by
the enviornment.
:type observation_space: gym.Box
:param action_space: Structure of the actions that can be taken in
the environment.
:type action_space: gym.Box
        :param alpha: Initial entropy coefficient. It is auto-tuned during
            training when `tune_alpha` is True.
:param gamma: Bellman's discount rate.
:type gamma: float
:param tau: Used to perform "soft" updates (polyak averaging) of the
weights from the actor/critic to their "target" counterparts.
:type tau: float
:param batch_size: Size of the sample used to train the actor and
critic at each timestep.
:type batch_size: int
:param replay_buffer_size: Number of transitions to store in the replay
buffer.
:type replay_buffer_size: int
:param random_steps: Number of steps taken completely at random while
training before using the actor action + noise.
:type random_steps: int
:param actor_cls: Actor network class.
:type actor_cls: type
:param actor_kwargs: Arguments to initialize the actor network.
:type actor_kwargs: dict
:param actor_lr: Learning rate for the actor network.
:type actor_lr: float
:param critic_cls: Critic network class.
:type critic_cls: type
:param actor_kwargs: Arguments to initialize the critic network.
:type actor_kwargs: dict
:param critic_lr: Learning rate for the critic network.
:type critic_lr: float
:param observation_normalizer: Normalize the environment observations
according to a normalizer. observation_normalizer can either be
"none" or "standard".
:type observation_normalizer: str
:param observation_clip: Range of the observations after being
normalized. This parameter will only take effect when normalizer
is not set to "none".
:type observation_clip: float
"""
super(SAC, self).__init__(observation_space, action_space)
self.gamma = gamma
self.tau = tau
self.batch_size = batch_size
self.reward_scale = reward_scale
self.replay_buffer = FlatReplayBuffer(
state_shape=self.observation_space.shape,
action_shape=self.action_space.shape,
max_size=replay_buffer_size,
)
self.random_steps = random_steps
# Build model (AC architecture)
(
self.actor,
(self.critic_1, self.target_critic_1),
(self.critic_2, self.target_critic_2),
) = build_sac_ac(
self.observation_space,
self.action_space,
actor_cls,
actor_kwargs,
critic_cls,
critic_kwargs,
)
self._actor_kwargs = actor_kwargs
self._actor_lr = actor_lr
self._critic_kwargs = critic_kwargs
self._critic_lr = critic_lr
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
self.critic_1_optimizer = optim.Adam(self.critic_1.parameters(), lr=critic_lr)
self.critic_2_optimizer = optim.Adam(self.critic_2.parameters(), lr=critic_lr)
# Autotune alpha
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape)).item()
self._log_alpha = torch.as_tensor(alpha, dtype=torch.float32).log()
if tune_alpha:
self._log_alpha.requires_grad_()
self._alpha_optim = optim.Adam([self._log_alpha], lr=critic_lr)
else:
self._alpha_optim = None
# Normalizer
self._obs_normalizer_arg = observation_normalizer
self.obs_normalizer = create_normalizer(
observation_normalizer,
self.observation_space.shape,
clip_range=observation_clip,
)
# Other attributes
self._total_steps = 0
@property
def alpha(self):
"""Relative importance of the entropy term against the reward."""
with torch.no_grad():
return self._log_alpha.exp()
# BaseAgent methods
##########################
def set_train_mode(self, mode=True):
"""Sets the agent training mode."""
super(SAC, self).set_train_mode(mode)
self.actor.train(mode=mode)
self.critic_1.train(mode=mode)
self.critic_2.train(mode=mode)
def update(self, state, action, reward, next_state, terminal):
self._total_steps += 1
action = self._to_actor_space(action) # re-scale action
self.obs_normalizer.update(state)
self.replay_buffer.add(
state=state,
action=action,
next_state=next_state,
reward=reward,
terminal=terminal,
)
@torch.no_grad()
def compute_action(self, state):
# Random exploration
if self._train_mode and self._total_steps < self.random_steps:
return self.action_space.sample()
# Pre-process
state = torch.from_numpy(state).float()
state = self.obs_normalizer.transform(state).unsqueeze_(0).to(_DEVICE)
# Compute action
action, _ = self.actor.sample(state)
action = action.squeeze_(0).cpu().numpy()
return self._to_action_space(action)
def train(self, steps, progress=False):
if len(self.replay_buffer) >= self.batch_size:
super(SAC, self).train(steps, progress)
def _train(self):
(
states,
actions,
next_states,
rewards,
terminals,
) = self.replay_buffer.sample_batch_torch(self.batch_size, device=_DEVICE)
next_states = self.obs_normalizer.transform(next_states)
states = self.obs_normalizer.transform(states)
self._train_critic(states, actions, next_states, rewards, terminals)
self._train_policy(states)
self._train_alpha(states)
self._update_target_networks()
def _train_critic(self, states, actions, next_states, rewards, terminals):
with torch.no_grad():
next_action, next_log_pi = self.actor.sample(next_states)
next_q1 = self.target_critic_1(next_states, next_action)
next_q2 = self.target_critic_2(next_states, next_action)
next_q = torch.min(next_q1, next_q2) - self.alpha * next_log_pi
next_q *= (1 - terminals.int()) * self.gamma
next_q += self.reward_scale * rewards
# Optimize critics
curr_q1 = self.critic_1(states, actions)
loss_q1 = F.smooth_l1_loss(curr_q1, next_q)
self.critic_1_optimizer.zero_grad()
loss_q1.backward()
self.critic_1_optimizer.step()
curr_q2 = self.critic_2(states, actions)
loss_q2 = F.smooth_l1_loss(curr_q2, next_q)
self.critic_2_optimizer.zero_grad()
loss_q2.backward()
self.critic_2_optimizer.step()
# with torch.no_grad():
# self._summary.add_scalars("Q", {"Mean_Q1": curr_q1.mean(),
# "Mean_Q2": curr_q2.mean(),
# "Mean_Target": next_q.mean()},
# self._train_steps)
# self._summary.add_scalar("Loss/Q1", loss_q1, self._train_steps)
# self._summary.add_scalar("Loss/Q2", loss_q2, self._train_steps)
def _train_policy(self, states):
actor_out, log_pi = self.actor.sample(states)
min_q = torch.min(
self.critic_1(states, actor_out), self.critic_2(states, actor_out)
)
# Jπ = 𝔼st∼D,εt∼N[α * log π(f(εt;st)|st) − Q(st,f(εt;st))]
loss_a = (self.alpha * log_pi - min_q).mean()
self.actor_optimizer.zero_grad()
loss_a.backward(retain_graph=True)
self.actor_optimizer.step()
with torch.no_grad():
self._summary.add_scalar("Loss/Policy", loss_a, self._train_steps)
self._summary.add_scalar("Stats/LogProb", log_pi.mean(), self._train_steps)
self._summary.add_scalar("Stats/Alpha", self.alpha, self._train_steps)
means_logs = zip(actor_out.mean(dim=0), log_pi)
for i, (mean, log) in enumerate(means_logs):
self._summary.add_scalar(f"Action/Mean_{i}", mean, self._train_steps)
self._summary.add_scalar(
f"Action/Prob_{i}", log / len(states), self._train_steps
)
def _train_alpha(self, states):
if self._alpha_optim is not None:
_, log_pi = self.actor.sample(states)
alpha_loss = (
self._log_alpha * (-log_pi - self.target_entropy).detach()
).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._summary.add_scalar(
"Loss/Alpha", alpha_loss.detach(), self._train_steps
)
def _update_target_networks(self):
soft_update(self.critic_1, self.target_critic_1, self.tau)
soft_update(self.critic_2, self.target_critic_2, self.tau)
# Agent State
########################
def state_dict(self):
state = {
"critic1": self.critic_1.state_dict(),
"critic2": self.critic_2.state_dict(),
"actor": self.actor.state_dict(),
"log_alpha": self._log_alpha,
"obs_normalizer": self.obs_normalizer.state_dict(),
"train_steps": self._train_steps,
"total_steps": self._total_steps,
}
return state
def load_state_dict(self, state):
self.critic_1.load_state_dict(state["critic1"])
self.target_critic_1.load_state_dict(state["critic1"])
self.critic_2.load_state_dict(state["critic2"])
self.target_critic_2.load_state_dict(state["critic2"])
self.actor.load_state_dict(state["actor"])
with torch.no_grad():
self._log_alpha.copy_(state["log_alpha"])
self.obs_normalizer.load_state_dict(state["obs_normalizer"])
self._train_steps = state["train_steps"]
self._total_steps = state["total_steps"]
def aggregate_state_dicts(self, states):
critic_1_state = dicts_mean([x["critic1"] for x in states])
self.critic_1.load_state_dict(critic_1_state)
self.target_critic_1.load_state_dict(critic_1_state)
critic_2_state = dicts_mean([x["critic2"] for x in states])
self.critic_2.load_state_dict(critic_2_state)
self.target_critic_2.load_state_dict(critic_2_state)
actor_state = dicts_mean([x["actor"] for x in states])
self.actor.load_state_dict(actor_state)
with torch.no_grad():
self._log_alpha.copy_(sum(x["log_alpha"] for x in states))
self._log_alpha.div_(len(states))
self.obs_normalizer.load_state_dict([x["obs_normalizer"] for x in states])
self._train_steps = max(x["train_steps"] for x in states)
self._total_steps = max(x["total_steps"] for x in states)
# Save/Load Agent
########################
def save(self, path, replay_buffer=True):
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST:
raise
args = collections.OrderedDict(
[
("observation_space", self.observation_space),
("action_space", self.action_space),
("alpha", self.alpha.item()),
("gamma", self.gamma),
("tau", self.tau),
("batch_size", self.batch_size),
("reward_scale", self.reward_scale),
("replay_buffer_size", self.replay_buffer.max_size),
("random_steps", self.random_steps),
("actor_cls", type(self.actor)),
("actor_kwargs", self._actor_kwargs),
("actor_lr", self._actor_lr),
("critic_cls", type(self.critic_1)),
("critic_kwargs", self._critic_kwargs),
("critic_lr", self._critic_lr),
("tune_alpha", self._alpha_optim is not None),
("observation_normalizer", self._obs_normalizer_arg),
("observation_clip", self.obs_normalizer.clip_range),
]
)
pickle.dump(args, open(os.path.join(path, "args.pkl"), "wb"))
state = self.state_dict()
pickle.dump(state, open(os.path.join(path, "state.pkl"), "wb"))
if replay_buffer:
self.replay_buffer.save(os.path.join(path, "replay_buffer.h5"))
@classmethod
def load(cls, path, *args, replay_buffer=True, **kwargs):
if not os.path.isdir(path):
raise ValueError("{} is not a directory".format(path))
# Load and Override arguments used to build the instance
with open(os.path.join(path, "args.pkl"), "rb") as rfh:
_LOG.debug("(TD3) Loading agent arguments")
args_values = pickle.load(rfh)
args_values.update(kwargs)
fmt_string = " {{:>{}}}: {{}}".format(
max(len(x) for x in args_values.keys())
)
for key, value in args_values.items():
_LOG.debug(fmt_string.format(key, value))
# Create instance and load the rest of the data
instance = cls(**args_values)
with open(os.path.join(path, "state.pkl"), "rb") as rfh:
_LOG.debug("(TD3) Loading agent state")
state = pickle.load(rfh)
instance.load_state_dict(state)
replay_buffer_path = os.path.join(path, "replay_buffer.h5")
if replay_buffer and os.path.isfile(replay_buffer_path):
_LOG.debug("(TD3) Loading replay buffer")
instance.replay_buffer.load(replay_buffer_path)
return instance
# Utilities
########################
def _to_actor_space(self, action):
return umath.scale(
x=action,
min_x=self.action_space.low,
max_x=self.action_space.high,
min_out=self.actor.action_space.low,
max_out=self.actor.action_space.high,
)
def _to_action_space(self, action):
return umath.scale(
x=action,
min_x=self.actor.action_space.low,
max_x=self.actor.action_space.high,
min_out=self.action_space.low,
max_out=self.action_space.high,
)
#
###############################################################################
def build_sac_ac(
observation_space, action_space, actor_cls, actor_kwargs, critic_cls, critic_kwargs
):
"""Builds the actor-critic architecture for the SAC algorithm."""
actor = create_actor(
observation_space, action_space, actor_cls, actor_kwargs, policy="gaussian"
).to(_DEVICE)
critic_1 = create_critic(
observation_space, action_space, critic_cls, critic_kwargs
).to(_DEVICE)
target_critic_1 = create_critic(
observation_space, action_space, critic_cls, critic_kwargs
).to(_DEVICE)
target_critic_1.load_state_dict(critic_1.state_dict())
target_critic_1.eval()
critic_2 = create_critic(
observation_space, action_space, critic_cls, critic_kwargs
).to(_DEVICE)
target_critic_2 = create_critic(
observation_space, action_space, critic_cls, critic_kwargs
).to(_DEVICE)
target_critic_2.load_state_dict(critic_2.state_dict())
target_critic_2.eval()
return (actor, (critic_1, target_critic_1), (critic_2, target_critic_2))
```
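To make the constructor arguments above concrete, here is a minimal usage sketch. It assumes the module is importable as `pyrl.agents.sac` and that a Gym continuous-control environment with `Box` observation and action spaces is available; the environment id, step count and save path are placeholders.
```python
import gym

from pyrl.agents.sac import SAC

env = gym.make("Pendulum-v0")  # any Box-observation/Box-action task works
agent = SAC(env.observation_space, env.action_space,
            batch_size=128, random_steps=1000,
            observation_normalizer="standard")
agent.set_train_mode()

state = env.reset()
for _ in range(10000):
    action = agent.compute_action(state)
    next_state, reward, done, _ = env.step(action)
    agent.update(state, action, reward, next_state, terminal=done)
    agent.train(1)  # one SAC gradient step per environment step
    state = env.reset() if done else next_state

agent.save("sac_pendulum_agent", replay_buffer=False)
```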
#### File: pyrl/cli/util.py
```python
import random
import six
# SciPy Stack
import numpy as np
# Torch
import torch
###############################################################################
def initialize_seed(seed):
"""Initializes the seed of different PRNGs.
:param seed: Value to initialize the PRNGs.
"""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
def evaluate(agent, env, max_steps, render):
"""Evaluates the given agent on an environment.
    :return: A tuple with a numpy array containing the reward of each step,
        the list of info dictionaries returned by the environment and a
        boolean telling whether the episode terminated before `max_steps`.
"""
rewards = []
infos = []
done = False
state = env.reset()
for _ in six.moves.range(max_steps):
action = agent.compute_action(state)
next_state, reward, done, info = env.step(action)
if render:
env.render()
state = next_state
rewards.append(reward)
infos.append(info)
if done:
break
return np.array(rewards), infos, done
``` |
{
"source": "jponf/UTM",
"score": 3
} |
#### File: src/tm/tm.py
```python
import copy
from abc import ABCMeta, abstractmethod
from tm.exceptions import HaltStateException, InvalidSymbolException, \
UnknownTransitionException, TapeNotSetException
# TODO: rewrite doc
class TuringMachine:
"""
Represents a turing machine, to work properly there are some restrictions:
- symbols on input alphabet and tape alphabet must be one char length
- transition function must be a dictionary with the following format:
(state, symbol) : (state, symbol, movement)
- tape movements are defined by the following "constants":
- MOVE_LEFT
- MOVE_RIGHT
- NON_MOVEMENT
"""
MOVE_RIGHT = 1
MOVE_LEFT = 2
NON_MOVEMENT = 3
HEAD_MOVEMENTS = frozenset((MOVE_LEFT, MOVE_RIGHT, NON_MOVEMENT))
def __init__(self, states, in_alphabet, tape_alphabet, trans_function,
init_state, final_states, halt_state, blank_sym):
"""
        TuringMachine(states, in_alphabet, tape_alphabet, trans_function,
                      init_state, final_states, halt_state, blank_sym)
        Initialize an instance of TuringMachine with the given data
        - states:
            Iterable with the possible states
        - in_alphabet:
            Iterable with the input alphabet
        - tape_alphabet:
            Iterable with the machine tape alphabet
        - trans_function:
            Dictionary representing the transition function
            (state, symbol) : (state, symbol, movement)
        - init_state:
            Initial state
        - final_states:
            Iterable with the possible final states
        - halt_state:
            Halt state. If reached, execution stops immediately
        - blank_sym:
            Default symbol in all unspecified tape positions
"""
self._states = frozenset(states)
self._in_alphabet = frozenset(in_alphabet)
self._tape_alphabet = frozenset(tape_alphabet)
self._trans_function = copy.copy(trans_function)
self._init_state = init_state
self._final_states = frozenset(final_states)
self._halt_state = halt_state
self._blank_sym = blank_sym
self._check_data()
# Machine tape, head and current state
self._tape = None
self._head = 0
self._cur_state = init_state
self._num_executed_steps = 0
# Set of observers
# is a list because other structures like set forces to implement
# the __hash__ operation
self._observers = []
def run_step(self):
"""
Performs one execution step.
        - If it's at the halt state raises HaltStateException
        - If the tape is unset raises TapeNotSetException
        - If there is no specified transition for the current state and
          symbol, raises UnknownTransitionException
"""
if self.is_at_halt_state():
raise HaltStateException("Current state is halt state")
if self._tape is None:
            raise TapeNotSetException(
                "Tape must be set before performing a step")
cur = (self._cur_state, self._tape[self._head])
for obs in self._observers:
obs.on_step_start(cur[0], cur[1])
try:
state, sym, movement = self._trans_function[cur]
self._tape[self._head] = sym
self._cur_state = state
prev_head_pos = self._head
if movement == TuringMachine.MOVE_LEFT:
if self._head == 0:
self._tape.insert(0, self._blank_sym)
else:
self._head -= 1
elif movement == TuringMachine.MOVE_RIGHT:
self._head += 1
if self._head == len(self._tape):
self._tape.append(self._blank_sym)
# Notify observers
for obs in self._observers:
obs.on_step_end(state, sym, movement)
if prev_head_pos != self._head:
obs.on_head_moved(self._head, prev_head_pos)
self._num_executed_steps += 1
except KeyError:
raise UnknownTransitionException(
                'There is no transition for %s' % str(cur))
def run(self, max_steps=None):
"""
run(max_steps=None): int
Perform steps until 'halt' or 'max steps'
Return values:
0 - Ends by halt state
1 - Ends by max steps limit
2 - Ends by unknown transition
"""
try:
if max_steps:
try:
for _ in range(max_steps):
self.run_step()
return 1
except HaltStateException:
return 0
else:
while not self.is_at_halt_state():
self.run_step()
return 0
except UnknownTransitionException:
return 2
def get_current_state(self):
"""
Returns the current state (Cpt. Obvious)
"""
return self._cur_state
def get_blank_symbol(self):
"""
Returns the blank symbol
"""
return self._blank_sym
def get_halt_state(self):
"""
Returns the halt state
"""
return self._halt_state
def get_initial_state(self):
"""
Returns the initial state
"""
return self._init_state
def get_symbol_at(self, pos):
"""
Returns the symbol at the specified position
        The internal symbols go from 0 to get_internal_tape_size() - 1;
        for any other position out of this range the blank symbol is returned
"""
if pos < 0 or pos >= len(self._tape):
return self._blank_sym
return self._tape[pos]
def get_internal_tape_size(self):
"""
Returns the size of the internal tape representation
"""
return len(self._tape)
def get_head_position(self):
"""
Returns the current head position
"""
return self._head
def get_tape_iterator(self):
"""Returns an iterator of the internal tape
"""
if self._tape:
return iter(self._tape)
else:
raise TapeNotSetException(
"Tape must be set before getting its iterator")
def get_executed_steps_counter(self):
"""
        Returns the number of steps executed since the creation of the machine
        or the last call to reset_executed_steps_counter()
"""
return self._num_executed_steps
def is_at_halt_state(self):
"""
Returns true only if current state is the halt state
"""
return self._cur_state == self._halt_state
def is_at_final_state(self):
"""
Returns true only if current state is a final state
"""
return self._cur_state in self._final_states
def is_tape_set(self):
"""
Returns true only if tape is set
"""
return self._tape is not None
def is_word_accepted(self, word, max_steps=None):
"""Tests if the given word is accepted by this turing machine.
:param word: An iterable str/list/tuple/... of symbols.
:param max_steps: Limit of steps to test if the word is accepted.
:return: True if accepted, False otherwise.
"""
old_tape, old_head = self._tape, self._head
old_state = self._cur_state
self.set_tape(word)
self.run(max_steps)
accepted = self.is_at_final_state()
self._tape, self._head = old_tape, old_head
self._cur_state = old_state
return accepted
def set_tape(self, tape, head_pos=0):
"""Sets tape content and head position.
If head position is negative or greater than tape length the tape is
filled with blanks.
:raise InvalidSymbolException: if tape contains an invalid symbol.
"""
for s in tape:
if s not in self._tape_alphabet:
raise InvalidSymbolException("Invalid tape symbol " + str(s))
# If head pos is out of tape make tape grow with blanks
if head_pos < 0:
self._tape = [self._blank_sym] * (-head_pos)
self._tape.extend(tape)
self._head = 0
elif head_pos >= len(tape):
self._tape = list(tape)
if not self._tape:
self._tape = [self._blank_sym] # Empty tape
self._tape.extend([self._blank_sym] * (head_pos - len(tape)))
self._head = head_pos
else:
self._tape = list(tape)
self._head = head_pos
for obs in self._observers:
obs.on_tape_changed(head_pos)
def set_at_initial_state(self):
"""Forces the machine state to be the initial state."""
self._cur_state = self._init_state
def attach_observer(self, observer):
"""Attaches an observer to this Turing Machine."""
# Observer must have the following method
if not isinstance(observer, BaseTuringMachineObserver):
raise TypeError(
"Observer must be subclass of BaseTuringMachineObserver")
if observer not in self._observers:
self._observers.append(observer)
def detach_observer(self, observer):
"""Removes the specified observer
"""
try:
self._observers.remove(observer)
except ValueError:
pass
def reset_executed_steps_counter(self):
"""Set the executed steps counter to 0
"""
self._num_executed_steps = 0
def _check_data(self):
"""
Checks if the given information is correct
1- Input alphabet is subset of tape alphabet
2- Blank symbol is into the tape alphabet
3- Initial state is in states
4- Final states are all in states
5- Transition states are defined in states
6- Transition symbols are defined in tape alphabet
        7- Transitions are composed of elements with the specified format:
            (state, symbol) : (nstate, nsymbol, movement)
        If any of the above checks fails, an exception is raised
"""
movements = frozenset([TuringMachine.MOVE_LEFT,
TuringMachine.MOVE_RIGHT,
TuringMachine.NON_MOVEMENT])
if not self._in_alphabet.issubset(self._tape_alphabet):
raise Exception('Input alphabet is not subset of tape alphabet')
if self._blank_sym not in self._tape_alphabet:
raise Exception('Blank symbol is not into the tape alphabet')
if self._init_state not in self._states:
raise Exception('Initial state is not a valid state')
if not self._final_states.issubset(self._states):
raise Exception('Final states are not a subset of states')
for k, v in self._trans_function.items():
if len(k) != 2 or len(v) != 3:
raise Exception('Invalid format in transition %s -> %s' %
(str(k), str(v)))
inv_state = None
if k[0] not in self._states:
inv_state = k[0]
if v[0] not in self._states:
inv_state = v[0]
if inv_state:
raise Exception('Invalid state %s in transition %s -> %s' %
(str(inv_state), str(k), str(v)))
inv_sym = None
if k[1] not in self._tape_alphabet:
inv_sym = k[1]
if v[1] not in self._tape_alphabet:
inv_sym = v[1]
if inv_sym:
raise Exception('Invalid symbol %s in transition %s -> %s' %
(str(inv_sym), str(k), str(v)))
if v[2] not in movements:
raise Exception('Invalid movement %s in transition %s -> %s' %
(str(v[2]), str(k), str(v)))
def __str__(self):
return 'States: %s\n' \
'Input alphabet: %s\n' \
'Tape alphabet: %s\n' \
'Blank symbol: %s\n' \
'Initial state: %s\n' \
'Final states: %s\n' \
'Halt state: %s\n\n' \
'Transition Function:\n%s' \
% (
str(self._states), str(self._in_alphabet),
str(self._tape_alphabet), str(self._blank_sym),
str(self._init_state),
str(self._final_states), str(self._halt_state),
str(self._trans_function)
)
# Turing Machine Observers base class
##############################################################################
class BaseTuringMachineObserver(metaclass=ABCMeta):
@abstractmethod
def on_step_start(self, state, symbol):
raise NotImplementedError()
@abstractmethod
def on_step_end(self, state, symbol, movement):
raise NotImplementedError()
@abstractmethod
def on_tape_changed(self, head_pos):
raise NotImplementedError()
@abstractmethod
def on_head_moved(self, head_pos, old_head_pos):
raise NotImplementedError()
```
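A minimal usage sketch for the `TuringMachine` class above (not from the original repository): it defines a machine over the alphabet `{a}` that scans right until the first blank and then halts in an accepting state. The state names and the `#` blank symbol are illustrative choices.
```python
from tm import TuringMachine

BLANK = "#"
trans = {
    ("q0", "a"): ("q0", "a", TuringMachine.MOVE_RIGHT),
    ("q0", BLANK): ("qh", BLANK, TuringMachine.NON_MOVEMENT),
}
machine = TuringMachine(
    states={"q0", "qh"},
    in_alphabet={"a"},
    tape_alphabet={"a", BLANK},
    trans_function=trans,
    init_state="q0",
    final_states={"qh"},
    halt_state="qh",
    blank_sym=BLANK,
)
machine.set_tape("aaa")
machine.run()                       # returns 0: stopped at the halt state
print(machine.is_at_final_state())  # True
print(machine.is_word_accepted("aa"))
```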
#### File: UTM/src/utm.py
```python
import os
import pkgutil
import sys
import highlighters
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from tm import BaseTuringMachineObserver, TuringMachine, TuringMachineParser
from tm.exceptions import UnknownTransitionException, HaltStateException, \
TapeNotSetException
__program__ = 'Universal Turing Machine Simulator'
__version__ = '1.1'
__author__ = "<NAME>"
__copyright__ = "Copyright 2012-2016, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# UTM Main
##############################################################################
def main():
# Initialized the qt application
app = QtGui.QApplication(sys.argv)
gui = GUI()
gui.init_gui()
gui.show()
sys.exit(app.exec_())
# UTM Qt GUI
##############################################################################
class GUI(QtGui.QWidget):
TAPE_SIZE = 31
TAPE_HEAD = TAPE_SIZE // 2
TAPE_HEAD_LEFT = TAPE_HEAD - 1
TAPE_HEAD_RIGHT = TAPE_HEAD + 1
DEF_WIDTH = 800
DEF_HEIGHT = 600
H_SPACING = 10
V_SPACING = 5
# Tape style(s)
TAPE_HEAD_STYLE = 'QLineEdit { border: 2px solid red; background: white;}'
def __init__(self):
super().__init__()
self.parser = TuringMachineParser()
self.turing_machine = None
self.main_vbox = QtGui.QVBoxLayout(self)
def init_gui(self):
# Configure window
self.setMinimumSize(GUI.DEF_WIDTH, GUI.DEF_HEIGHT)
self.setWindowTitle(__program__)
# Set GUI icon
self._init_icon()
# Add Tape widgets
self._init_tape()
# Add log text box
self._init_log_area()
# Add controls
self._init_control_area()
# Install handlers
self._install_handlers()
self.resize(GUI.DEF_WIDTH, GUI.DEF_HEIGHT)
def redraw_tape(self, head_pos):
blank = self.turing_machine.get_blank_symbol()
# sym = self.turing_machine.getSymbolAt(head_pos)
# self.tape_textboxes[GUI.TAPE_HEAD].setText(
# '' if sym == blank else str(sym))
for i in range(GUI.TAPE_HEAD + 1):
txt_box_index = GUI.TAPE_HEAD - i
tape_index = head_pos - i
sym = self.turing_machine.get_symbol_at(tape_index)
self.tape_textboxes[txt_box_index].setText(
'' if sym == blank else str(sym))
for inc, i in enumerate(range(GUI.TAPE_HEAD + 1, GUI.TAPE_SIZE)):
tape_index = head_pos + inc + 1
sym = self.turing_machine.get_symbol_at(tape_index)
self.tape_textboxes[i].setText('' if sym == blank else str(sym))
def print_error_log(self, error):
"""Prints a message on the log_textbox
Text Color: RED
"""
self.log_textbox.setTextColor(Qt.red)
self.log_textbox.setFontWeight(QtGui.QFont.Normal)
self.log_textbox.append(error)
def print_info_log(self, msg):
"""Prints a message on the log_textbox
Text Color: BLACK
"""
self.log_textbox.setTextColor(Qt.black)
self.log_textbox.setFontWeight(QtGui.QFont.Normal)
self.log_textbox.append(msg)
def print_striking_info_log(self, msg):
"""Prints a message on the log_textbox making it more visible than a
normal log
"""
self.log_textbox.setTextColor(Qt.darkBlue)
self.log_textbox.setFontWeight(QtGui.QFont.Bold)
self.log_textbox.append(msg)
#
# QtGui event handlers
#
def on_set_turing_machine_clicked(self):
tm_str = str(self.src_textbox.toPlainText())
try:
self.parser.clean()
self.parser.parse_string(tm_str)
self.turing_machine = self.parser.create()
self.turing_machine.attach_observer(TuringMachineObserver(self))
self.print_info_log('Turing machine created')
self.print_info_log('Current state: ' +
str(self.turing_machine.get_current_state()))
except Exception as e:
self.print_error_log('Error: %s' % str(e))
def on_set_tape_clicked(self):
tape_str = str(self.tape_textbox.toPlainText())
if self.turing_machine is not None:
self.turing_machine.set_tape(tape_str)
self.turing_machine.set_at_initial_state()
self.print_info_log('Tape value established')
else:
self.print_error_log("Error: The Turing machine must be set"
" before setting the tape")
def on_run_step_clicked(self):
try:
self.turing_machine.run_step()
except HaltStateException as e:
self.print_error_log(str(e))
except TapeNotSetException as e:
self.print_error_log(str(e))
except UnknownTransitionException as e:
self.print_error_log(str(e))
except AttributeError:
self.print_error_log('Error: Turing machine is unset')
except Exception as e:
self.print_error_log(str(type(e)))
def on_run_until_halt_clicked(self):
try:
if self.turing_machine.is_at_halt_state():
self.print_error_log('Error: The Turing Machine is on halt state')
else:
self.print_info_log('---------- Run Until Halt ----------')
try:
while not self.turing_machine.is_at_halt_state():
self.turing_machine.run_step()
except TapeNotSetException as e:
self.print_error_log(str(e))
except UnknownTransitionException as e:
self.print_error_log(str(e))
except AttributeError:
self.print_error_log('Error: Turing machine is unset')
def on_load_clicked(self):
fname = QtGui.QFileDialog.getOpenFileName(self, 'Load file',
os.path.expanduser('~'))
if fname:
f = open(fname, 'r')
fstr = f.read()
self.src_textbox.setPlainText(fstr)
f.close()
self.print_info_log('Loaded file: %s' % fname)
def on_save_clicked(self):
fname = QtGui.QFileDialog.getSaveFileName(self, 'Save file',
os.path.expanduser('~'))
if fname:
f = open(fname, 'w')
fstr = str(self.src_textbox.toPlainText())
f.write(fstr)
f.close()
self.print_info_log('Saved file: %s' % fname)
def on_clear_log_clicked(self):
self.log_textbox.clear()
def on_print_all_tape(self):
if self.turing_machine:
try:
tape_value = ' '.join(self.turing_machine.get_tape_iterator())
self.print_info_log('***************************************')
self.print_info_log('Tape Values:')
self.print_striking_info_log(tape_value)
self.print_info_log('***************************************')
except Exception as e:
self.print_error_log(str(e))
else:
self.print_error_log("Error: The Turing Machine must be set"
" before printing the tape")
#
# 'Private'
#
def _init_icon(self):
data = pkgutil.get_data('resources', 'icon.png')
pix_map = QtGui.QPixmap()
pix_map.loadFromData(data)
self.setWindowIcon(QtGui.QIcon(pix_map))
def _init_tape(self):
self.tape_label = QtGui.QLabel('Tape', self)
self.tape_hbox = QtGui.QHBoxLayout()
# self.tape_lbutton = QtGui.QPushButton('<', self)
# self.tape_rbutton = QtGui.QPushButton('>', self)
self.tape_textboxes = self._create_tape()
# self.tape_hbox.addWidget(self.tape_lbutton)
for txt_box in self.tape_textboxes:
self.tape_hbox.addWidget(txt_box)
# self.tape_hbox.addWidget(self.tape_rbutton)
self.main_vbox.addWidget(self.tape_label, 0, Qt.AlignCenter)
self.main_vbox.addLayout(self.tape_hbox, 1)
self.main_vbox.addSpacing(GUI.V_SPACING)
def _create_tape(self):
tape_txt_boxes = [QtGui.QLineEdit(self) for _ in range(GUI.TAPE_SIZE)]
for txt_box in tape_txt_boxes:
txt_box.setReadOnly(True)
txt_box.setFocusPolicy(Qt.NoFocus)
txt_box.setAlignment(Qt.AlignHCenter)
tape_txt_boxes[GUI.TAPE_HEAD].setStyleSheet(GUI.TAPE_HEAD_STYLE)
return tape_txt_boxes
def _init_log_area(self):
log_vbox = QtGui.QVBoxLayout()
# Add log text box
log_label = QtGui.QLabel('Activity Log', self)
self.log_textbox = QtGui.QTextEdit(self)
self.log_textbox.setReadOnly(True)
log_vbox.addWidget(log_label, 0, Qt.AlignCenter)
log_vbox.addWidget(self.log_textbox)
# Add some control buttons
log_hbox = QtGui.QHBoxLayout()
self.clear_log_btn = QtGui.QPushButton('Clear Log', self)
self.print_all_tape_btn = QtGui.QPushButton('Print All Tape', self)
log_hbox.addWidget(self.print_all_tape_btn)
log_hbox.addWidget(self.clear_log_btn)
log_vbox.addLayout(log_hbox)
# Add all the previous stuff to the window layout
self.main_vbox.addLayout(log_vbox, 1)
self.main_vbox.addSpacing(GUI.V_SPACING)
def _init_control_area(self):
self.ctrl_hbox = QtGui.QHBoxLayout()
# Add source text box and load/save buttons
ctrl_llabel = QtGui.QLabel("TM Source Code", self)
self.src_textbox = QtGui.QTextEdit(self)
highlighters.TMSourceHighlighter(self.src_textbox)
self.src_load_btn = QtGui.QPushButton('Load', self)
self.src_save_btn = QtGui.QPushButton('Save', self)
self.ctrl_lvbox = QtGui.QVBoxLayout()
self.ctrl_lvbox.addWidget(ctrl_llabel, 0, Qt.AlignCenter)
self.ctrl_lvbox.addWidget(self.src_textbox)
ctrl_btn_hbox = QtGui.QHBoxLayout()
ctrl_btn_hbox.addWidget(self.src_load_btn)
ctrl_btn_hbox.addWidget(self.src_save_btn)
self.ctrl_lvbox.addLayout(ctrl_btn_hbox)
# Add control buttons
ctrl_rlabel = QtGui.QLabel("Tape's Initial Value", self)
self.tape_textbox = QtGui.QPlainTextEdit(self)
self.set_tm_btn = QtGui.QPushButton('Set TM', self)
self.set_tape_btn = QtGui.QPushButton('Set Tape', self)
self.run_step_btn = QtGui.QPushButton('Run Step', self)
self.run_all_btn = QtGui.QPushButton('Run Until Halt', self)
self.ctrl_rvbox = QtGui.QVBoxLayout()
self.ctrl_rvbox.addWidget(ctrl_rlabel, 0, Qt.AlignCenter)
self.ctrl_rvbox.addWidget(self.tape_textbox)
self.ctrl_rvbox.addWidget(self.set_tm_btn)
self.ctrl_rvbox.addWidget(self.set_tape_btn)
self.ctrl_rvbox.addWidget(self.run_step_btn)
self.ctrl_rvbox.addWidget(self.run_all_btn)
# Add some tooltips
self.set_tape_btn.setToolTip('Sets the tape values and forces the TM '
'to be at the initial state')
# Add the control area to the main layout
self.ctrl_hbox.addLayout(self.ctrl_lvbox, 2)
self.ctrl_hbox.addSpacing(GUI.H_SPACING)
self.ctrl_hbox.addLayout(self.ctrl_rvbox, 1)
self.main_vbox.addLayout(self.ctrl_hbox, 2)
def _install_handlers(self):
self.set_tm_btn.clicked.connect(self.on_set_turing_machine_clicked)
self.set_tape_btn.clicked.connect(self.on_set_tape_clicked)
self.run_step_btn.clicked.connect(self.on_run_step_clicked)
self.run_all_btn.clicked.connect(self.on_run_until_halt_clicked)
self.src_load_btn.clicked.connect(self.on_load_clicked)
self.src_save_btn.clicked.connect(self.on_save_clicked)
self.clear_log_btn.clicked.connect(self.on_clear_log_clicked)
self.print_all_tape_btn.clicked.connect(self.on_print_all_tape)
class TuringMachineObserver(BaseTuringMachineObserver):
def __init__(self, gui):
self.gui = gui
def on_step_start(self, current_state, current_tape_symbol):
self.gui.print_info_log('+++++++++++++++++++++++++++++++++++++++++++++++')
self.gui.print_info_log('Started step at state "%s" with tape symbol "%s"'
% (str(current_state), str(current_tape_symbol)))
    def on_step_end(self, new_state, written_symbol, movement):
        self.gui.print_info_log('-----------------------------------------------')
        self.gui.print_info_log('Written Symbol: ' + str(written_symbol))
if movement == TuringMachine.MOVE_LEFT:
self.gui.print_info_log('Head moved to the left')
elif movement == TuringMachine.MOVE_RIGHT:
self.gui.print_info_log('Head moved to the right')
else:
self.gui.print_info_log('Head remains at the same position')
self.gui.print_info_log('Current state: ' + str(new_state) +
(' (FINAL)' if self.gui.turing_machine.is_at_final_state() else ''))
def on_tape_changed(self, head_pos):
self.gui.redraw_tape(head_pos)
def on_head_moved(self, head_pos, _):
self.gui.redraw_tape(head_pos)
if __name__ == '__main__':
main()
``` |
{
"source": "jponge/pyappenginetodolist",
"score": 2
} |
#### File: jponge/pyappenginetodolist/todo.py
```python
import wsgiref.handlers
import os
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
class TodoEntry(db.Model):
user = db.UserProperty()
text = db.StringProperty()
class TodoHandler(webapp.RequestHandler):
def authenticate(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
else:
return user
def get(self):
user = self.authenticate()
if not user:
return
if self.request.get('delkey'):
entry = db.get(self.request.get('delkey'))
if entry:
entry.delete()
entries = db.GqlQuery("SELECT * FROM TodoEntry WHERE user = :userid", userid=user)
values = {
'userid': user.nickname(),
'entries': entries,
'logout_url': users.create_logout_url(self.request.uri)
}
tpl = os.path.join(os.path.dirname(__file__), 'todo.html')
self.response.out.write(template.render(tpl, values))
def post(self):
user = users.get_current_user()
if not user:
return
textdata = self.request.get('textdata')
entry = TodoEntry(user=user,
text=textdata)
if textdata:
entry.put()
self.redirect('/')
def main():
mappings = [
('/', TodoHandler),
('/todo', TodoHandler),
('/add', TodoHandler)
]
application = webapp.WSGIApplication(mappings, debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
``` |
{
"source": "j-pong/HYnet2",
"score": 2
} |
#### File: hynet/schedulers/tri_stage_lr.py
```python
import math
from distutils.version import LooseVersion
from typing import Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
class TriStageLR(_LRScheduler, AbsBatchStepScheduler):
    """Tri-stage learning rate scheduler.
    The learning rate is increased linearly from init_lr_scale * lr to the
    optimizer's peak lr over warmup_steps, held at the peak for hold_steps,
    and then decayed exponentially towards final_lr_scale * lr over
    decay_steps; after that it stays at the final value.
    Note that the maximum lr equals optimizer.lr in this scheduler.
    """
def __init__(
self,
optimizer: torch.optim.Optimizer,
init_lr_scale: Union[int, float],
final_lr_scale: Union[int, float],
hold_steps: Union[int, float],
decay_steps: Union[int, float],
warmup_steps: Union[int, float],
last_epoch: int = -1,
):
if LooseVersion(torch.__version__) < LooseVersion("1.1.0"):
raise NotImplementedError(f"Require PyTorch>=1.1.0: {torch.__version__}")
assert check_argument_types()
# calculate LR at each point
self.base_lrs = [group['lr'] for group in optimizer.param_groups]
self.peak_lr = self.base_lrs[0]
self.init_lr = init_lr_scale * self.base_lrs[0]
self.final_lr = final_lr_scale * self.base_lrs[0]
self.warmup_steps = warmup_steps
self.hold_steps = hold_steps
self.decay_steps = decay_steps
assert (
self.warmup_steps + self.hold_steps + self.decay_steps > 0
        ), "the sum of warmup_steps, hold_steps and decay_steps must be positive"
self.warmup_rate = (
(self.peak_lr - self.init_lr) / self.warmup_steps
if self.warmup_steps != 0
else 0
)
self.decay_factor = -math.log(final_lr_scale) / self.decay_steps
# __init__() must be invoked before setting field
# because step() is also invoked in __init__()
super().__init__(optimizer, last_epoch)
def __repr__(self):
return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"
def _decide_stage(self, update_step):
if update_step < self.warmup_steps:
# warmup state
return 0, update_step
offset = self.warmup_steps
if update_step < offset + self.hold_steps:
# hold stage
return 1, update_step - offset
offset += self.hold_steps
if update_step <= offset + self.decay_steps:
# decay stage
return 2, update_step - offset
offset += self.decay_steps
# still here ? constant lr stage
return 3, update_step - offset
def get_lr(self):
stage, steps_in_stage = self._decide_stage(self.last_epoch + 1)
if stage == 0:
lr = self.init_lr + self.warmup_rate * steps_in_stage
elif stage == 1:
lr = self.peak_lr
elif stage == 2:
lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
elif stage == 3:
lr = self.final_lr
else:
raise ValueError("Undefined stage")
return [lr]
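# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the three stages by stepping the scheduler once per batch.
# The model, optimizer and step counts below are placeholders only.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # peak lr
    scheduler = TriStageLR(
        optimizer,
        init_lr_scale=0.01,   # warm up from 1% of the peak lr
        final_lr_scale=0.05,  # decay towards 5% of the peak lr
        hold_steps=200,
        decay_steps=300,
        warmup_steps=100,
    )
    for step in range(700):
        optimizer.step()
        scheduler.step()  # batch-level stepping (AbsBatchStepScheduler)
        if step % 100 == 0:
            print(step, optimizer.param_groups[0]["lr"])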
``` |
{
"source": "jponnela/simbe",
"score": 4
} |
#### File: simbe/example/my_code.py
```python
def fibonacci(n):
a = b = 1
for i in range(n):
yield a
a, b = b, a + b
for k in fibonacci(10):
print(k)
```
#### File: jponnela/simbe/simbe.py
```python
SIMBE_FRAME = "**" # Start new Beamer frame
SIMBE_BULLET = "-" # Insert bullet using itemize
SIMBE_EQN_BEGIN = "--" # Begin equation
SIMBE_EQN_END = "--" # End equation
SIMBE_EQN_END_NN = "--nn" # End equation with no numbering
SIMBE_FIG = "---" # Begin or end figure
SIMBE_CODE = "----" # Begin or end code (lstlisting)
SIMBE_CODE_LONG = "-----" # Begin or end code from file (lstinputlisting)
def read_parms():
""" Read input file name from the user."""
import sys
try:
filename = sys.argv[1]
except:
print("Please provide the input file name.")
sys.exit()
return filename
def read_file(inputfile):
""" Read the lines in each frame to a frame buffer (dictionary)."""
frames = {}
frame_no = 0
for line in open(inputfile):
line = line.rstrip()
if line[0:len(SIMBE_FRAME)] == SIMBE_FRAME:
frame_no += 1
try:
frames[frame_no].append(line)
except:
frames[frame_no] = [line]
return frames
def add_frames(frames, start_frame=1):
""" Add the frame envinronments with titles."""
for frame_no in sorted(frames.keys())[start_frame:]:
frame_title = frames[frame_no][0][len(SIMBE_FRAME):]
frames[frame_no][0] = "\\frametitle{" + frame_title + "}"
frames[frame_no].insert(0, "\\begin{frame}[fragile]")
frames[frame_no].append("\\end{frame}")
def add_lstinputlisting_env(frames, start_frame=1):
for frame_no in sorted(frames.keys())[start_frame:]:
for line_no in range(len(frames[frame_no])):
line = frames[frame_no][line_no]
if line[:len(SIMBE_CODE_LONG)]==SIMBE_CODE_LONG and len(line)>2*len(SIMBE_CODE_LONG) and line[-len(SIMBE_CODE_LONG):] == SIMBE_CODE_LONG:
line = line[len(SIMBE_CODE_LONG):-len(SIMBE_CODE_LONG)]
frames[frame_no][line_no] = "\\lstinputlisting{" + line + "}"
def add_lstlisting_env(frames, start_frame=1):
lstlisting_open = False
for frame_no in sorted(frames.keys())[start_frame:]:
for line_no in range(len(frames[frame_no])):
line = frames[frame_no][line_no]
# We have a 1-line code insertion.
if line[:len(SIMBE_CODE)]==SIMBE_CODE and len(line)>2*len(SIMBE_CODE) and line[-len(SIMBE_CODE):] == SIMBE_CODE:
line = line[len(SIMBE_CODE):-len(SIMBE_CODE)]
frames[frame_no][line_no] = "\\begin{lstlisting}\n" + line + "\n" + "\\end{lstlisting}\n"
# We may have a 3-line code insertion.
else:
if line==SIMBE_CODE and not lstlisting_open:
frames[frame_no][line_no] = "\\begin{lstlisting}"
lstlisting_open = True
elif line==SIMBE_CODE and lstlisting_open:
frames[frame_no][line_no] = "\\end{lstlisting}"
lstlisting_open = False
def add_figure_env(frames, start_frame=1):
figure_open = False
for frame_no in sorted(frames.keys())[start_frame:]:
for line_no in range(len(frames[frame_no])):
if frames[frame_no][line_no] == SIMBE_FIG:
if not figure_open:
figure_open = True
frames[frame_no][line_no] = "\\begin{center} \\begin{figure}"
else:
frames[frame_no][line_no] = "\\end{figure} \\end{center}"
figure_open = False
elif figure_open and frames[frame_no][line_no][0:len(SIMBE_BULLET)] != SIMBE_BULLET:
line = frames[frame_no][line_no]
(figname, figsize) = line.split(",")
frames[frame_no][line_no] = "\\includegraphics[width=" + figsize + "\\textwidth]{" + figname + "}"
elif figure_open and frames[frame_no][line_no][0:len(SIMBE_BULLET)] == SIMBE_BULLET:
line = frames[frame_no][line_no]
frames[frame_no][line_no] = "\\caption{" + line[1:] + "}"
def add_equation_env(frames, start_frame=1):
equation_open = False
for frame_no in sorted(frames.keys())[start_frame:]:
for line_no in range(len(frames[frame_no])):
line = frames[frame_no][line_no]
# We have a 1-line equation.
if line[:len(SIMBE_EQN_BEGIN)]==SIMBE_EQN_BEGIN and len(line)>len(SIMBE_EQN_END_NN):
if line[-len(SIMBE_EQN_END):] == SIMBE_EQN_END:
line = line[len(SIMBE_EQN_BEGIN):-len(SIMBE_EQN_END)]
frames[frame_no][line_no] = "\\begin{equation}\n" + line + "\n" + "\\end{equation}\n"
elif line[-len(SIMBE_EQN_END_NN):] == SIMBE_EQN_END_NN:
line = line[len(SIMBE_EQN_BEGIN):-len(SIMBE_EQN_END_NN)]
frames[frame_no][line_no] = "\\begin{equation}\n" + line + "\n" + "\\nonumber\n \\end{equation}\n"
# We may have a 3-line equation.
else:
if line==SIMBE_EQN_BEGIN and not equation_open:
frames[frame_no][line_no] = "\\begin{equation}"
equation_open = True
elif line==SIMBE_EQN_END and equation_open:
frames[frame_no][line_no] = "\\end{equation}"
equation_open = False
elif line==SIMBE_EQN_END_NN and equation_open:
frames[frame_no][line_no] = "\\nonumber \\end{equation}"
equation_open = False
def add_itemize_env(frames, start_frame=1):
""" Add the itemize environments."""
for frame_no in sorted(frames.keys())[start_frame:]:
frame = frames[frame_no]
new_frame = []
bullet_depth = 0
for line_no in range(len(frame)):
if line_no == 0:
new_frame.append(frame[line_no])
elif line_no > 0:
# Locate bullet depth.
if frame[line_no][0:len(SIMBE_BULLET)] == SIMBE_BULLET and frame[line_no][len(SIMBE_BULLET)] != SIMBE_BULLET:
curr_bullet_depth = 1
elif frame[line_no][0:len(SIMBE_BULLET) + 1] == 1*"\t" + SIMBE_BULLET:
curr_bullet_depth = 2
elif frame[line_no][0:len(SIMBE_BULLET) + 2] == 2*"\t" + SIMBE_BULLET:
curr_bullet_depth = 3
elif frame[line_no][0:len(SIMBE_BULLET) + 3] == 3*"\t" + SIMBE_BULLET:
curr_bullet_depth = 4
elif frame[line_no][0:len(SIMBE_BULLET) + 4] == 1*" " + SIMBE_BULLET:
curr_bullet_depth = 2
elif frame[line_no][0:len(SIMBE_BULLET) + 8] == 2*" " + SIMBE_BULLET:
curr_bullet_depth = 3
elif frame[line_no][0:len(SIMBE_BULLET) + 12] == 3*" " + SIMBE_BULLET:
curr_bullet_depth = 4
else:
curr_bullet_depth = 0
if curr_bullet_depth > bullet_depth:
new_frame.append("\t"*bullet_depth + "\\begin{itemize}")
bullet_depth += 1
elif curr_bullet_depth < bullet_depth:
while (curr_bullet_depth < bullet_depth):
bullet_depth -= 1
new_frame.append("\t"*bullet_depth + "\\end{itemize}")
if curr_bullet_depth == 0:
new_frame.append(frame[line_no])
else:
new_frame.append("\t" + frame[line_no].replace(SIMBE_BULLET, "\\item ", 1))
frames[frame_no] = new_frame
def write_output(frames, outputfile):
""" Print out the final product."""
F = open(outputfile, "w")
for frame_no in frames:
frame = frames[frame_no]
for line in frame:
            F.write(line + "\n")
F.write("\n% ------------------------------------------------------------------------------------------------------------\n")
F.write("\\end{document}\n")
F.close()
def print_frames(frames, start_frame=1):
""" Print out frames and line numbers."""
for frame_no in sorted(frames.keys())[start_frame:]:
print("--------------------------------------")
for line_no in range(len(frames[frame_no])):
print(line_no, frames[frame_no][line_no])
print("--------------------------------------")
# ------------------------------------------------------------------------------------------------------------
# Read file.
inputfile = read_parms()
frames = read_file(inputfile)
#print_frames(frames)
add_frames(frames)
# Deal with ----- (5)
add_lstinputlisting_env(frames, start_frame=1)
# Deal with ---- (4)
add_lstlisting_env(frames)
# Deal with --- (3)
add_figure_env(frames)
# Deal with -- (2)
add_equation_env(frames)
# Deal with - (1)
add_itemize_env(frames)
# Write output
outputfile = inputfile.replace(".simbe.tex", ".tex")
write_output(frames, outputfile)
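# --- Hedged usage notes (not part of the original script) ---
# Example invocation (the file name is illustrative):
#   python simbe.py slides.simbe.tex
# which writes the expanded Beamer source to slides.tex.
#
# Everything before the first '**' line is copied through untouched, so the
# input is expected to start with a regular Beamer preamble (documentclass,
# \begin{document}, ...). A minimal frame afterwards might look like:
#   **Introduction
#   - first bullet
#   - second bullet
#   --E = mc^2--
#   ----print("hello")----
# '**' opens a frame titled "Introduction", single dashes become itemize
# bullets, --...-- expands to a numbered equation and ----...---- to a
# one-line lstlisting block.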
``` |
{
"source": "jpool-nv/apex",
"score": 2
} |
#### File: L0/run_transformer/run_gpt_minimal_test.py
```python
from functools import partial
from typing import List
import time
import torch
from apex.transformer import parallel_state
from apex.transformer.tensor_parallel import model_parallel_cuda_manual_seed
from apex.transformer.pipeline_parallel.utils import setup_microbatch_calculator
from apex.transformer.pipeline_parallel.utils import (
average_losses_across_data_parallel_group,
)
from apex.transformer.pipeline_parallel.utils import get_ltor_masks_and_position_ids
from apex.transformer.pipeline_parallel.schedules.common import build_model
from apex.transformer.pipeline_parallel.schedules.common import (
_get_params_for_weight_decay_optimization,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.testing.standalone_gpt import gpt_model_provider
from apex.transformer.testing import global_vars
from apex.transformer.testing.commons import TEST_SUCCESS_MESSAGE
from apex.transformer.testing.commons import initialize_distributed
MANUAL_SEED = 42
inds = None
data_idx = 0
N_VOCAB = 128
def download_fancy_data():
# import requests
# response = requests.get('https://internet.com/book.txt')
# text = ' '.join(response.text.split())
text = """
An original sentence not subject to any license restrictions, copyright, or royalty payments. Nothing to see here. Commercial or non-commercial use. Research or non-research purposes. The quick brown fox jumps over the lazy dog. Lorem ipsum.
"""
text = text * 1024
encoded = text.encode("ascii", "replace")
ints = [int(encoded[i]) for i in range(len(encoded))]
return torch.tensor(ints)
# build a batch given sequence_len and batch size
def generate_fancy_data_labels(sequence_len, batch_size):
global data_idx
global inds
global MANUAL_SEED
temps = list()
for i in range(batch_size):
if inds is None or data_idx >= len(inds):
# hack as use of RNG will fall out of sync due to pipelines being different
model_parallel_cuda_manual_seed(MANUAL_SEED)
inds = torch.randperm(effective_length, device="cuda")
MANUAL_SEED += 1
data_idx = 0
data_idx_ = data_idx
offset = inds[data_idx_]
data_idx += 1
curr = fancy_data[offset : offset + sequence_len + 1].clone().detach()
temps.append(curr)
temp = torch.stack(temps, dim=0).cuda()
return temp
easy_data = None
def get_batch(int_tensors: List[torch.Tensor]):
data = int_tensors[0]
# Unpack.
tokens_ = data.long()
labels = tokens_[:, 1:].contiguous()
tokens = tokens_[:, :-1].contiguous()
# Get the masks and position ids.
attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
tokens,
N_VOCAB, # tokenizer.eod,
False, # args.reset_position_ids,
False, # args.reset_attention_mask,
False, # args.eod_mask_loss,
)
return tokens, labels, loss_mask, attention_mask, position_ids
# Ref: https://github.com/NVIDIA/Megatron-LM/blob/b31e1296354e979722627a6c4dedafe19b51fa97/pretrain_gpt.py#L75
def loss_func(loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
# Reduce loss for logging.
averaged_loss = average_losses_across_data_parallel_group([loss])
return loss, {"lm loss": averaged_loss[0]}
# Ref: https://github.com/NVIDIA/Megatron-LM/blob/b31e1296354e979722627a6c4dedafe19b51fa97/pretrain_gpt.py#L86
def fwd_step_func(batch, model):
"""Forward step."""
tokens, labels, loss_mask, attention_mask, position_ids = get_batch(batch)
output_tensor = model(tokens, position_ids, attention_mask, labels=labels)
return output_tensor, partial(loss_func, loss_mask)
def train(model, optim, pipeline_model_parallel_size, async_comm):
sequence_len = global_vars.get_args().seq_length
micro_batch_size = global_vars.get_args().micro_batch_size
hidden_size = global_vars.get_args().hidden_size
fwd_bwd_func = forward_backward_pipelining_without_interleaving
tensor_shape = (args.seq_length, args.micro_batch_size, args.hidden_size)
runtime = 0
# training loop
for i in range(3):
since = time.time()
if torch.distributed.get_rank() == 0:
print("begin iter", i)
batch = [
generate_fancy_data_labels(args.seq_length, args.global_batch_size)
for _ in range(pipeline_model_parallel_size)
]
if torch.distributed.get_rank() == 0:
print("finished making batch...")
optim.zero_grad()
fwd_bwd_func(
fwd_step_func, batch, model, forward_only=False, tensor_shape=tensor_shape, async_comm=async_comm
)
if torch.distributed.get_rank() == 0:
print("finished forward step")
optim.step()
if torch.distributed.get_rank() == 0:
print("finished iter", i)
runtime += time.time() - since
return runtime / 3.0
if __name__ == "__main__":
init = True
for async_comm in (False, True):
global fancy_data
global effective_length
if init:
init = False
global_vars.set_global_variables()
args = global_vars.get_args()
fancy_data = download_fancy_data()
effective_length = fancy_data.size(0) // args.seq_length
effective_length = fancy_data.size(0) - args.seq_length
initialize_distributed()
world_size = torch.distributed.get_world_size()
failure = None
args.padded_vocab_size = 128
batch_size = args.global_batch_size
micro_batch_size = args.micro_batch_size
setup_microbatch_calculator(
args.rank,
args.rampup_batch_size,
args.global_batch_size,
args.micro_batch_size,
args.data_parallel_size, # args.data_parallel_size,
)
world_size = torch.distributed.get_world_size()
print(args.tensor_model_parallel_size, "MODEL PARALLEL SIZE")
parallel_state.initialize_model_parallel(
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
)
pipeline_model_parallel_size = (
parallel_state.get_pipeline_model_parallel_world_size()
)
model_parallel_cuda_manual_seed(0)
model = build_model(
gpt_model_provider,
wrap_with_ddp=True,
virtual_pipeline_model_parallel_size=None,
cpu_offload=args.cpu_offload,
)
assert isinstance(model, list), model
_param_groups = _get_params_for_weight_decay_optimization(model)
optim = torch.optim.Adam(_param_groups)
runtime = train(model, optim, args.pipeline_model_parallel_size, async_comm)
parallel_state.destroy_model_parallel()
torch.distributed.barrier()
if torch.distributed.get_rank() == 0:
print(TEST_SUCCESS_MESSAGE)
print("Average Iteration Time:", runtime)
``` |
{
"source": "jpope8/container-escape-dataset",
"score": 3
} |
#### File: container-escape-dataset/src/annotationfile.py
```python
import time
import json
import fileutil
from outstream import OutStream
class AnnotationFile:
"""
AnnotationFile for specified file to provide easy way to save annotations.
"""
def __init__(self, filename):
# open file once, perhaps better to reduce syscalls if a lot of annotating
self._filename = filename
self._file = OutStream( filename, append=True)
def annotateName(self, annotationValue, key='annotationName'):
"""
Saves the annotationValue using key along with the time.
        param: annotationValue str, stored under key along with the time, converted to json, then written
"""
annotationsDictionary = dict()
annotationsDictionary[key] = annotationValue
self.annotateDict(annotationsDictionary)
def annotateDict(self, annotationsDictionary):
"""
Saves the annotationDictionary along with the time.
param: annotationDictionary dict, add time, convert to json, then write to file
"""
annotationTime = time.time()
annotationDate = fileutil.formatTime( annotationTime )
annotationsDictionary['annotationTime'] = annotationTime
annotationsDictionary['annotationDate'] = annotationDate
# Save annotation
#annotationFile = OutStream( os.path.join( logDir , 'annotated.txt'), append=True)
#self._file.writef( '%.3f:{"scenario":"%s", "date":"%s"},\n',
# scenarioTime, annotationKey, scenarioDate )
self._file.writeln( json.dumps(annotationsDictionary) + ',' )
def close(self):
"""
Close the stream wrapped by self.
"""
del(self._file)
#
# Test main function
#
def main():
if( len(sys.argv) != 4 ):
print(' Usage: <time in minutes> <directory to place log files> <attack scenario "A" | "B" | "None">')
print('Example: 1 B')
return
seconds = int(sys.argv[1]) * 60
secondsPassed = 0
interval = 1
attackSecond = random.randint(1, seconds)
print( 'Attack at second ' + str(attackSecond) )
while(secondsPassed < seconds):
## SIMULATE ATTACK, EXACTLY ONCE
if( simulateEvent is True and secondsPassed >= attackSecond ):
scenarioTime = time.time()
scenario.onEvent()
annotate(scenario, scenarioTime, logDir)
percentage = attackSecond/float(seconds)
print('Event occured %.2f%% into experiment'%(percentage) )
simulateEvent = False
## VERBOSE TO LET USER KNOW PERCENTAGE COMPLETE
if( secondsPassed % 60 == 0 ):
execute( 4, 'sudo ls -la ' + auditfile )
percentComplete = 100.0 * secondsPassed / float(seconds)
print( 'Percent complete {0:d} / {1:d} = {2:3.2f}'.format(secondsPassed, seconds, percentComplete) )
secondsPassed = secondsPassed + interval
time.sleep(interval)
if __name__ == '__main__':
main()
```
#### File: container-escape-dataset/src/list_perm.py
```python
import subprocess
import os
# a function to list the files in
# the current directory and
# parse the output.
def list_command(args = '-l'):
# the ls command
cmd = 'ls'
# using the Popen function to execute the
# command and store the result in temp.
# it returns a tuple that contains the
# data and the error if any.
temp = subprocess.Popen([cmd, args], stdout = subprocess.PIPE)
# we use the communicate function
# to fetch the output
output = str(temp.communicate())
# splitting the output so that
# we can parse them line by line
output = output.split("\n")
output = output[0].split('\\')
# a variable to store the output
res = []
# iterate through the output
# line by line
for line in output:
res.append(line)
# print the output
for i in range(1, len(res) - 1):
print(res[i])
return res
# parse the output of the ls
# command and fetch the permissions
# of the files and store them in
# a text file .
def get_permissions():
# get the output of the
# list command
res = list_command('-l')
permissions = {}
# iterate through all the rows
# and retrieve the name of the file
# and its permission.
for i in range(1, len(res) - 1):
line = res[i]
line = line.split(' ')
folder_name = line[len(line) - 1]
permission_value = line[0]
permissions[folder_name] = permission_value
# create a directory called
# outputs to store the output files
try:
os.mkdir('outputs')
except:
pass
os.chdir('outputs')
# open the output file
out = open('permissions.txt', 'w')
out.write('Folder Name Permissions\n\n')
# write to the output file
for folder in permissions:
out.write(folder + ' : ' + permissions[folder] + '\n')
os.chdir('..')
return permissions
if __name__ == '__main__':
list_command('-al')
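    # Hedged addition (not in the original script): also exercise
    # get_permissions(), which parses the 'ls -l' output and writes
    # outputs/permissions.txt for the current directory.
    get_permissions()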
```
#### File: container-escape-dataset/src/scenarioPrivesc.py
```python
import command_line
import random
class ScenarioPrivesc:
def __init__(self):
self._name = 'privesc'
self._annotationFile = None # Set later in init
def getName(self):
"""
Gets the name of the scenario.
"""
return self._name
def init(self, scheduler, experimentSeconds, annotationFile):
"""
Setup any resources for the scenario.
Logging is not active.
"""
self._annotationFile = annotationFile
# Start the container for unauthorized writing to host.
# Clean up any previous attacks
self.execute( 'sudo rm /etc/sudoers.d/010_testuser-nopasswd' )
# Start the container
self.execute( 'docker run -d=true --rm --name ESCAPE_B -v /:/privesc -it alpine_volume_privesc /bin/sh' )
# Schedule the escape/attack
attackSecond = random.randint(1, experimentSeconds)
print( 'SCENARIO B: Schedule to attack at second ' + str(attackSecond) )
scheduler.enter( attackSecond, 1, self.onEvent )
def start(self):
"""
May be called multiple times during experiment.
Logging is active.
"""
def onEvent(self):
"""
Event occurred. For example execute a series of
commands to carry out an attack.
"""
# Order matters, need annotation to occur before the attack starts
self._annotationFile.annotateName( self._name )
# Container Escape and Attack B (modify files from container)
#self.execute( 'docker exec -it ESCAPE_B /privesc/escape.sh' )
self.execute( 'docker exec -it ESCAPE_B /escape.sh' )
print( 'Scenario ' + self._name + ': Attack started' )
def stop(self):
"""
May be called multiple times during experiment.
Logging is active.
"""
def destroy(self):
"""
Tears down the scenario, for example, stop container.
Logging is not active
"""
self.execute( 'sudo docker stop ESCAPE_B' )
def execute( self, command ):
"""
Convenience to call execute and print out results.
"""
result = command_line.execute( command )
for line in result:
print( 'Scenario ' + self._name + ': ' + line)
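# --- Hedged driver sketch (not part of the original module) ---
# Shows how an experiment runner could wire the scenario together with
# Python's sched module and the AnnotationFile class from this repository;
# the experiment length and annotation path below are illustrative.
if __name__ == '__main__':
    import sched
    import time
    from annotationfile import AnnotationFile
    experimentSeconds = 60
    scheduler = sched.scheduler(time.time, time.sleep)
    annotationFile = AnnotationFile('/tmp/annotated.txt')
    scenario = ScenarioPrivesc()
    scenario.init(scheduler, experimentSeconds, annotationFile)
    scenario.start()
    scheduler.run()     # blocks until the scheduled escape has fired
    scenario.stop()
    scenario.destroy()
    annotationFile.close()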
``` |