import pandas as pd
from py_expression_eval import Parser
math_parser = Parser()
def _get_mz_tolerance(qualifiers, mz):
if qualifiers is None:
return 0.1
if "qualifierppmtolerance" in qualifiers:
ppm = qualifiers["qualifierppmtolerance"]["value"]
mz_tol = abs(ppm * mz / 1000000)
return mz_tol
if "qualifiermztolerance" in qualifiers:
return qualifiers["qualifiermztolerance"]["value"]
return 0.1
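# Illustrative example (not in the original source): with a hypothetical qualifier
# {"qualifierppmtolerance": {"value": 10}} and mz = 500.0, the tolerance is
# abs(10 * 500.0 / 1e6) = 0.005 m/z; with no qualifiers the default of 0.1 m/z is used.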
def _get_massdefect_min(qualifiers):
if qualifiers is None:
return 0, 1
if "qualifiermassdefect" in qualifiers:
return qualifiers["qualifiermassdefect"]["min"], qualifiers["qualifiermassdefect"]["max"]
return 0, 1
def _get_minintensity(qualifier):
"""
Returns the absolute minimum intensity, the minimum relative intensity,
and the minimum TIC-relative intensity parsed from the qualifiers
Args:
qualifier (dict): qualifier dictionary for the condition, or None
Returns:
tuple: (min_intensity, min_percent_intensity, min_tic_percent_intensity)
"""
min_intensity = 0
min_percent_intensity = 0
min_tic_percent_intensity = 0
if qualifier is None:
min_intensity = 0
min_percent_intensity = 0
return min_intensity, min_percent_intensity, min_tic_percent_intensity
if "qualifierintensityvalue" in qualifier:
min_intensity = float(qualifier["qualifierintensityvalue"]["value"])
if "qualifierintensitypercent" in qualifier:
min_percent_intensity = float(qualifier["qualifierintensitypercent"]["value"]) / 100
if "qualifierintensityticpercent" in qualifier:
min_tic_percent_intensity = float(qualifier["qualifierintensityticpercent"]["value"]) / 100
# since the subsequent comparison is a strict greater than, if people set it to 100, then they won't get anything.
min_percent_intensity = min(min_percent_intensity, 0.99)
return min_intensity, min_percent_intensity, min_tic_percent_intensity
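# Illustrative example (not in the original source): a hypothetical qualifier
# {"qualifierintensitypercent": {"value": 100}} yields (0, 0.99, 0) -- the 100%
# request is capped at 0.99 so the strict ">" comparisons downstream can still match.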
def _get_exclusion_flag(qualifiers):
if qualifiers is None:
return False
if "qualifierexcluded" in qualifiers:
return True
return False
def _set_intensity_register(ms_filtered_df, register_dict, condition):
if "qualifiers" in condition:
if "qualifierintensityreference" in condition["qualifiers"]:
qualifier_variable = condition["qualifiers"]["qualifierintensitymatch"]["value"]
grouped_df = ms_filtered_df.groupby("scan").sum().reset_index()
for grouped_scan in grouped_df.to_dict(orient="records"):
# Saving into the register
key = "scan:{}:variable:{}".format(grouped_scan["scan"], qualifier_variable)
register_dict[key] = grouped_scan["i"]
return
def _filter_intensitymatch(ms_filtered_df, register_dict, condition):
if "qualifiers" in condition:
if "qualifierintensitymatch" in condition["qualifiers"] and \
"qualifierintensitytolpercent" in condition["qualifiers"]:
qualifier_expression = condition["qualifiers"]["qualifierintensitymatch"]["value"]
qualifier_variable = qualifier_expression[0] #TODO: This assumes the variable is the first character in the expression, likely a bad assumption
grouped_df = ms_filtered_df.groupby("scan").sum().reset_index()
filtered_grouped_scans = []
for grouped_scan in grouped_df.to_dict(orient="records"):
# Reading from the register
key = "scan:{}:variable:{}".format(grouped_scan["scan"], qualifier_variable)
if key in register_dict:
register_value = register_dict[key]
evaluated_new_expression = math_parser.parse(qualifier_expression).evaluate({
qualifier_variable : register_value
})
min_match_intensity, max_match_intensity = _get_intensitymatch_range(condition["qualifiers"], evaluated_new_expression)
scan_intensity = grouped_scan["i"]
#print(key, scan_intensity, qualifier_expression, min_match_intensity, max_match_intensity, grouped_scan)
if scan_intensity > min_match_intensity and \
scan_intensity < max_match_intensity:
filtered_grouped_scans.append(grouped_scan)
else:
# It's not in the register, which means we don't have a match for it
continue
return pd.DataFrame(filtered_grouped_scans)
return ms_filtered_df
def _get_intensitymatch_range(qualifiers, match_intensity):
"""
Computes the (min, max) intensity window around match_intensity using the
qualifierintensitytolpercent qualifier
Args:
qualifiers (dict): qualifier dictionary for the condition
match_intensity (float): intensity value to build the tolerance window around
Returns:
tuple: (min_intensity, max_intensity)
"""
min_intensity = 0
max_intensity = 0
if "qualifierintensitytolpercent" in qualifiers:
tolerance_percent = qualifiers["qualifierintensitytolpercent"]["value"]
tolerance_value = float(tolerance_percent) / 100 * match_intensity
min_intensity = match_intensity - tolerance_value
max_intensity = match_intensity + tolerance_value
return min_intensity, max_intensity
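# Illustrative example (not in the original source): with a hypothetical qualifier
# {"qualifierintensitytolpercent": {"value": 10}} and match_intensity = 1000,
# the window is (900.0, 1100.0); without the qualifier it stays (0, 0).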
def ms2prod_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS2 peak conditions
Args:
condition (dict): parsed query condition
ms1_df (pd.DataFrame): MS1 peak data
ms2_df (pd.DataFrame): MS2 peak data
reference_conditions_register (dict): intensity register; edited in place
Returns:
ms1_df (pd.DataFrame): filtered MS1 peak data
ms2_df (pd.DataFrame): filtered MS2 peak data
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms2_df) == 0:
return ms1_df, ms2_df
ms2_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df
ms2_filtered_df["mz_defect"] = ms2_filtered_df["mz"] - ms2_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_filtered_df[
(ms2_filtered_df["mz_defect"] > massdefect_min) &
(ms2_filtered_df["mz_defect"] < massdefect_max) &
(ms2_filtered_df["i"] > min_int) &
(ms2_filtered_df["i_norm"] > min_intpercent) &
(ms2_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df[(ms2_df["mz"] > mz_min) &
(ms2_df["mz"] < mz_max) &
(ms2_df["i"] > min_int) &
(ms2_df["i_norm"] > min_intpercent) &
(ms2_df["i_tic_norm"] > min_tic_percent_intensity)]
# Setting the intensity match register
_set_intensity_register(ms2_filtered_df, reference_conditions_register, condition)
# Applying the intensity match
ms2_filtered_df = _filter_intensitymatch(ms2_filtered_df, reference_conditions_register, condition)
ms2_list.append(ms2_filtered_df)
if len(ms2_list) == 1:
ms2_filtered_df = ms2_list[0]
else:
ms2_filtered_df = pd.concat(ms2_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms2_filtered_df["scan"])
original_scans = set(ms2_df["scan"])
negation_scans = original_scans - filtered_scans
ms2_filtered_df = ms2_df[ms2_df["scan"].isin(negation_scans)]
if len(ms2_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms2_filtered_df["scan"])
ms2_df = ms2_df[ms2_df["scan"].isin(filtered_scans)]
# Filtering the MS1 data now
ms1_scans = set(ms2_df["ms1scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(ms1_scans)]
return ms1_df, ms2_df
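# Sketch of the condition structure this function assumes (hypothetical values,
# inferred from the accesses above rather than taken from the original query parser):
# condition = {
#     "value": [178.1072],
#     "qualifiers": {"qualifierppmtolerance": {"value": 10}},
# }
# ms1_df, ms2_df = ms2prod_condition(condition, ms1_df, ms2_df, {})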
def ms2nl_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS2 neutral loss conditions
Args:
condition (dict): parsed query condition
ms1_df (pd.DataFrame): MS1 peak data
ms2_df (pd.DataFrame): MS2 peak data
reference_conditions_register (dict): intensity register; edited in place
Returns:
ms1_df (pd.DataFrame): filtered MS1 peak data
ms2_df (pd.DataFrame): filtered MS2 peak data
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms2_df) == 0:
return ms1_df, ms2_df
ms2_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df
ms2_filtered_df["mz_defect"] = ms2_filtered_df["mz"] - ms2_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_filtered_df[
(ms2_filtered_df["mz_defect"] > massdefect_min) &
(ms2_filtered_df["mz_defect"] < massdefect_max) &
(ms2_filtered_df["i"] > min_int) &
(ms2_filtered_df["i_norm"] > min_intpercent) &
(ms2_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz) #TODO: This is incorrect logic if it comes to PPM accuracy
nl_min = mz - mz_tol
nl_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df[
((ms2_df["precmz"] - ms2_df["mz"]) > nl_min) &
((ms2_df["precmz"] - ms2_df["mz"]) < nl_max) &
(ms2_df["i"] > min_int) &
(ms2_df["i_norm"] > min_intpercent) &
(ms2_df["i_tic_norm"] > min_tic_percent_intensity)
]
# Setting the intensity match register
_set_intensity_register(ms2_filtered_df, reference_conditions_register, condition)
# Applying the intensity match
ms2_filtered_df = _filter_intensitymatch(ms2_filtered_df, reference_conditions_register, condition)
ms2_list.append(ms2_filtered_df)
if len(ms2_list) == 1:
ms2_filtered_df = ms2_list[0]
else:
ms2_filtered_df = pd.concat(ms2_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms2_filtered_df["scan"])
original_scans = set(ms2_df["scan"])
negation_scans = original_scans - filtered_scans
ms2_filtered_df = ms2_df[ms2_df["scan"].isin(negation_scans)]
if len(ms2_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms2_filtered_df["scan"])
ms2_df = ms2_df[ms2_df["scan"].isin(filtered_scans)]
# Filtering the MS1 data now
ms1_scans = set(ms2_df["ms1scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(ms1_scans)]
return ms1_df, ms2_df
def ms2prec_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS2 precursor conditions
Args:
condition (dict): parsed query condition
ms1_df (pd.DataFrame): MS1 peak data
ms2_df (pd.DataFrame): MS2 peak data
reference_conditions_register (dict): intensity register; edited in place
Returns:
ms1_df (pd.DataFrame): filtered MS1 peak data
ms2_df (pd.DataFrame): filtered MS2 peak data
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms2_df) == 0:
return ms1_df, ms2_df
ms2_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms2_filtered_df = ms2_df
ms2_filtered_df["precmz_defect"] = ms2_filtered_df["precmz"] - ms2_filtered_df["precmz"].astype(int)
ms2_filtered_df = ms2_filtered_df[(
ms2_filtered_df["precmz_defect"] > massdefect_min) &
(ms2_filtered_df["precmz_defect"] < massdefect_max)
]
else:
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
ms2_filtered_df = ms2_df[(
ms2_df["precmz"] > mz_min) &
(ms2_df["precmz"] < mz_max)
]
ms2_list.append(ms2_filtered_df)
if len(ms2_list) == 1:
ms2_filtered_df = ms2_list[0]
else:
ms2_filtered_df = pd.concat(ms2_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms2_filtered_df["scan"])
original_scans = set(ms2_df["scan"])
negation_scans = original_scans - filtered_scans
ms2_filtered_df = ms2_df[ms2_df["scan"].isin(negation_scans)]
if len(ms2_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
# Filtering the actual data structures
filtered_scans = set(ms2_filtered_df["scan"])
ms2_df = ms2_df[ms2_df["scan"].isin(filtered_scans)]
# Filtering the MS1 data now
if len(ms1_df) > 0:
ms1_scans = set(ms2_df["ms1scan"])
ms1_df = ms1_df[ms1_df["scan"].isin(ms1_scans)]
return ms1_df, ms2_df
def ms1_condition(condition, ms1_df, ms2_df, reference_conditions_register):
"""
Filters the MS1 and MS2 data based upon MS1 peak conditions
Args:
condition (dict): parsed query condition
ms1_df (pd.DataFrame): MS1 peak data
ms2_df (pd.DataFrame): MS2 peak data
reference_conditions_register (dict): intensity register; edited in place
Returns:
ms1_df (pd.DataFrame): filtered MS1 peak data
ms2_df (pd.DataFrame): filtered MS2 peak data
"""
exclusion_flag = _get_exclusion_flag(condition.get("qualifiers", None))
if len(ms1_df) == 0:
return ms1_df, ms2_df
ms1_list = []
for mz in condition["value"]:
if mz == "ANY":
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
ms1_filtered_df = ms1_df
ms1_filtered_df["mz_defect"] = ms1_filtered_df["mz"] - ms1_filtered_df["mz"].astype(int)
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms1_filtered_df = ms1_filtered_df[
(ms1_filtered_df["mz_defect"] > massdefect_min) &
(ms1_filtered_df["mz_defect"] < massdefect_max) &
(ms1_filtered_df["i"] > min_int) &
(ms1_filtered_df["i_norm"] > min_intpercent) &
(ms1_filtered_df["i_tic_norm"] > min_tic_percent_intensity)
]
else:
# Checking defect options
massdefect_min, massdefect_max = _get_massdefect_min(condition.get("qualifiers", None))
mz_tol = _get_mz_tolerance(condition.get("qualifiers", None), mz)
mz_min = mz - mz_tol
mz_max = mz + mz_tol
min_int, min_intpercent, min_tic_percent_intensity = _get_minintensity(condition.get("qualifiers", None))
ms1_filtered_df = ms1_df[
(ms1_df["mz"] > mz_min) &
(ms1_df["mz"] < mz_max) &
(ms1_df["i"] > min_int) &
(ms1_df["i_norm"] > min_intpercent) &
(ms1_df["i_tic_norm"] > min_tic_percent_intensity)]
if massdefect_min > 0 or massdefect_max < 1:
ms1_filtered_df["mz_defect"] = ms1_filtered_df["mz"] - ms1_filtered_df["mz"].astype(int)
ms1_filtered_df = ms1_filtered_df[
(ms1_filtered_df["mz_defect"] > massdefect_min) &
(ms1_filtered_df["mz_defect"] < massdefect_max)
]
# Setting the intensity match register
_set_intensity_register(ms1_filtered_df, reference_conditions_register, condition)
# Applying the intensity match
ms1_filtered_df = _filter_intensitymatch(ms1_filtered_df, reference_conditions_register, condition)
ms1_list.append(ms1_filtered_df)
if len(ms1_list) == 1:
ms1_filtered_df = ms1_list[0]
else:
ms1_filtered_df = pd.concat(ms1_list)
# Apply the negation operator
if exclusion_flag:
filtered_scans = set(ms1_filtered_df["scan"])
original_scans = set(ms1_df["scan"])
negation_scans = original_scans - filtered_scans
ms1_filtered_df = ms1_df[ms1_df["scan"].isin(negation_scans)]
if len(ms1_filtered_df) == 0:
return pd.DataFrame(), pd.DataFrame()
import datetime
import pandas as pd
import plotly.express as px
import streamlit as st
def clean_dataframe(df):
df = df.drop(columns=[0])
df.rename(
columns={
1: "errand_date",
2: "scrape_time",
3: "rekyl_id",
4: "status",
5: "reporter",
6: "apartment",
7: "kategori",
8: "detaljer",
},
inplace=True,
)
return df
def reformat_dataframe(cleaned_df):
reformat_df = (
cleaned_df.groupby(["rekyl_id", "status", "kategori", "reporter", "detaljer"])
.agg({"scrape_time": "min", "errand_date": "min"})
.sort_values(by=["scrape_time"], ascending=False)
.reset_index()
)
reformat_df["scrape_time"] = pd.to_datetime(reformat_df["scrape_time"])
reformat_df["errand_date"] = pd.to_datetime(reformat_df["errand_date"])
return reformat_df
def add_info_flags(reform_df):
pivoted = reform_df.pivot(
values=["scrape_time"],
index=["rekyl_id", "errand_date", "kategori", "reporter", "detaljer"],
columns=["status"],
).reset_index()
pivoted["time_to_complete"] = (
pivoted["scrape_time"]["Avslutad"] - pivoted["errand_date"]
).dt.days
pivoted["is_completed"] = pivoted.apply(
lambda row: "No" if pd.isnull(row.scrape_time.Avslutad) else "Yes", axis=1
)
start_date = datetime.datetime(2021, 9, 5)
pivoted["after_start_scrape"] = start_date < pivoted["errand_date"]
return pivoted
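# Illustrative example (not in the original source): an errand with errand_date
# 2021-09-10 whose "Avslutad" (closed) status was first scraped on 2021-09-15 gets
# time_to_complete = 5 days and is_completed = "Yes".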
def get_closed_stats_per_category(df):
df4 = df[(df["is_completed"] == "Yes") & (df["after_start_scrape"] == True)]
df5 = df4[["rekyl_id", "kategori", "time_to_complete"]]
df5.columns = df5.columns.droplevel(level=1)
df5 = (
df5.groupby(["kategori"])
.agg({"time_to_complete": "mean", "rekyl_id": "count"})
.rename(columns={"time_to_complete": "avg_days", "rekyl_id": "Antal ärenden"})
.reset_index()
.sort_values(by=["kategori"])
)
df5["avg_days"] = df5["avg_days"].astype("int")
return df5
def get_open_stats_per_category(df):
open_errands_df = df[df["is_completed"] == "No"]
open_errands_df.columns = open_errands_df.columns.droplevel(level=1)
return (
open_errands_df.groupby(["kategori"])
.agg({"rekyl_id": "count"})
.rename(columns={"rekyl_id": "num errands"})
.reset_index()
.sort_values(by=["kategori"])
)
def transform_errands_per_date(raw_data):
df = pd.DataFrame(raw_data, columns=["Datum", "Antal ärenden"])
df["Datum"] = | pd.to_datetime(df["Datum"]) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# ## Pandas
# In[1]:
import pandas as pd
import os
# In[2]:
os.getcwd()
# In[7]:
titanic_df=pd.read_csv('/Users/kangjunseo/python programming/파이썬 머신러닝 완벽 가이드/titanic_train.csv')
titanic_df.head(3)
# In[8]:
print(type(titanic_df))
print(titanic_df.shape)
# In[9]:
titanic_df.info()
# In[10]:
titanic_df.describe()
# In[11]:
print(titanic_df['Pclass'].value_counts())
# In[12]:
titanic_df['Pclass'].head()
# ### Converting to a DataFrame
# In[13]:
import numpy as np
# In[17]:
#1d array to DataFrame
col_name1=['col1']
df_list=pd.DataFrame([1,2,3],columns=col_name1)
print(df_list)
arr=np.array([1,2,3])
df_arr=pd.DataFrame(arr,columns=col_name1)
print(df_arr)
# In[18]:
#2d array to DataFrame
col_name2=['col1','col2','col3']
df_list=pd.DataFrame([[1,2,3],
[11,12,13]],columns=col_name2)
print(df_list)
arr=np.array([[1,2,3],
[11,12,13]])
df_arr=pd.DataFrame(arr,columns=col_name2)
print(df_arr)
# In[20]:
#Dictionary to DataFrame
dict={'col1':[1,11],'col2':[2,22],'col3':[3,33]}
df_dict = pd.DataFrame(dict)
print(df_dict)
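# The print above shows (formatting approximate):
#    col1  col2  col3
# 0     1     2     3
# 1    11    22    33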
# #### Dataframe to Others
# In[85]:
#ndarray
arr=df_dict.values
print(arr)
#list
_list=df_dict.values.tolist()
print(_list)
#dictionary
dict=df_dict.to_dict()
print(dict)
# ### Creating and Modifying DataFrame Columns
# #### Adding a new column
# In[25]:
titanic_df['Age_0']=0
titanic_df.head(3)
# In[27]:
titanic_df['Age_by_10']=titanic_df['Age']*10
titanic_df['Family_No']=titanic_df['SibSp']+titanic_df['Parch']+1
titanic_df.head(3)
# In[28]:
titanic_df['Age_by_10']=titanic_df['Age_by_10']+100
titanic_df.head(3)
# ### Deleting DataFrame data
# #### When inplace = False
# In[32]:
#axis0=row, axis1=col
titanic_drop_df=titanic_df.drop('Age_0',axis=1)
titanic_drop_df.head()
# #### When inplace = True
# In[33]:
drop_result=titanic_df.drop(['Age_0','Age_by_10','Family_No'],axis=1,inplace=True)
print(drop_result) # returns None because inplace=True
titanic_df.head(3)
# In[34]:
pd.set_option('display.width',1000)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2020-12
# @Author : <NAME>
'''
This file helps you crawl a city's subway stations and plan routes with Dijkstra's algorithm
- Amap: https://lbs.amap.com/api/webservice/summary
- Bendibao (本地宝): http://sh.bendibao.com/ditie/linemap.shtml
'''
import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
import os
from tqdm import tqdm
from collections import defaultdict
import pickle
import itertools
from selenium import webdriver
from geopy.distance import geodesic
class Scrapy_and_Plan:
'''Modify these parameters as needed'''
def __init__(self,city='上海',city_code='sh',site1='昌吉东路',site2='迪士尼'):
self.city = city
self.city_code= city_code
self.keynum='a2e1307eb761e7ac6f3a87b7e95f234c' # your Amap web service key (ak)
self.site1 = site1
self.site2 = site2
self.user_agent='Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 ' \
'(KHTML, like Gecko) Version/5.1 Safari/534.50'
self.headers = {'User-Agent': self.user_agent}
self.chrome=r'D:\Anaconda\Scripts\chromedriver.exe'
def spyder_by_selenium(self):
print('Crawling subway information for {}...'.format(self.city))
url='http://{}.bendibao.com/ditie/linemap.shtml'.format(self.city_code)
driver = webdriver.Chrome(self.chrome)
driver.implicitly_wait(5)
driver.get(url)
ele_totals = driver.find_elements_by_css_selector('.s-main .line-list')
df = pd.DataFrame(columns=['name', 'site'])
for ele_line in tqdm(ele_totals):
line_name = ele_line.find_element_by_css_selector('.line-list a').text.replace('线路图', '')
# line_names = driver.find_elements_by_css_selector('div[class="wrap"]')
stations = ele_line.find_elements_by_css_selector('a[class="link"]')
for station in stations:
longitude, latitude = self.get_location(station.text)
temp = {'name': station.text, 'site': line_name, 'longitude': longitude, 'latitude': latitude}
df = df.append(temp, ignore_index=True)
driver.quit()
df.to_excel('./data/{}_subway.xlsx'.format(self.city_code), index=False)
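# Hypothetical usage sketch (not from the original file; assumes a valid Amap key
# and a chromedriver binary at self.chrome, plus an existing ./data directory):
#   planner = Scrapy_and_Plan(city='上海', city_code='sh')
#   planner.spyder_by_selenium()   # writes ./data/sh_subway.xlsx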
def spyder_by_bs4(self):
print('Crawling subway information for {}...'.format(self.city))
url='http://{}.bendibao.com/ditie/linemap.shtml'.format(self.city_code)
user_agent='Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11'
headers = {'User-Agent': user_agent}
r = requests.get(url, headers=headers)
r.encoding = r.apparent_encoding
soup = BeautifulSoup(r.text, 'lxml')
all_info = soup.find_all('div', class_='line-list')
df = pd.DataFrame(columns=['name','site'])
"""Classes and functions related to the management of sets of BIDSVariables."""
from copy import copy
import warnings
import re
from collections import OrderedDict
from itertools import chain
import fnmatch
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from .variables import (
SparseRunVariable,
SimpleVariable,
DenseRunVariable,
merge_variables,
BIDSVariable,
)
from bids.utils import listify, matches_entities
class BIDSVariableCollection(object):
"""A container for one or more variables extracted from variable files
at a single level of analysis.
Parameters
----------
variables : list
A list of BIDSVariables or SimpleVariables.
Notes
-----
Variables in the list must all share the same analysis level, which
must be one of 'session', 'subject', or 'dataset' level. For
run-level Variables, use the BIDSRunVariableCollection.
"""
def __init__(self, variables):
if not variables:
raise ValueError("No variables were provided")
SOURCE_TO_LEVEL = {
"events": "run",
"physio": "run",
"stim": "run",
"regressors": "run",
"scans": "session",
"sessions": "subject",
"participants": "dataset",
}
var_levels = set(
[
SOURCE_TO_LEVEL[v.source] if v.source in SOURCE_TO_LEVEL else v.source
for v in variables
]
)
# TODO: relax this requirement & allow implicit merging between levels
if len(var_levels) > 1:
raise ValueError(
"A Collection cannot be initialized from "
"variables at more than one level of analysis. "
"Levels found in input variables: %s" % var_levels
)
elif not var_levels:
raise ValueError(
"None of the provided variables matched any of the known levels, which are: %s"
% (", ".join(sorted(SOURCE_TO_LEVEL.values())))
)
self.level = list(var_levels)[0]
variables = self.merge_variables(variables)
self.variables = {v.name: v for v in variables}
self._index_entities()
# Container for variable groups (see BIDS-StatsModels spec)--maps from
# group names to lists of variables.
self.groups = {}
@staticmethod
def merge_variables(variables, **kwargs):
"""Concatenates Variables along row axis.
Parameters
----------
variables : list
List of Variables to merge. Variables can have
different names (and all Variables that share a name will be
concatenated together).
Returns
-------
list
A list of Variables.
"""
var_dict = OrderedDict()
for v in variables:
if v.name not in var_dict:
var_dict[v.name] = []
var_dict[v.name].append(v)
return [merge_variables(vars_, **kwargs) for vars_ in list(var_dict.values())]
def to_df(
self, variables=None, format="wide", fillna=np.nan, entities=True, timing=True
):
"""Merge variables into a single pandas DataFrame.
Parameters
----------
variables : list of str or BIDSVariable
Optional list of variables or variable names to retain. If strings
are passed, each one gives the name of a variable in the current
collection. If BIDSVariables are passed, they will be used as-is.
If None, all variables are returned. Strings and BIDSVariables
cannot be mixed in the list.
format : {'wide', 'long'}
Whether to return a DataFrame in 'wide' or 'long' format. In 'wide'
format, each row is defined by a unique entity combination, and
each variable is in a separate column. In 'long' format, each row
is a unique combination of entities and variable names, and a
single 'amplitude' column provides the value.
fillna : value
Replace missing values with the specified value.
entities : bool
Whether or not to include a column for each entity.
timing : bool
Whether or not to include onset and duration columns.
Returns
-------
:obj:`pandas.DataFrame`
A pandas DataFrame.
"""
if variables is None:
variables = list(self.variables.keys())
# Can receive already-selected Variables from sub-classes
if not isinstance(variables[0], BIDSVariable):
variables = [v for v in self.variables.values() if v.name in variables]
# Convert all variables to separate DFs.
# Note: bad things can happen if we pass the conditions, entities, and
# timing flags through to the individual variables and then do
# concat/reshaping operations. So instead, we set them all to True
# temporarily, do what we need to, then drop them later if needed.
dfs = [v.to_df(True, True, timing=True) for v in variables]
# Always concatenate along row axis (for format='wide', we'll pivot).
df = pd.concat(dfs, axis=0, sort=True)
all_cols = set(df.columns)
ent_cols = list(all_cols - {"condition", "amplitude", "onset", "duration"})
if format == "long":
df = df.reset_index(drop=True).fillna(fillna)
else:
# Rows in wide format can only be defined by combinations of level entities
# plus (for run-level variables) onset and duration.
valid_vars = {"run", "session", "subject", "dataset", "onset", "duration"}
idx_cols = list(valid_vars & all_cols)
df["amplitude"] = df["amplitude"].fillna("n/a")
wide_df = df.pivot_table(
index=idx_cols, columns="condition", values="amplitude", aggfunc="first"
)
select_cols = list(set(ent_cols) - set(idx_cols))
if entities and select_cols:
ent_df = df.groupby(idx_cols)[select_cols].first()
df = pd.concat([wide_df, ent_df], axis=1)
else:
df = wide_df
df = df.reset_index().replace("n/a", fillna)
df.columns.name = None
# Drop any columns we don't want
if not timing:
df.drop(["onset", "duration"], axis=1, inplace=True)
if not entities:
df.drop(ent_cols, axis=1, inplace=True, errors="ignore")
return df
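# Illustrative usage (hypothetical `collection` object, not from the original file):
#   long_df = collection.to_df(format="long", entities=False)
#   wide_df = collection.to_df(format="wide")   # one row per unique entity combination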
@classmethod
def from_df(cls, data, entities=None, source="contrast"):
"""Create a Collection from a pandas DataFrame.
Parameters
----------
df : :obj:`pandas.DataFrame`
The DataFrame to convert to a Collection. Each
column will be converted to a SimpleVariable.
entities : :obj:`pandas.DataFrame`
An optional second DataFrame containing
entity information.
source : str
The value to set as the source for all Variables.
Returns
-------
BIDSVariableCollection
"""
variables = []
for col in data.columns:
_data = pd.DataFrame(data[col].values, columns=["amplitude"])
if entities is not None:
_data = pd.concat([_data, entities], axis=1, sort=True)
variables.append(SimpleVariable(name=col, data=_data, source=source))
return BIDSVariableCollection(variables)
def clone(self):
"""Returns a shallow copy of the current instance, except that all
variables are deep-cloned.
"""
clone = copy(self)
clone.variables = {k: v.clone() for (k, v) in self.variables.items()}
return clone
def _index_entities(self):
"""Sets current instance's entities based on the existing index.
Notes
-----
Only entity key/value pairs common to all rows in all contained
Variables are returned. E.g., if a Collection contains Variables
extracted from runs 1, 2 and 3 from subject '01', the returned dict
will be {'subject': '01'}; the runs will be excluded as they vary
across the Collection contents.
"""
all_ents = pd.DataFrame.from_records(
[v.entities for v in self.variables.values()]
)
constant = all_ents.apply(lambda x: x.nunique() == 1)
if constant.empty:
self.entities = {}
else:
keep = all_ents.columns[constant]
ents = {k: all_ents[k].dropna().iloc[0] for k in keep}
self.entities = {k: v for k, v in ents.items() if pd.notnull(v)}
def __getitem__(self, var):
if var in self.variables:
return self.variables[var]
keys = list(self.variables.keys())
raise ValueError(
"No variable named '{}' found in this collection. "
"Available names are {}.".format(var, keys)
)
def __setitem__(self, var, obj):
# Ensure name matches collection key, but raise warning if needed.
if obj.name != var:
warnings.warn(
"The provided key to use in the collection ('%s') "
"does not match the passed Column object's existing "
"name ('%s'). The Column name will be set to match "
"the provided key." % (var, obj.name)
)
obj.name = var
self.variables[var] = obj
def match_variables(self, pattern, return_type="name", match_type="unix"):
"""Return columns whose names match the provided pattern.
Parameters
----------
pattern : str, list
One or more regex patterns to match all variable names against.
return_type : {'name', 'variable'}
What to return. Must be one of:
'name': Returns a list of names of matching variables.
'variable': Returns a list of Variable objects whose names
match.
match_type : str
Matching approach to use. Either 'regex' (full-blown regular
expression matching) or 'unix' (unix-style pattern matching
via the fnmatch module).
Returns
-------
A list of all matching variables or variable names
"""
pattern = listify(pattern)
results = []
for patt in pattern:
if match_type.lower().startswith("re"):
patt = re.compile(patt)
vars_ = [v for v in self.variables.keys() if patt.search(v)]
else:
vars_ = fnmatch.filter(list(self.variables.keys()), patt)
if return_type.startswith("var"):
vars_ = [self.variables[v] for v in vars_]
results.extend(vars_)
return results
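# Illustrative usage (hypothetical variable names, not from the original file):
#   collection.match_variables("trial_type*")   # unix-style match, returns names
#   collection.match_variables("^rot_[xyz]$", match_type="regex", return_type="variable")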
class BIDSRunVariableCollection(BIDSVariableCollection):
"""A container for one or more RunVariables--i.e., Variables that have a
temporal dimension.
Parameters
----------
variables : list
A list of SparseRunVariable and/or DenseRunVariable.
sampling_rate : float
Sampling rate (in Hz) to use when working with
dense representations of variables. If None, defaults to 10.
Notes
-----
Variables in the list must all be at the 'run' level. For other
levels (session, subject, or dataset), use the
BIDSVariableCollection.
"""
def __init__(self, variables, sampling_rate=None):
# Don't put the default value in signature because None is passed from
# several places and we don't want multiple conflicting defaults.
if sampling_rate:
if isinstance(sampling_rate, str):
raise ValueError("Sampling rate must be numeric.")
self.sampling_rate = sampling_rate or 10
super(BIDSRunVariableCollection, self).__init__(variables)
def get_dense_variables(self, variables=None):
"""Returns a list of all stored DenseRunVariables."""
if variables is None:
variables = set(self.variables.keys())
return [
v
for v in self.variables.values()
if isinstance(v, DenseRunVariable) and v.name in variables
]
def get_sparse_variables(self, variables=None):
"""Returns a list of all stored SparseRunVariables."""
if variables is None:
variables = set(self.variables.keys())
return [
v
for v in self.variables.values()
if isinstance(v, SparseRunVariable) and v.name in variables
]
def all_dense(self):
return len(self.get_dense_variables()) == len(self.variables)
def all_sparse(self):
return len(self.get_sparse_variables()) == len(self.variables)
def _get_sampling_rate(self, sampling_rate):
"""Parse sampling rate argument and return appropriate value."""
if sampling_rate is None:
return self.sampling_rate
if isinstance(sampling_rate, (float, int)):
return sampling_rate
if sampling_rate == "TR":
trs = {var.run_info[0].tr for var in self.variables.values()}
if not trs:
raise ValueError(
"Repetition time unavailable; specify "
"sampling_rate in Hz explicitly or set to"
" 'highest'."
)
elif len(trs) > 1:
raise ValueError(
"Non-unique Repetition times found "
"({!r}); specify sampling_rate explicitly".format(trs)
)
return 1.0 / trs.pop()
if sampling_rate.lower() == "highest":
dense_vars = self.get_dense_variables()
# If no dense variables are available, fall back on instance SR
if not dense_vars:
return self.sampling_rate
var_srs = [v.sampling_rate for v in dense_vars]
if len(var_srs) == 1:
return var_srs[0]
return max(*var_srs)
raise ValueError(
"Invalid sampling_rate value '{}' provided. Must be "
"a float, None, 'TR', or 'highest'.".format(sampling_rate)
)
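# Illustrative example (not in the original source): for runs with a repetition time
# of 2.0 s, sampling_rate="TR" resolves to 1.0 / 2.0 = 0.5 Hz; sampling_rate=None
# falls back to the collection default (10 Hz unless overridden at construction).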
def _densify_and_resample(
self,
sampling_rate=None,
variables=None,
resample_dense=False,
force_dense=False,
in_place=False,
kind="linear",
):
sr = self._get_sampling_rate(sampling_rate)
_dense, _sparse = [], []
# Filter variables and sort by class
for name, var in self.variables.items():
if variables is not None and name not in variables:
continue
if isinstance(var, DenseRunVariable):
_dense.append(var)
else:
_sparse.append(var)
_variables = {}
if force_dense:
for v in _sparse:
if is_numeric_dtype(v.values):
# To mine the required data from Reddit
import praw
import pandas as pd
# from textblob import TextBlob
# import re
reddit = praw.Reddit(client_id='O819Gp7QK8_o5A', client_secret='<KEY>', user_agent='Reddit WebScraping')
def top_posts(topic):
posts=[]
try:
f_subreddit = reddit.subreddit(topic)
for post in f_subreddit.hot(limit=5):
posts.append([post.title, post.score, post.id, post.num_comments])
posts = pd.DataFrame(posts,columns=['title', 'score', 'id', 'num_comments'])
# posts.sort_values(by=['score','num_comments'], inplace=True, ascending=False)
posts.set_index('title',inplace=True)
return posts
except:
posts.append(["Null","0","0","0"])
posts = pd.DataFrame(posts,columns=['title', 'score', 'id', 'num_comments'])
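# Hypothetical usage (assumes the Reddit credentials above are valid; not from the
# original file):
#   df = top_posts('learnpython')   # 5 hottest posts of r/learnpython, indexed by title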
# Script use to collect incumbents_v5.pkl
# Uses incumbents_v4.pkl and reorders the list in a semi-deterministic manner
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.spatial import distance_matrix
# from scipy.spatial.distance import euclidean
from scipy.spatial import ConvexHull, convex_hull_plot_2d
# Incumbents for 'lasso' from regression tasks
lasso = [{'alpha': 0.0588816078969954,
'fit_intercept': 0,
'normalize': 0,
'max_iter': 1188,
'tol': 0.0142116958607831,
'positive': 0},
{'alpha': 0.01,
'fit_intercept': 1,
'normalize': 0,
'max_iter': 5000,
'tol': 0.0934421821777098,
'positive': 0},
{'alpha': 0.0301443773293404,
'fit_intercept': 1,
'normalize': 1,
'max_iter': 465,
'tol': 0.0994437776399929,
'positive': 0},
{'alpha': 0.0342844214778938,
'fit_intercept': 1,
'normalize': 1,
'max_iter': 20,
'tol': 0.086836065868564,
'positive': 0}]
# Incumbents for 'linear' from regression tasks
linear = [{'alpha': 0.0807158611555724,
'fit_intercept': 1,
'normalize': 1,
'max_iter': 1482,
'tol': 0.000985256913005844},
{'alpha': 0.0158280830848803,
'fit_intercept': 1,
'normalize': 1,
'max_iter': 2024,
'tol': 0.000274213922897436},
{'alpha': 0.0131360370365985,
'fit_intercept': 1,
'normalize': 0,
'max_iter': 27,
'tol': 0.000758983848008941},
{'alpha': 0.0286632897398748,
'fit_intercept': 1,
'normalize': 0,
'max_iter': 257,
'tol': 0.000567925032398133}]
def load_model(model_name):
with open('submissions/switching-optimizer/utils/incumbents_v4.pkl', 'rb') as f:
inc = pickle.load(f)
model_incs = inc.loc[model_name].dropna()
incs = []
for index in model_incs: incs.extend(index)
incs = pd.DataFrame(incs)
return incs
def plot_LR_hull():
model = load_model('linearC') # model name used in v4
mean_point = 10 ** np.log10(model).mean(axis=0)
mean_dist = distance_matrix([np.log10(mean_point)], np.log10(model))
closest_point = model.iloc[mean_dist.argmin()]
hull = ConvexHull(model)
x = model.iloc[hull.vertices, 0].to_numpy()
x = np.append(x, x[0])
y = model.iloc[hull.vertices, 1].to_numpy()
y = np.append(y, y[0])
plt.scatter(model['C'], model['intercept_scaling'])
plt.plot(x, y, color='red')
plt.scatter(mean_point['C'], mean_point['intercept_scaling'], label='mean point')
plt.scatter(closest_point['C'], closest_point['intercept_scaling'], label='closest point to mean')
plt.legend()
plt.xscale('log'); plt.yscale('log')
plt.show()
def plot_LR_hull_and_topN(N=16):
fig, axes = plt.subplots(2, 2)
LR = create_order('linearC')
hull = ConvexHull(LR)
x = LR.iloc[hull.vertices, 0].to_numpy()
x = np.append(x, x[0])
y = LR.iloc[hull.vertices, 1].to_numpy()
y = np.append(y, y[0])
mean_point = 10 ** np.log10(LR).mean(axis=0)
mean_dist = distance_matrix([np.log10(mean_point)], np.log10(LR))
closest_point = LR.iloc[mean_dist.argmin()]
# the incumbent space from v4
for i in range(2):
for j in range(2):
axes[i, j].scatter(LR['C'], LR['intercept_scaling'], color='black')
axes[i, j].plot(x, y, color='black', alpha=0.7)
axes[i, j].scatter(mean_point['C'], mean_point['intercept_scaling'], label='mean point')
axes[i, j].scatter(closest_point['C'], closest_point['intercept_scaling'], label='closest point to mean')
axes[i, j].set_xscale('log'); axes[i, j].set_yscale('log')
# trial 1
LR = LR.iloc[:N, :]
axes[0, 0].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
axes[i, j].legend()
# trial 2
LR = create_order('linearC')
LR = LR.iloc[:N, :]
axes[0, 1].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
# trial 3
LR = create_order('linearC')
LR = LR.iloc[:N, :]
axes[1, 0].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
# trial 4
LR = create_order('linearC')
LR = LR.iloc[:N, :]
axes[1, 1].scatter(LR['C'], LR['intercept_scaling'], s=80, facecolors='none', edgecolors='red', label='top {}'.format(N))
for i in range(2):
for j in range(2):
axes[i, j].legend()
plt.suptitle('LR incumbents')
plt.show()
def create_order(model_name='linearC'):
if model_name == 'linear':
return pd.DataFrame(linear)
if model_name == 'lasso':
return pd.DataFrame(lasso)
model = load_model(model_name)
hull = ConvexHull(model)
mean_point = 10 ** np.log10(model).mean(axis=0)
incs_added = []
hull_vertex_added = []
# finding point closest to mean
mean_dist_all = distance_matrix([np.log10(mean_point)], np.log10(model))
closest_point = model.iloc[mean_dist_all.argmin()]
incs_added.append(mean_dist_all.argmin())
# distance between the vertices
hull_dist = distance_matrix(np.log10(model.iloc[hull.vertices]),
np.log10(model.iloc[hull.vertices]))
# distance of closest point to mean from all vertices
dist_point_vertices = distance_matrix([np.log10(closest_point)],
np.log10(model.iloc[hull.vertices]))
# store incumbent list, ranked through a heuristic
ranked_list = []
# adding point closest to mean as the first entry
ranked_list.extend([closest_point.to_numpy().tolist()])
# adding the convex hull vertex farthest from the point closest to mean
point = model.iloc[hull.vertices].iloc[np.argmax(dist_point_vertices)]
ranked_list.extend([point.to_numpy().tolist()])
hull_vertex_added.append(np.argmax(dist_point_vertices))
curr_idx = np.argmax(dist_point_vertices)
while len(model) > len(ranked_list) and len(hull_vertex_added) < len(hull.vertices) + 1:
candidates = np.argsort(hull_dist[curr_idx])[1:][::-1]
for i in range(len(candidates)):
if candidates[i] not in hull_vertex_added:
curr_idx = candidates[i]
break
point = model.iloc[hull.vertices].iloc[curr_idx]
ranked_list.extend([point.to_numpy().tolist()])
hull_vertex_added.append(curr_idx)
for idx in hull_vertex_added:
incs_added.append(hull.vertices[idx])
model = model.drop(index=incs_added).sample(frac=1)
model = pd.DataFrame(ranked_list, columns=model.columns)
# __author__ : slade
# __time__ : 17/12/21
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
import random
from data_preprocessing import data_preprocessing
from sklearn.externals import joblib
# load data
path1 = 'ensemble_data.txt'
train_data = pd.read_table(path1)
# change columns
train_data.columns = ['uid', 'label', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', 'f12', 'f13',
'f14', 'f15', 'f16', 'f17', 'f18', 'f19', 'f20', 'f21', 'f22', 'f23', 'f24', 'f25', 'f26', 'f27',
'f28', 'f29', 'f30', 'f31', 'f32', 'f33', 'f34', 'f35', 'f36', 'f37', 'f38', 'f39', 'f40', 'f41',
'f42', 'f43', 'f44', 'f45', 'f46', 'f47', 'f48', 'f49', 'f50', 'f51', 'f52', 'f53', 'f54', 'f55',
'f56', 'f57', 'f58', 'f59', 'f60', 'f61', 'f62', 'f63', 'f64', 'f65', 'f66', 'f67', 'f68', 'f69',
'f70', 'f71', 'f72', 'f73', 'f74', 'f75', 'f76', 'f77', 'f78', 'f79', 'f80', 'f81', 'f82', 'f83',
'f84', 'f85', 'f86', 'f87', 'f88', 'f89', 'f90', 'f91', 'f92', 'f93', 'f94', 'f95', 'f96', 'f97',
'f98', 'f99', 'f100', 'f101', 'f102', 'f103', 'f104', 'f105', 'f106', 'f107', 'f108', 'f109',
'f110']
# describe every column
# number of distinct values per column
arrange_data_col = {}
# set of distinct values per column
detail_data_col = {}
for i in train_data.columns:
if i != 'uid':
arrange_data_col[i] = len(set(train_data[i]))
detail_data_col[i] = set(train_data[i])
# Separate categorical from continuous columns: a column with fewer than 10 distinct
# values is treated as categorical; change the threshold of 10 to any number you need
# categorical column names
class_set = []
# continuous column names
continue_set = []
for key in arrange_data_col:
if arrange_data_col[key] >= 10 and key != 'uid':
continue_set.append(key)
class_set = [x for x in train_data.columns if
x not in continue_set and x != 'uid' and x != 'label' and arrange_data_col[x] > 1]
# make the continuous data
continue_reshape_train_data = train_data[continue_set][:]
# remove the null columns here ,but i do not use this function,you can try it
def continuous_columns_nan_count(data, columns):
res = []
for i in columns:
rate = (data[i] < 0).sum() / data[i].shape[0]
res.append((i, rate))
print('we have got the %s' % i)
return res
continue_describe = continuous_columns_nan_count(continue_reshape_train_data, continue_set)
continue_set_classed = [x[0] for x in continue_describe]
continue_set_classed_data = pd.DataFrame()
keep_q_set = {}
# split continuous columns into 10 sub_class columns
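# Illustrative note (not in the original source): a value at or below a column's 10th
# percentile is coded 0, between the 10th and 20th percentiles 1, and so on up to 9;
# missing values encoded as -1 keep the code -1. Duplicate quantiles shrink the set,
# which is why each len(q_set) case is handled separately below.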
for i in continue_set_classed:
assistant_vector = []
q1 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.1)
q2 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.2)
q3 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.3)
q4 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.4)
q5 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.5)
q6 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.6)
q7 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.7)
q8 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.8)
q9 = continue_reshape_train_data[i][continue_reshape_train_data[i] != -1].quantile(0.9)
q_set = set([q1, q2, q3, q4, q5, q6, q7, q8, q9])
keep_q_set[i] = q_set
if len(q_set) == 9:
array_q_set = sorted(q_set)  # sort: set iteration order is not guaranteed to be ascending
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
elif continue_reshape_train_data[i][j] <= array_q_set[3]:
assistant_vector.append(3)
elif continue_reshape_train_data[i][j] <= array_q_set[4]:
assistant_vector.append(4)
elif continue_reshape_train_data[i][j] <= array_q_set[5]:
assistant_vector.append(5)
elif continue_reshape_train_data[i][j] <= array_q_set[6]:
assistant_vector.append(6)
elif continue_reshape_train_data[i][j] <= array_q_set[7]:
assistant_vector.append(7)
elif continue_reshape_train_data[i][j] <= array_q_set[8]:
assistant_vector.append(8)
else:
assistant_vector.append(9)
if len(q_set) == 8:
array_q_set = sorted(q_set)
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
elif continue_reshape_train_data[i][j] <= array_q_set[3]:
assistant_vector.append(3)
elif continue_reshape_train_data[i][j] <= array_q_set[4]:
assistant_vector.append(4)
elif continue_reshape_train_data[i][j] <= array_q_set[5]:
assistant_vector.append(5)
elif continue_reshape_train_data[i][j] <= array_q_set[6]:
assistant_vector.append(6)
elif continue_reshape_train_data[i][j] <= array_q_set[7]:
assistant_vector.append(7)
else:
assistant_vector.append(8)
if len(q_set) == 7:
array_q_set = sorted(q_set)
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
elif continue_reshape_train_data[i][j] <= array_q_set[3]:
assistant_vector.append(3)
elif continue_reshape_train_data[i][j] <= array_q_set[4]:
assistant_vector.append(4)
elif continue_reshape_train_data[i][j] <= array_q_set[5]:
assistant_vector.append(5)
elif continue_reshape_train_data[i][j] <= array_q_set[6]:
assistant_vector.append(6)
else:
assistant_vector.append(7)
if len(q_set) == 6:
array_q_set = sorted(q_set)
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
elif continue_reshape_train_data[i][j] <= array_q_set[3]:
assistant_vector.append(3)
elif continue_reshape_train_data[i][j] <= array_q_set[4]:
assistant_vector.append(4)
elif continue_reshape_train_data[i][j] <= array_q_set[5]:
assistant_vector.append(5)
else:
assistant_vector.append(6)
if len(q_set) == 5:
array_q_set = sorted(q_set)
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
elif continue_reshape_train_data[i][j] <= array_q_set[3]:
assistant_vector.append(3)
elif continue_reshape_train_data[i][j] <= array_q_set[4]:
assistant_vector.append(4)
else:
assistant_vector.append(5)
if len(q_set) == 4:
array_q_set = sorted(q_set)
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
elif continue_reshape_train_data[i][j] <= array_q_set[3]:
assistant_vector.append(3)
else:
assistant_vector.append(4)
if len(q_set) == 3:
array_q_set = sorted(q_set)
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= array_q_set[0]:
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= array_q_set[1]:
assistant_vector.append(1)
elif continue_reshape_train_data[i][j] <= array_q_set[2]:
assistant_vector.append(2)
else:
assistant_vector.append(3)
if len(q_set) == 2:
array_q_set = [x for x in q_set]
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= min(array_q_set):
assistant_vector.append(0)
elif continue_reshape_train_data[i][j] <= max(array_q_set):
assistant_vector.append(1)
else:
assistant_vector.append(2)
if len(q_set) == 1:
array_q_set = [x for x in q_set]
for j in range(continue_reshape_train_data[i].shape[0]):
if continue_reshape_train_data[i][j] == -1:
assistant_vector.append(-1)
elif continue_reshape_train_data[i][j] <= min(array_q_set):
assistant_vector.append(0)
else:
assistant_vector.append(1)
if len(q_set) == 0:
assistant_vector = [-1] * continue_reshape_train_data[i].shape[0]
continue_set_classed_data = pd.concat([continue_set_classed_data, pd.DataFrame(assistant_vector)], axis=1)
print('we have got the continuous column : %s ' % i)
# save the quantiles of each columns, you will load them if u want to deploy a trained model
joblib.dump(keep_q_set, 'keep_q_set.pkl')
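# Hypothetical deployment-time counterpart (not in the original script): reload the
# saved bin edges with `keep_q_set = joblib.load('keep_q_set.pkl')` and apply the
# same thresholds to incoming data before scoring.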
# merge the data
continue_set_classed_data.columns = continue_set_classed
cbind_classed_data_columns = continue_set_classed + class_set
cbind_classed_data = pd.concat(
[train_data['uid'], train_data['label'], continue_set_classed_data, train_data[class_set]], axis=1)
# describe every column again so low-variance columns can be removed
arrange_data_col = {}
detail_data_col = {}
for i in cbind_classed_data_columns:
if i != 'uid' and i != 'label':
arrange_data_col[i] = len(set(cbind_classed_data[i]))
detail_data_col[i] = set(cbind_classed_data[i])
# I do not use this function; if needed, try it
meaningful_col = ['uid', 'label']
for i in cbind_classed_data_columns:
if i != 'uid' and i != 'label':
if arrange_data_col[i] >= 2:
meaningful_col.append(i)
meaningful_data = cbind_classed_data[meaningful_col]
# reshape the merged data and oht the data
reshaped_data = pd.DataFrame()
for i in meaningful_col:
if i != 'uid' and i != 'label':
reshaped_data = pd.concat([reshaped_data, pd.get_dummies(meaningful_data[i], prefix=i)], axis=1)
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'AttributeError error not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
    g = move_df.to_grid(8)
    assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_dist_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_PREV_TO_NEXT not in move_df
move_df.generate_dist_features()
assert_frame_equal(move_df, expected)
def test_generate_time_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
1.0,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1.0,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'time_to_prev',
'time_to_next',
'time_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TIME_PREV_TO_NEXT not in move_df
move_df.generate_time_features()
assert_frame_equal(move_df, expected)
def test_generate_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'speed_to_prev',
'speed_to_next',
'speed_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert SPEED_PREV_TO_NEXT not in move_df
move_df.generate_speed_features()
    assert_frame_equal(move_df, expected)
# coding: utf-8
"""Extract vertical profiles from RHI and PPI.
Authors: <NAME> and <NAME>
"""
from glob import glob
from os import path
from datetime import datetime, timedelta
import pyart
import numpy as np
import pandas as pd
import scipy.io as sio
import matplotlib.pyplot as plt
from radcomp.tools import db2lin, lin2db
from j24 import eprint
R_IKA_HYDE = 64450 # m
AZIM_IKA_HYDE = 81.89208 # deg
DB_SCALED_VARS = ('ZH', 'ZDR')
def lin_agg(db, agg_fun=np.nanmean, **kws):
"""aggregate in linear space"""
lin = db2lin(db)
aggregated = agg_fun(lin, **kws)
return lin2db(aggregated)
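# Example (editor's note, assuming db2lin/lin2db are the usual 10**(x/10) and
# 10*log10(x) conversions): averaging dB-scaled values directly biases the result low,
# which is why the aggregation is done in linear units, e.g.
#     lin_agg(np.array([20.0, 30.0, 40.0]))  # ~35.7 dB, vs. np.nanmean(...) == 30.0 dB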
def calibration(radar, field_name, addition):
"""change radar object calibration by adding a constant value"""
field = radar.fields[field_name].copy()
field['data'] += addition
radar.fields.update({field_name: field})
def _interp(h_target, h_orig, var, agg_fun=np.nanmedian, **kws):
"""numpy.interp wrapper with aggregation on axis 1"""
fun = agg_fun_chooser(agg_fun, **kws)
var_agg = fun(var, axis=1)
return np.interp(h_target, h_orig, var_agg)
def fix_elevation(radar):
"""Correct elevation for antenna transition."""
for i in [0, 1]:
if radar.elevation['data'][i] > 90.0:
radar.elevation['data'][i] = 0.0
def kdp_csu(radar):
    """CSU kdp and processed phidp to radar object"""
from csu_radartools import csu_kdp
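    # NOTE (editor): extract_unmasked_data and add_field_to_radar_object are assumed to
    # be helper functions defined elsewhere in this module/package; they are not
    # imported here.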
dz = extract_unmasked_data(radar, 'reflectivity')
dp = extract_unmasked_data(radar, 'differential_phase')
rng2d, ele2d = np.meshgrid(radar.range['data'], radar.elevation['data'])
kd, fd, sd = csu_kdp.calc_kdp_bringi(dp=dp, dz=dz, rng=rng2d/1000.0,
thsd=12, gs=125.0, window=10)
radar = add_field_to_radar_object(kd, radar, field_name='kdp_csu', units='deg/km',
long_name='Specific Differential Phase',
standard_name='Specific Differential Phase',
dz_field='reflectivity')
radar = add_field_to_radar_object(fd, radar, field_name='FDP', units='deg',
long_name='Filtered Differential Phase',
standard_name='Filtered Differential Phase',
dz_field='reflectivity')
return radar
def kdp_maesaka(radar, **kws):
"""Compute KDP using Maesaka algo from a radar object."""
mask = radar.fields['differential_phase']['data'].mask
try:
kdp_m = pyart.retrieve.kdp_maesaka(radar, **kws)
except IndexError:
# outlier checking sometimes causes trouble (with weak kdp?)
eprint('Skipping outlier check.')
kdp_m = pyart.retrieve.kdp_maesaka(radar, check_outliers=False,
**kws)
return np.ma.masked_array(data=kdp_m[0]['data'], mask=mask)
def kdp_all(radar):
"""all kdp processing methods"""
radar = kdp_csu(radar)
opt = dict(psidp_field='FDP')
#kdp_m=pyart.retrieve.kdp_maesaka(radar)
#kdp_s=pyart.retrieve.kdp_schneebeli(radar, **opt)
#kdp_v=pyart.retrieve.kdp_vulpiani(radar, **opt)
#radar.add_field('kdp_maesaka', kdp_m[0])
#radar.add_field('kdp_s', kdp_s[0])
#radar.add_field('kdp_v', kdp_v[0])
return radar
def kdp_retrieval(radar, method='csu', **kws):
"""wrapper for selecting KDP method"""
if method == 'maesaka':
return kdp_maesaka(radar, **kws)
elif method == 'csu':
radar = kdp_csu(radar)
return radar.fields['kdp_csu'].copy()['data']
raise ValueError('Unknown KDP method.')
def extract_radar_vars(radar, recalculate_kdp=True, kdp_debug=False, **kws):
"""Extract radar variables."""
ZH = radar.fields['reflectivity'].copy()['data']
ZDR = radar.fields['differential_reflectivity'].copy()['data']
RHO = radar.fields['cross_correlation_ratio'].copy()['data']
DP = radar.fields['differential_phase'].copy()['data']
if kdp_debug:
radar = kdp_all(radar)
KDP = radar.fields['kdp_csu'].copy()['data']
elif recalculate_kdp:
KDP = kdp_retrieval(radar, **kws)
else:
KDP = radar.fields['specific_differential_phase'].copy()['data']
return dict(ZH=ZH, ZDR=ZDR, KDP=KDP, RHO=RHO, DP=DP)
def scan_timestamp(radar):
"""scan timestamp in minute resolution"""
t_start = radar.time['units'].split(' ')[-1]
median_ts = np.median(radar.time['data'])
t = pd.to_datetime(t_start) + timedelta(seconds=median_ts)
return t.replace(second=0, microsecond=0).to_datetime64()
def filter_range(rdr_vars, r, r_poi, r_agg):
"""Discard all data that is not within a range from a distance of interest.
"""
rvars = rdr_vars.copy()
rmin = r_poi - r_agg
rmax = r_poi + r_agg
for key in ['ZH', 'ZDR', 'KDP', 'RHO']:
var = rvars[key]
var.set_fill_value(np.nan)
var.mask[r<=rmin] = True
var.mask[r>=rmax] = True
rvars.update({key: var.filled()})
return rvars
def height(radar, r, r_poi):
dr = np.abs(r-r_poi)
ix = dr.argmin(axis=1)
return radar.gate_z['data'][range(ix.size),ix]
def plot_compare_kdp(vrhi):
mask = vrhi.fields['differential_phase']['data'].mask
kdp = pyart.retrieve.kdp_maesaka(vrhi, Clpf=5000)[0]
kdp['data'] = np.ma.masked_array(data=kdp['data'], mask=mask)
vrhi.fields['kdp'] = kdp
fig, axarr = plt.subplots(ncols=2, figsize=(12,5))
disp = pyart.graph.RadarDisplay(vrhi)
disp.plot('specific_differential_phase', vmin=0, vmax=0.3, ax=axarr[0], cmap='viridis')
disp.plot('kdp', vmin=0, vmax=0.3, ax=axarr[1])
return disp, axarr
def agg2vp(hght, rdr_vars, agg_fun=np.nanmedian):
"""Aggregate along r axis to a vertical profile."""
# TODO: Panel
df = pd.Panel(major_axis=hght, data=rdr_vars).apply(np.nanmedian, axis=2)
df.index.name = 'height'
return df
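# NOTE (editor): pandas.Panel (used above) was deprecated in pandas 0.20 and removed in
# pandas 1.0, so agg2vp only runs on older pandas. A rough, untested sketch of an
# equivalent without Panel, aggregating each radar variable over the range axis:
def agg2vp_no_panel(hght, rdr_vars, agg_fun=np.nanmedian):
    """Aggregate along r axis to a vertical profile without pandas.Panel."""
    data = {key: agg_fun(var, axis=1) for key, var in rdr_vars.items()}
    df = pd.DataFrame(data, index=hght)
    df.index.name = 'height'
    return df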
def vrhi2vp(radar, h_thresh=60, Clpf=5000, use_hyy_h=False, **kws):
"""Extract vertical profile from volume scan slice."""
#plot_compare_kdp(radar)
calib_all(radar)
rdr_vars, hght = rhi_preprocess(radar, Clpf=Clpf, **kws)
df = agg2vp(hght, rdr_vars)
df.index.name = 'height'
if use_hyy_h:
h = np.array([580, 1010, 1950, 3650, 5950, 10550])
h_norm = np.linalg.norm(df.index.values-h)
if h_norm > h_thresh:
efmt = 'Altitudes do not match preset values: {} > {}'
raise ValueError(efmt.format(h_norm, h_thresh))
df.index = h
return scan_timestamp(radar), df
def rhi2vp(radar, n_hbins=None, hbins=None, agg_fun=np.nanmedian, **kws):
    """Extract vertical profile from RHI."""
    hbins = hbins or np.linspace(200, 15000, n_hbins)
calib_all(radar)
rdr_vars, hght = rhi_preprocess(radar, **kws)
if hght is None:
return None, None
rvars = dict()
for key in rdr_vars.keys():
db_scale = key in DB_SCALED_VARS
rvars[key] = _interp(hbins, hght, rdr_vars[key], agg_fun,
db_scale=db_scale)
df = pd.DataFrame(index=hbins, data=rvars)
return scan_timestamp(radar), df
def calib_all(radar):
calibration(radar, 'differential_reflectivity', 0.5)
calibration(radar, 'reflectivity', 3)
def rhi_preprocess(radar, r_poi=R_IKA_HYDE, r_agg=1e3, **kws):
"""Process RHI data for aggregation."""
try: # extracting variables
fix_elevation(radar)
rdr_vars = extract_radar_vars(radar, **kws)
except Exception as e:
eprint('[extract error] {e}'.format(e=e))
return None, None
r = radar.gate_x['data'] # horizontal range
rvars = filter_range(rdr_vars, r, r_poi, r_agg)
hght = height(radar, r, r_poi)
return rvars, hght
def agg_fun_chooser(agg_fun, db_scale=False):
"""aggregation of db-scale variables"""
if (agg_fun == np.nanmedian) or not db_scale:
return agg_fun
return lambda x: lin_agg(x, agg_fun=agg_fun)
def create_volume_scan(files):
"""volume scan from multiple radar data files"""
r = None
for f in files:
if r is None:
r = pyart.io.read(f)
continue
r = pyart.util.join_radar(r, pyart.io.read(f))
return r
def volscan_groups(dir_in):
"""Group by time for volume scan processing."""
fnames = pd.Series(glob(path.join(dir_in, '*PPI3_[A-F].raw')))
tstrs = fnames.apply(lambda s: s[-27:-15])
return fnames.groupby(tstrs)
def xarray_workflow(dir_in, dir_out=None, **kws):
"""Extract profiles from volume scans as xarray Dataset."""
g = volscan_groups(dir_in)
vps = dict()
for tstr, df in g:
print(tstr)
df.sort_values(inplace=True)
vs = create_volume_scan(df)
vrhi = pyart.util.cross_section_ppi(vs, [AZIM_IKA_HYDE])
t, vp = vrhi2vp(vrhi, **kws)
vps[t] = vp
df = pd.concat(vps)
df.index.rename(['time', 'height'], inplace=True)
ds = df.to_xarray()
if dir_out is not None:
t = ds.time.values[0]
fname = pd.to_datetime(t).strftime('%Y%m%d_IKA_vpvol.nc')
ds.to_netcdf(path.join(dir_out, fname))
return ds
def xarray_ppi():
pass
def mat_workflow(dir_in, dir_out, fname_supl='IKA_vprhi', overwrite=False,
**kws):
"""LEGACY method to extract profiles and save as mat."""
n_hbins = 297
files = np.sort(glob(path.join(dir_in, "*RHI_HV*.raw")))
ObsTime = []
time_filename = path.basename(files[0])[0:8]
fileOut = path.join(dir_out, time_filename + '_' + fname_supl + '.mat')
if path.exists(fileOut) and not overwrite:
print('{} [notice] file already exists, skipping.'.format(fileOut))
return
for file_indx, filename in enumerate(files):
print(filename)
try: # reading radar data
radar = pyart.io.read(filename)
except Exception as e:
eprint('{fname} [read error] {e}'.format(fname=filename, e=e))
continue
# TODO: very much broken after this line
ts, df = rhi2vp(radar, n_hbins=n_hbins, **kws)
if ts is None:
raise ValueError('no timestamp')
tstr = path.basename(filename)[0:12]
ObsTime.append(datetime.strptime(tstr, '%Y%m%d%H%M').isoformat())
print(fileOut)
vp_rhi = {'ObsTime': ObsTime, 'height': df.index.values}
vp_rhi.update(df.to_dict(orient='list'))
sio.savemat(fileOut, {'VP_RHI': vp_rhi})
return df # for debugging
def nc_workflow(dir_in, dir_out, fname_supl='IKA_vprhi', overwrite=False,
**kws):
"""Extract profiles and save as nc."""
n_hbins = 297
files = np.sort(glob(path.join(dir_in, "*RHI_HV*.raw")))
time_filename = path.basename(files[0])[0:8]
fileOut = path.join(dir_out, time_filename + '_' + fname_supl + '.nc')
vps = dict()
if path.exists(fileOut) and not overwrite:
print('{} [notice] file already exists, skipping.'.format(fileOut))
return
for file_indx, filename in enumerate(files):
print(filename)
try: # reading radar data
radar = pyart.io.read(filename)
except Exception as e:
eprint('{fname} [read error] {e}'.format(fname=filename, e=e))
continue
ts, df = rhi2vp(radar, n_hbins=n_hbins, **kws)
vps[ts] = df
    df = pd.concat(vps)
import math
import sys
import heapq
import time
import re
import pandas as pd
import numpy as np
from collections import namedtuple
from empress.compare import Default_Cmp
from empress.compare import Balace_Cmp
from empress.tree import Tree
from empress.tree import DEFAULT_COLOR
from empress.tree import SELECT_COLOR
import empress.tools as tools
DEFAULT_WIDTH = 4096
DEFAULT_HEIGHT = 4096
class Model(object):
def __init__(self, tree, metadata, highlight_ids=None,
coords_file=None, port=8080):
""" Model constructor.
This initializes the model, including
the tree object and the metadata.
Parameters
----------
tree : skbio.TreeNode
Tree data structure.
metadata : str
Metadata object for the features being plotted on the tree.
        highlight_ids : pd.DataFrame
            Nodes (and associated values) used to highlight branches
        coords_file : str
            Path to a file of precomputed tree coordinates
port : int
port number
Notes
-----
The first column name should be renamed to Node_id
"""
self.TIP_LIMIT = 100
self.zoom_level = 1
self.scale = 1
# convert to empress tree
print('converting tree TreeNode to Tree')
self.tree = Tree.from_tree(tree)
tools.name_internal_nodes(self.tree)
if coords_file is None:
print('calculating tree coords')
self.tree.tip_count_per_subclade()
self.edge_metadata = self.tree.coords(DEFAULT_WIDTH, DEFAULT_HEIGHT)
else:
print('extracting tree coords from file')
self.tree.from_file(coords_file)
self.edge_metadata = self.tree.to_df()
# read in main metadata
self.headers = metadata.columns.values.tolist()
self.edge_metadata = pd.merge(self.edge_metadata, metadata,
how='outer', on="Node_id")
# todo need to warn user that some entries in metadata do not have a mapping to tree
self.edge_metadata = self.edge_metadata[self.edge_metadata.x.notnull()]
self.edge_metadata['index'] = self.edge_metadata['Node_id']
self.edge_metadata = self.edge_metadata.set_index('index')
print(metadata)
self.triangles = pd.DataFrame()
self.selected_tree = pd.DataFrame()
self.selected_root = self.tree
self.triData = {}
self.colored_clades = {}
# cached subtrees
self.cached_subtrees = list()
self.cached_clades = list()
# start = time.time()
# print('starting auto collapse')
# self.default_auto_collapse(100)
# end = time.time()
# print('finished auto collapse in %d' % (end - start))
print('highlight_ids')
self.highlight_nodes(highlight_ids)
self.__clade_level()
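    # Example (editor's sketch): constructing a Model from an skbio TreeNode and a
    # metadata table whose first column has been renamed to 'Node_id' (see note above).
    # The file names below are placeholders.
    #
    #     import skbio
    #     tree = skbio.TreeNode.read('tree.nwk')
    #     metadata = pd.read_csv('feature_metadata.tsv', sep='\t')
    #     metadata = metadata.rename(columns={metadata.columns[0]: 'Node_id'})
    #     model = Model(tree, metadata)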
def layout(self, layout_type):
""" Calculates the coordinates for the tree.
Pipeline function
This calculates the actual coordinates for
the tree. These are not the coordinates that
will be rendered. The calculated coordinates
will be updated as a class property.
The layout will only be utilized during
initialization.
Parameters
----------
layout_type : str
This specifies the layout algorithm to be used.
Note
----
This will wipe the coords and viewcoords in order to
recalculate the coordinates with the new layout.
"""
self.coords = pd.DataFrame()
# These are coordinates scaled to the canvas
        self._canvascoords = np.array([])
# These are coordinates scaled for viewing
        self.viewcoords = np.array([])
# TODO: These will need to be recomputed once the algorithms for
# new layouts has been created.
pass
def select_edge_category(self):
"""
Select categories required by webgl to plot edges
Parameters
----------
Returns
-------
edgeData : pd.Dataframe
dataframe containing information necessary to draw tree in
webgl
"""
# TODO: may want to add in width in the future
attributes = ['x', 'y', 'px', 'py', 'branch_color']
return self.select_category(attributes, 'branch_is_visible')
def select_node_category(self):
"""
Select categories required by webgl to plot nodes
Parameters
----------
Returns
-------
edgeData : pd.Dataframe
dataframe containing information necessary to draw tree in
webgl
"""
attributes = ['x', 'y', 'node_color', 'size']
return self.select_category(attributes, 'node_is_visible')
def select_category(self, attributes, is_visible_col):
""" Returns edge_metadata whose 'is_visible_col is True'
Parameters
----------
edgeData : pd.Dataframe
dataframe containing information necessary to draw tree in
webgl
"""
is_visible = self.edge_metadata[is_visible_col]
edgeData = self.edge_metadata[is_visible]
return edgeData[attributes]
def update_edge_category(self, attribute, category,
new_value=DEFAULT_COLOR, lower="",
equal="", upper=""):
""" Returns edge_metadata with updated width value which tells View
        what to highlight
Parameters
----------
attribute : str
The name of the attribute(column of the table).
category:
The column of table that will be updated such as branch_color
new_value : str
A hex string representing color to change branch
lower : float
The smallest number a feature must match in order for its color to change
equal : str/float
The number/string a feature must match in order for its color to change
upper : float
The largest number a feature can match in order for its color to change
Returns
-------
edgeData : pd.Dataframe
All entries from self.edge_metadata that are visible and match criteria
passed in.
"""
# update the cached trees
new_value = DEFAULT_COLOR if new_value == "DEFAULT" else new_value
for edge_data, _ in self.cached_subtrees:
            if lower != "":
edge_data.loc[edge_data[attribute] > float(lower), category] = new_value
            if equal != "":
try:
value = float(equal)
except ValueError:
value = equal
edge_data.loc[edge_data[attribute] == value, category] = new_value
            if upper != "":
edge_data.loc[edge_data[attribute] < float(upper), category] = new_value
# update the current tree
        if lower != "":
self.edge_metadata.loc[self.edge_metadata[attribute] > float(lower), category] = new_value
        if equal != "":
try:
value = float(equal)
except ValueError:
value = equal
self.edge_metadata.loc[self.edge_metadata[attribute] == value, category] = new_value
        if upper != "":
self.edge_metadata.loc[self.edge_metadata[attribute] < float(upper), category] = new_value
return self.edge_metadata
def highlight_nodes(self, highlight_ids=None):
""" Reads in Node_ids for 'file' and colors their branches red
Parameters
----------
file : csv file containing Node_ids
"""
# with open(highlight_ids, 'r') as file:
# lines = file.readlines()
# ids = [re.split(';', item) for item in lines]
# em = self.edge_metadata
# for i in range(len(ids)):
# em.loc[em['Node_id'] == ids[i][0], 'branch_color'] = ids[i][1]
if highlight_ids is not None:
# idx = self.edge_metadata['Node_id'].isin(highlight_ids)
# self.edge_metadata.loc[idx, 'branch_color'] = highlight_color
self.edge_metadata.update(highlight_ids)
def get_highlighted_values(self, attribute, lower="",
                               equal="", upper=""):
        """ Returns edge_metadata entries that match the arguments
Parameters
----------
attribute : str
The name of the attribute(column of the table).
lower : int
The smallest number a feature must match in order for its color to change
equal : str/int
The number/string a feature must match in order for its color to change
upper : int
The largest number a feature can match in order for its color to change
Returns
-------
edgeData : pd.Dataframe
updated version of edge metadata
"""
columns = list(self.headers)
columns.append('x')
columns.append('y')
        if lower != "":
return self.edge_metadata.loc[self.edge_metadata[attribute] > float(lower), columns]
        if equal != "":
value = equal
return self.edge_metadata.loc[self.edge_metadata[attribute] == value, columns]
        if upper != "":
return self.edge_metadata.loc[self.edge_metadata[attribute] < float(upper), columns]
def get_default_table_values(self):
""" Returns all edge_metadata values need to initialize slickgrid
Parameters
----------
Returns
-------
pd.DataFrame
dataframe containing information necessary to draw tree in
webgl
"""
columns = list(self.headers)
columns.append('x')
columns.append('y')
return self.edge_metadata[columns]
def get_headers(self):
""" Returns a list of the headers for the metadata
Parameters
----------
Returns
-------
return : list
a list of the internal metadata headers
"""
return self.headers
def color_clade(self, clade_field, clade, color):
""" Will highlight a certain clade by drawing a sector around the clade.
        The sector will start at the root of the clade and create an arc from the
        left most to the right most tip. The sector will also have a default arc length
        equal to the distance from the root of the clade to the deepest tip.
Parameters
----------
clade : string
The clade to highlight
color : string (hex string)
The color to highlight the clade with
Returns
-------
return : list
A list of all highlighted clades
"""
if clade_field != 'None':
c = clade
clade_root = self.edge_metadata.loc[self.edge_metadata[clade_field] == clade]
clade_roots_id = clade_root['Node_id'].values
if len(clade_roots_id) == 0:
for c in range(0, len(self.cached_clades)):
if clade in self.cached_clades[c]:
self.cached_clades[c][clade]['color'] = color
return {"empty": []}
i = 0
for clade_root_id in clade_roots_id:
clades = self.tree.find_all(clade_root_id)
for clade in clades:
color_clade = self.tree.get_clade_info(clade)
color_clade['color'] = color
color_clade_s = tools.create_arc_sector(color_clade)
depth = len([node.name for node in clade.ancestors()])
self.colored_clades[c+str(i)] = {'data': color_clade_s,
'depth': depth,
'color': color,
'node': clade}
i += 1
else:
i = 0
clade_name = clade
for (k,v) in self.colored_clades.items():
if clade_name in k:
clade = v['node']
color_clade = self.tree.get_clade_info(clade)
color_clade['color'] = color
color_clade_s = tools.create_arc_sector(color_clade)
depth = len([node.name for node in clade.ancestors()])
self.colored_clades[k] = {'data': color_clade_s,
'depth': depth,
'color': color,
'node': clade}
i += 1
return self.get_colored_clade()
def clear_clade(self, clade):
""" Removes the colored clade
Note this doesn't remove any branches from the tree. It only removes the artifacts
created by javascript
"""
clades = self.colored_clades.keys()
clades = [c for c in clades]
for c in clades:
if clade in c:
self.colored_clades.pop(c)
for colored_clades in self.cached_clades:
clades = colored_clades.keys()
clades = [c for c in clades]
for c in clades:
if clade in c:
colored_clades.pop(c)
return self.get_colored_clade()
def get_colored_clade(self):
CLADE_INDEX = 0
DEPTH_INDEX = 1
clades = [(k, v['depth']) for k, v in self.colored_clades.items()]
clades.sort(key=lambda clade: clade[DEPTH_INDEX])
sorted_clades = [self.colored_clades[clade[CLADE_INDEX]]['data'] for clade in clades]
sorted_clades = [flat for two_d in sorted_clades for flat in two_d]
return {"clades": sorted_clades}
        # TODO: need to add the other items in colored_clades
def refresh_clades(self):
colored_clades = {}
for k, v in self.colored_clades.items():
clade_id = self.colored_clades[k]['id']
clade = self.tree.find(clade_id)
color_clade = self.tree.get_clade_info(clade)
color_clade['color'] = v['color']
color_clade_s = tools.create_arc_sector(color_clade)
depth = len([node.name for node in clade.ancestors()])
colored_clades[k] = {'data': color_clade_s,
'depth': depth,
'color': color_clade['color']}
return colored_clades
    def create_subtree(self, attribute, lower="", equal="", upper=""):
        """ Creates a subtree from the tips whose metadata matches the user's query. Also, if
        the attribute refers to an inner node, then this method will first locate the tips whose
        ancestor is the inner node. This will create a subtree by passing the tips to skbio.shear()
Parameters
----------
attribute : string
The name of the attribute(column of the table).
lower : integer
The smallest number a feature must match in order for its color to change
equal : string/integer
The number/string a feature must match in order for its color to change
upper : integer
The largest number a feature can match in order for its color to change
Returns
-------
edgeData : pd.Dataframe
updated version of edge metadata
"""
        # retrieve the tips of the subtree
nodes = self.get_highlighted_values(attribute, lower, equal, upper)
nodes = nodes['Node_id'].values
tips = list()
for node in nodes:
# node is a tip
if self.tree.find(node).is_tip():
tips.append(node)
continue
            # retrieve the tips of node
for tip in self.tree.find(node).tips():
tips.append(tip.name)
# store the previous tree/metadata
self.cached_subtrees.append((self.edge_metadata, self.tree))
        # grab relevant metadata from the old metadata
columns = list(self.edge_metadata.columns.values)
columns.remove('x')
columns.remove('y')
columns.remove('px')
columns.remove('py')
self.tree = self.tree.shear(tips)
nodes = list()
for node in self.tree.postorder():
nodes.append(node.name)
metadata = self.edge_metadata.loc[self.edge_metadata["Node_id"].isin(nodes), columns]
# create new metadata
self.edge_metadata = self.tree.coords(900, 1500)
self.edge_metadata = self.edge_metadata[['Node_id', 'x', 'y', 'px', 'py']]
self.edge_metadata = pd.merge(self.edge_metadata, metadata,
how='outer', on="Node_id")
self.cached_clades.append(self.colored_clades)
self.colored_clades = self.refresh_clades()
return self.edge_metadata
    def get_old_tree(self):
        """ Retrieves the most recently cached tree if one exists.
"""
if len(self.cached_subtrees) > 0:
self.edge_metadata, self.tree = self.cached_subtrees.pop()
old_clades = self.colored_clades
self.colored_clades = self.cached_clades.pop()
for k, v in old_clades.items():
if k not in self.colored_clades:
self.colored_clades[k] = v
self.colored_clades[k]['color'] = old_clades[k]['color']
self.colored_clades = self.refresh_clades()
return self.edge_metadata
return pd.DataFrame()
    def select_sub_tree(self, x1, y1, x2, y2):
        """ Marks all tips whose coordinates are in the box created by (x1, y1) and (x2, y2). The marked
tips can then be used in collapse_selected_tree
Parameters
----------
x1 : Number
The x coordinate of the top left corner of the select box
y1 : Number
The y coordinate of the top left corner of the select box
x2 : Number
The x coordinate of the bottom right corner of the select box
y2 : Number
The y coordinate of the bottom right corner of the select box
"""
df = self.edge_metadata
(x1, y1, x2, y2) = (float(x1), float(y1), float(x2), float(y2))
(smallX, smallY) = (min(x1, x2), min(y1, y2))
(largeX, largeY) = (max(x1, x2), max(y1, y2))
entries = df.loc[
(df['x'] >= smallX) & (df['x'] <= largeX) &
(df['y'] >= smallY) & (df['y'] <= largeY)]
entries = entries["Node_id"].values
if len(entries) == 0:
return pd.DataFrame()
if len(entries) == 1:
nodes = entries
root = entries
else:
root = self.tree.lowest_common_ancestor(entries)
nodes = [node.name for node in root.postorder(include_self=False)]
selected_tree = self.edge_metadata.loc[self.edge_metadata["Node_id"].isin(nodes)]
self.selected_tree = selected_tree.copy()
self.selected_tree['branch_color'] = SELECT_COLOR
self.selected_root = root
return self.selected_tree
def collapse_selected_tree(self):
clade = self.selected_root
self.__collapse_clade(clade)
self.update_collapse_clades()
return self.edge_metadata.loc[self.edge_metadata['branch_is_visible']]
def update_collapse_clades(self):
"""
Call this method after a series of clade collapse to hide the collapse clade within
collapsed clades
"""
collapse_ids = self.triData.keys()
for node_id in collapse_ids:
ancestors = [a.name for a in self.tree.find(node_id).ancestors()]
for other_id in collapse_ids:
if other_id in ancestors:
self.triData[node_id]['visible'] = False
def get_triangles(self):
triangles = {k: v for (k, v) in self.triData.items() if v['visible']}
self.triangles = pd.DataFrame(triangles).T
return self.triangles
def __get_tie_breaker_num(self):
self.tie_breaker += 1
return self.tie_breaker
def __collapse_clade(self, clade):
if not clade.is_tip():
s = self.tree.get_clade_info(clade)
(rx, ry) = (clade.x2, clade.y2)
theta = s['starting_angle']
(c_b1, s_b1) = (math.cos(theta), math.sin(theta))
(x1, y1) = (s['largest_branch'] * c_b1, s['largest_branch'] * s_b1)
# find right most branch
theta += s['theta']
(c_b2, s_b2) = (math.cos(theta), math.sin(theta))
(x2, y2) = (s['smallest_branch'] * c_b2, s['smallest_branch'] * s_b2)
(x1, y1) = (x1 + rx, y1 + ry)
(x2, y2) = (x2 + rx, y2 + ry)
level = clade.level
else:
(rx, ry) = (clade.parent.x2, clade.parent.y2)
(x1, y1) = (x2, y2) = (clade.x2, clade.y2)
collapsed_nodes = [clade]
nodes = [clade.name]
level = clade.parent.level
collapsed_nodes = [node for node in clade.postorder(include_self=False)]
nodes = [node.name for node in collapsed_nodes]
nId = {"Node_id": clade.name}
root = {'cx': rx, 'cy': ry}
shortest = {'lx': x1, 'ly': y1}
longest = {'rx': x2, 'ry': y2}
color = {'color': "0000FF"}
visible = {'visible': True}
depth = {'depth': level}
self.triData[clade.name] = {**nId, **root, **shortest,
**longest, **color, **visible, **depth}
self.edge_metadata.loc[self.edge_metadata['Node_id'].isin(nodes), 'branch_is_visible'] = False
return collapsed_nodes
def default_auto_collapse(self, tips):
"""
        collapses the clades with the fewest tips until only `tips` percent of the tips remain
WARNING: this method will automatically uncollapse all clades.
"""
self.tie_breaker = 0
# uncollapse everything
self.edge_metadata['branch_is_visible'] = True
self.triData = {}
num_tips = int(self.tree.tip_count * (float(tips)/ 100.0))
collapse_amount = self.tree.tip_count - num_tips
Element = namedtuple('Element', ' level tips breaker clade')
pq = [Element(clade.level, clade.tip_count, self.__get_tie_breaker_num(), clade)
for clade in self.tree.levelorder(include_self=False) if clade.tip_count < collapse_amount]
pq = sorted(pq, key=Default_Cmp)
collapsed_nodes = set()
for clade in pq:
if collapse_amount == 0:
break
if collapse_amount - clade.tips >= 0 and clade.clade not in collapsed_nodes:
if clade.tips > 1:
collapsed = set(self.__collapse_clade(clade.clade))
collapsed_nodes |= collapsed
else:
self.edge_metadata.loc[self.edge_metadata['Node_id'] == clade.clade.name, 'branch_color'] = '0000FF'
collapse_amount -= clade.tips
return self.edge_metadata.loc[self.edge_metadata['branch_is_visible']]
# TODO: Needs to implement set
def balance_auto_collapse(self, tips, threshold):
"""
collapses clades with fewest num of tips until number of tips is TIP_LIMIT
WARNING: this method will automatically uncollapse all clades.
"""
L_CHLD = 0
R_CHLD = 1
# uncollapse everything
self.edge_metadata['branch_is_visible'] = True
self.triData = {}
num_tips = int(self.tree.tip_count * (float(tips)/ 100.0))
collapse_amount = self.tree.tip_count - num_tips
threshold = -1 #float(float(threshold) / 100.0)
cur_node = self.tree
Element = namedtuple('Element', 'node left right')
start_node = Element(self.tree, False, False)
holdings = [start_node]
balances = dict(self.edge_metadata[['Node_id', 'C(diet)[T.Not]']].values)
count = 0
while collapse_amount > 0 and len(holdings) > 0:
count += 1
item = holdings.pop()
node = item.node
if node.is_tip():
self.__collapse_clade(node)
collapse_amount -= node.tip_count
continue
# collapse node if both children have been explored
if item.left and item.right:
self.__collapse_clade(node)
continue
# collapse the subtree that contributes the least to the balance
if abs(balances[node.name]) > threshold:
if balances[node.name] < 0:
if collapse_amount - node.children[L_CHLD].tip_count > 0:
self.__collapse_clade(node.children[L_CHLD])
collapse_amount -= node.children[L_CHLD].tip_count
holdings.append(Element(node, True, True))
holdings.append(Element(node.children[R_CHLD], False, False))
else:
holdings.append(Element(node.children[L_CHLD], False, False))
else:
if collapse_amount - node.children[R_CHLD].tip_count > 0:
self.__collapse_clade(node.children[R_CHLD])
collapse_amount -= node.children[R_CHLD].tip_count
holdings.append(Element(node, True, True))
holdings.append(Element(node.children[L_CHLD], False, False))
else:
holdings.append(Element(node.children[R_CHLD], False, False))
# handle threshold
return self.edge_metadata.loc[self.edge_metadata['branch_is_visible']]
def uncollapse_selected_tree(self, x, y):
"""
Parameters
----------
x: The x coordinate of the double click
y: The y coordinate of the double click
"""
selected_ids = []
# Find triangles that contains the point
for k in self.triData.keys():
if self.is_in_triangle(k, x, y):
selected_ids.append(k)
# Find the highest level of triangle
outer = sys.maxsize
outer_node = None
for id in selected_ids:
if self.triData[id]['depth'] < outer:
outer = self.triData[id]['depth']
outer_node = id
nodes = [node.name for node in self.tree.find(outer_node).postorder(include_self=False)]
for id in self.triData.keys():
if id in nodes:
selected_ids.append(id)
# Find the next highest level of triangle if there is any
inner = sys.maxsize
inner_node = None
for id in selected_ids:
depth = self.triData[id]['depth']
if depth > outer and depth < inner:
inner = self.triData[id]['depth']
inner_node = id
del self.triData[outer_node]
if inner_node:
nodes_inner = [node.name for node in self.tree.find(inner_node).postorder(include_self=False)]
nodes = list(set(nodes)-set(nodes_inner))
for id in self.triData.keys():
self.triData[id]['visible'] = True
self.edge_metadata.loc[self.edge_metadata['Node_id'].isin(nodes), 'branch_is_visible'] = True
return self.edge_metadata.loc[self.edge_metadata['branch_is_visible']]
def is_in_triangle(self, root, x, y):
"""
Check if a point is in triangle of root
"""
x = np.float64(x)
y = np.float64(y)
triangle = self.triData[root]
area = self.triangle_area(
triangle['cx'],
triangle['cy'],
triangle['lx'],
triangle['ly'],
triangle['rx'],
triangle['ry'],
)
sub_1 = self.triangle_area(
x,
y,
triangle['lx'],
triangle['ly'],
triangle['rx'],
triangle['ry'],
)
sub_2 = self.triangle_area(
x,
y,
triangle['cx'],
triangle['cy'],
triangle['rx'],
triangle['ry'],
)
sub_3 = self.triangle_area(
x,
y,
triangle['lx'],
triangle['ly'],
triangle['cx'],
triangle['cy'],
)
return abs(sub_1 + sub_2 + sub_3-area) < 0.001
def triangle_area(self, x1, y1, x2, y2, x3, y3):
"""
Calculate triangle area
"""
return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
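    # Example (editor's note): is_in_triangle compares the triangle's area with the sum
    # of the three sub-triangle areas formed with the query point, e.g. for the unit
    # right triangle (0, 0), (1, 0), (0, 1):
    #     self.triangle_area(0, 0, 1, 0, 0, 1)  # -> 0.5
    #     # a point inside it, such as (0.25, 0.25), yields sub-areas summing to ~0.5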
def get_triangles(self):
triangles = {k: v for (k, v) in self.triData.items() if v['visible']}
        self.triangles = pd.DataFrame(triangles)
# -*- encoding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Time series data manipulation, better with pancetta."""
import datetime
import functools
import itertools
import logging
import math
import numbers
import random
import re
import struct
import time
import lz4.block
import numpy
import numpy.lib.recfunctions
import pandas
from scipy import ndimage
import six
# NOTE(sileht): pandas relies on time.strptime()
# and often triggers http://bugs.python.org/issue7980
# it's due to our heavy thread usage; this is the workaround
# to ensure the module is correctly loaded before we really use it.
time.strptime("2016-02-19", "%Y-%m-%d")
LOG = logging.getLogger(__name__)
class BeforeEpochError(Exception):
"""Error raised when a timestamp before Epoch is used."""
def __init__(self, timestamp):
self.timestamp = timestamp
super(BeforeEpochError, self).__init__(
"%s is before Epoch" % timestamp)
class UnAggregableTimeseries(Exception):
"""Error raised when timeseries cannot be aggregated."""
def __init__(self, reason):
self.reason = reason
super(UnAggregableTimeseries, self).__init__(reason)
class UnknownAggregationMethod(Exception):
"""Error raised when the aggregation method is unknown."""
def __init__(self, agg):
self.aggregation_method = agg
super(UnknownAggregationMethod, self).__init__(
"Unknown aggregation method `%s'" % agg)
class InvalidData(ValueError):
"""Error raised when data are corrupted."""
def __init__(self):
super(InvalidData, self).__init__("Unable to unpack, invalid data")
def round_timestamp(ts, freq):
return pandas.Timestamp(
(pandas.Timestamp(ts).value // freq) * freq)
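# Example (editor's note): `freq` is a granularity expressed in nanoseconds (the code
# below uses `granularity * 10e8`), so flooring a timestamp to a 5-minute bucket is:
#     round_timestamp("2016-02-19 12:03:41", 300 * 10e8)
#     # -> Timestamp('2016-02-19 12:00:00')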
class GroupedTimeSeries(object):
def __init__(self, ts, granularity):
# NOTE(sileht): The whole class assumes ts is ordered and don't have
# duplicate timestamps, it uses numpy.unique that sorted list, but
# we always assume the orderd to be the same as the input.
freq = granularity * 10e8
self._ts = ts
self.indexes = (numpy.array(ts.index, numpy.float) // freq) * freq
self.tstamps, self.counts = numpy.unique(self.indexes,
return_counts=True)
def mean(self):
return self._scipy_aggregate(ndimage.mean)
def sum(self):
return self._scipy_aggregate(ndimage.sum)
def min(self):
return self._scipy_aggregate(ndimage.minimum)
def max(self):
return self._scipy_aggregate(ndimage.maximum)
def median(self):
return self._scipy_aggregate(ndimage.median)
def std(self):
# NOTE(sileht): ndimage.standard_deviation is really more performant
# but it use ddof=0, to get the same result as pandas we have to use
# ddof=1. If one day scipy allow to pass ddof, this should be changed.
return self._scipy_aggregate(ndimage.labeled_comprehension,
remove_unique=True,
func=functools.partial(numpy.std, ddof=1),
out_dtype='float64',
default=None)
def _count(self):
timestamps = self.tstamps.astype('datetime64[ns]', copy=False)
return (self.counts, timestamps)
def count(self):
return pandas.Series(*self._count())
def last(self):
counts, timestamps = self._count()
cumcounts = numpy.cumsum(counts) - 1
values = self._ts.values[cumcounts]
return pandas.Series(values, pandas.to_datetime(timestamps))
def first(self):
counts, timestamps = self._count()
counts = numpy.insert(counts[:-1], 0, 0)
cumcounts = numpy.cumsum(counts)
values = self._ts.values[cumcounts]
return pandas.Series(values, pandas.to_datetime(timestamps))
def quantile(self, q):
return self._scipy_aggregate(ndimage.labeled_comprehension,
func=functools.partial(
numpy.percentile,
q=q,
),
out_dtype='float64',
default=None)
def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs):
if remove_unique:
tstamps = self.tstamps[self.counts > 1]
else:
tstamps = self.tstamps
if len(tstamps) == 0:
return pandas.Series()
values = method(self._ts.values, self.indexes, tstamps,
*args, **kwargs)
timestamps = tstamps.astype('datetime64[ns]', copy=False)
return pandas.Series(values, pandas.to_datetime(timestamps))
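# Example (editor's sketch): bucketing a pandas Series into 60-second groups and taking
# per-bucket means; the values are illustrative only.
#     ts = pandas.Series([1.0, 2.0, 5.0],
#                        index=pandas.to_datetime(["2016-02-19 12:00:05",
#                                                  "2016-02-19 12:00:35",
#                                                  "2016-02-19 12:01:10"]))
#     GroupedTimeSeries(ts, 60).mean()
#     # -> 1.5 at 12:00:00 and 5.0 at 12:01:00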
class TimeSerie(object):
"""A representation of series of a timestamp with a value.
Duplicate timestamps are not allowed and will be filtered to use the
last in the group when the TimeSerie is created or extended.
"""
def __init__(self, ts=None):
if ts is None:
ts = pandas.Series()
self.ts = ts
@staticmethod
def clean_ts(ts):
if ts.index.has_duplicates:
ts = ts[~ts.index.duplicated(keep='last')]
if not ts.index.is_monotonic:
ts = ts.sort_index()
return ts
@classmethod
def from_data(cls, timestamps=None, values=None):
return cls(pandas.Series(values, timestamps))
@classmethod
def from_tuples(cls, timestamps_values):
return cls.from_data(*zip(*timestamps_values))
def __eq__(self, other):
return (isinstance(other, TimeSerie)
and self.ts.all() == other.ts.all())
def __getitem__(self, key):
return self.ts[key]
def set_values(self, values):
t = pandas.Series(*reversed(list(zip(*values))))
self.ts = self.clean_ts(t).combine_first(self.ts)
def __len__(self):
return len(self.ts)
@staticmethod
def _to_offset(value):
if isinstance(value, numbers.Real):
            return pandas.tseries.offsets.Nano(value * 10e8)
'''
CONGESTION ANALYSIS TOOL
Approach & Idea : <NAME>
Author : <NAME>
Acknowledgments : Energy Exemplar Solution Engineering Team
'''
import csv
import pandas as pd
import os
import time
import sys, re
import csv
import numpy as np
from pandas.io.common import EmptyDataError
from sympy import symbols
import xlsxwriter
from shutil import copyfile
# load PLEXOS assemblies:
import sys, clr
sys.path += ['C:/Program Files (x86)/Energy Exemplar/PLEXOS 8.1/']
clr.AddReference('PLEXOS7_NET.Core')
clr.AddReference('EEUTILITY')
# Import from .NET assemblies (both PLEXOS and system)
from PLEXOS7_NET.Core import *
from EEUTILITY.Enums import *
import System
def main(FB, TB, TS, ptdf_data, plexos_db = '', plexos_sol = '', db=None, sol=None):
# Initialize timer and file io
start_time = time.time()
Line = '{}_{}'.format(FB,TB)
temp_file = '{}_sol.csv'.format(Line)
results_folder = './Results {}'.format(Line)
results_excel = '{}.xlsx'.format(Line)
# create the relevant results folder
os.makedirs(results_folder, exist_ok=True)
# Load PLEXOS Input and Output data
if db is None:
db = DatabaseCore()
db.Connection(plexos_db)
print('Loaded PLEXOS input:', time.time() - start_time)
if sol is None:
sol = Solution()
sol.Connection(plexos_sol)
print('Loaded PLEXOS solution:', time.time() - start_time)
# Before we load the PTDF data, let's first find which data we actually want to utilize
# The PTDF data file is produced from a different PLEXOS Analysis script.
with open(ptdf_data) as infile:
# read the first 3 lines of the file
# line1 is the columns header
# line2 is a list of "from" busses
# line3 is a list of "to" busses
line1, line2, line3 = infile.readline(), infile.readline(), infile.readline()
# find only those columns that have the right from and to bus pairs
idxs = [idx for idx, from_bus, to_bus in zip(line1.split(','), line2.split(','), line3.split(',')) if FB in from_bus and TB in to_bus]
print('Computed required PTDF columns:', time.time() - start_time)
# Pull only those data that are related to this from-to pair
ptdf_df = pd.read_csv(ptdf_data, usecols=['0'] + idxs, low_memory=False, skiprows=[1,2])
ptdf_df.rename(columns = {'0':'child_name'}, inplace=True)
print('Loaded PTDF data:', time.time() - start_time)
# Pull a query from the PLEXOS data into a CSV file
# below is the signature of the API method
'''
Boolean QueryToCSV(
String strCSVFile,
Boolean bAppendToFile,
SimulationPhaseEnum SimulationPhaseId,
CollectionEnum CollectionId,
String ParentName,
String ChildName,
PeriodEnum PeriodTypeId,
SeriesTypeEnum SeriesTypeId,
String PropertyList[ = None],
Object DateFrom[ = None],
Object DateTo[ = None],
String TimesliceList[ = None],
String SampleList[ = None],
String ModelName[ = None],
AggregationEnum AggregationType[ = None],
String Category[ = None],
String Separator[ = ,]
)
'''
sol.QueryToCSV(temp_file, False, \
SimulationPhaseEnum.STSchedule, \
CollectionEnum.SystemNodes, \
'', \
'', \
PeriodEnum.Interval, \
SeriesTypeEnum.Periods, \
'22', \
System.DateTime.Parse(TS), \
System.DateTime.Parse(TS).AddHours(1), \
'', \
'', \
'', \
0, \
'', \
','
)
    injection_df = pd.read_csv(temp_file)
from IPython.display import HTML
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from IPython.display import YouTubeVideo
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from matplotlib.colors import ListedColormap
import networkx as nx
import urllib
import os as os
import pandas as pd
import numpy as np
import itertools
import networkx as nx
from bokeh.io import show, output_file
from bokeh.models import Plot, Range1d, MultiLine, Circle, HoverTool, TapTool, BoxSelectTool, BoxZoomTool, ResetTool, PanTool, WheelZoomTool
import bokeh.models.graphs as graphs
#from bokeh.model.graphs import from_networkx, NodesAndLinkedEdges, EdgesAndLinkedNodes
from bokeh.palettes import Spectral4
#######################################################################################################
#######################################################################################################
def run_classification_model(model_data, cv, rf, virus_df):
from HPnex import functions as f
#predictors = [
# 'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
# 'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2', 'neighbors_n',
# 'adamic_adar', 'resource', 'preferential_attach'
#]
predictors = [
'jaccard', 'betweeness_diff', 'in_same_cluster', 'degree_diff',
'FamilyMatch', 'PubMed_diff', 'PubMed_Search_ln1', 'PubMed_Search_ln2'
]
# datasets for sklearn
Y = model_data["label"].values
X = model_data[list(predictors)].values
#### Standardize continuous variables
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from pandas_ml import ConfusionMatrix
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
data_processed = pd.DataFrame(X_std, columns=predictors)
#data_processed.head()
### Encoding categorical variables
le = preprocessing.LabelEncoder()
le.fit(virus_df.viral_family.unique())
model_data['F1'] = le.transform(model_data.ViralFamily1.fillna('Not_Assinged'))
model_data['F2'] = le.transform(model_data.ViralFamily2.fillna('Not_Assinged'))
data_processed['F1'] = model_data.F1
data_processed['F2'] = model_data.F2
data_processed.fillna(0, inplace=True)
### Running cross validation scores and predictions
from sklearn.model_selection import StratifiedKFold ,cross_val_score, train_test_split, cross_val_predict
from sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix, precision_recall_fscore_support
scores = cross_val_score(rf, data_processed, Y, cv=cv)
print('\nAccuracy of model on cross validation dataset while training')
print("Accuracy: %0.6f (+/- %0.6f)" % (scores.mean(), scores.std() * 2))
y_pred = cross_val_predict(rf, data_processed, Y, cv=cv)
print ('accuracy', accuracy_score(Y, y_pred))
print(
        '\nprecision = positive predictive value\nrecall = sensitivity\nf-1 Score = harmonic average of precision and recall\nsupport = n\n'
)
print (classification_report(Y, y_pred))
cr = precision_recall_fscore_support(Y, y_pred)
confusion_matrix = ConfusionMatrix(Y, y_pred)
confusion_matrix.plot(
backend='seaborn', normalized=False, cmap='Blues', annot=True, fmt='d')
plt.show()
data_processed['Virus1'] = model_data.Virus1
data_processed['Virus2'] = model_data.Virus2
data_processed['Prediction'] = y_pred
return data_processed, scores, confusion_matrix, cr
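# Hedged usage sketch (added for illustration; not part of the original HPnex module).
# It assumes `model_data` already holds the predictor columns listed above plus
# 'label', 'ViralFamily1', 'ViralFamily2', 'Virus1' and 'Virus2', and that `virus_df`
# exposes a 'viral_family' column; the fold count and forest settings are arbitrary.
def example_run_classification_model(model_data, virus_df):
    from sklearn.model_selection import StratifiedKFold
    from sklearn.ensemble import RandomForestClassifier
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    rf = RandomForestClassifier(n_estimators=500, n_jobs=-1, random_state=42)
    # Returns the processed feature table, cross-validation scores, the confusion
    # matrix object and the precision/recall/F1/support arrays.
    return run_classification_model(model_data, cv, rf, virus_df)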
#######################################################################################################
#######################################################################################################
def generate_data_for_multilabel_model(training_network, training_network_data, i,
BPnx, data_path, virus_df, dictionary, Species_file_name, plot= False):
from HPnex import functions as f
    IUCN = pd.read_csv(data_path + Species_file_name)
import calendar
from datetime import datetime
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timedelta,
Timestamp,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetime64:
def test_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week[0] == 1
assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
if accessor in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeArray._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
        tm.assert_index_equal(res, exp)
'''
process_timeseries_files_pipeline.py
Processes precipitation timeseries data from raster files downloaded from the
NASA GPM mission.
Author: <NAME>
Date: 17/01/2022
'''
import numpy as np
import xarray as xr
import rioxarray
import datetime
import re
import sys
import argparse
import glob
import os
import pandas as pd
from shapely.geometry import Point
from shapely.ops import transform
import pyproj
#=============================================================================
# This is just a welcome screen that is displayed if no arguments are provided.
#=============================================================================
def print_welcome():
print("\n\n=======================================================================")
print("Hello! I'm going to process some data from NASA GPM.")
print("I need some information to process the data for you:")
print("Use --file_folder the path to the folders where the files to process are")
print("Use --crs to define the coordinate system in the format EPSG:XXXX")
print("Use --x_lon to define the longitude of the point")
print("Use --y_lat to define where the latitude of the point.")
print("Use --time to define the Date time in format %Y-%m-%d:%H%M%S.")
print("=======================================================================\n\n ")
#=============================================================================
# This is the main function that runs the whole thing
#=============================================================================
def main(args=None):
if args is None:
args = sys.argv[1:]
# If no arguments, send to the welcome screen.
if not len(sys.argv) > 1:
full_paramfile = print_welcome()
sys.exit()
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file_folder", dest = "file_folder", help="Folder with the files")
parser.add_argument("-c", "--crs", dest = "crs", help="Coordinate system in the format EPSG:XXXX")
parser.add_argument("-x", "--x_lon",dest = "longitude", help="Longitude of point", type=int)
parser.add_argument("-y", "--y_lat",dest ="latitude", help="Latitude of point", type=int)
parser.add_argument("-t", "--time",dest = "time", help="Date time in format %Y-%m-%d:%H%M%S")#, type=int)
args = parser.parse_args()
file_folder = args.file_folder
coordinate = args.crs
x_lon_to_slice = args.longitude
y_lat_to_slice = args.latitude
time_to_slice = args.time
print(time_to_slice)
time_to_slice = datetime.datetime.strptime(time_to_slice, "%Y-%m-%d:%H%M%S")
print(type(time_to_slice))
print(time_to_slice)
# extract all raster files from the given folder
os.chdir(file_folder)
file_names = []
for file in glob.glob("*.bil"):
file_names.append(file)
print(f'These are the files I am going to process: {file_names}')
print(f'file folder: {file_folder},\
longitude: {x_lon_to_slice},\
latitude: {y_lat_to_slice}, \
full_date: {time_to_slice}')
def sort_file_list(file_list):
"""
Sort list of files based on date given on the filename.
Parameters
----------
file_list : list of str
List of files to sort.
Returns
----------
file_list_sorted : list of str
List of sorted files.
Author: MRSO
"""
file_list_sorted=[]
        timeformat = "%Y%m%d"  # this is how your timestamp looks
regex = re.compile("Calib_rainfall_([0-9]{8})-S([0-9]{6})")
#Calib_rainfall_20140110-S000000-bil
def gettimestamp(thestring):
m = regex.search(thestring)
print(m)
return datetime.datetime.strptime(m.groups()[0], timeformat)
for fn in sorted(file_list, key=gettimestamp):
file_list_sorted.append(fn)
return file_list_sorted
def extract_datetime_from_file(file_name):
"""
Extract date from a file name and convert it to a datetime object.
Parameters
----------
file_name : str
Name of file to extract date from.
Returns
----------
date_formatted : datetime
Date extracted from filename.
"""
date_file_1 = re.search("([0-9]{8})", file_name)
hour_file_1 = re.search("(S[0-9]{6})",file_name)
date_number_1 = date_file_1.group(0)
hour_file_1 = hour_file_1.group(0)
year = int(date_number_1[0:4])
month = int(date_number_1[4:6])
day = int(date_number_1[6:8])
hour = int(hour_file_1[1:3])
minute = int(hour_file_1[3:5])
second = int(hour_file_1[5:7])
date_formatted = datetime.datetime(year, month, day, hour, minute, second)
return date_formatted
def output_precipitation_timeseries(lon, lat, netcdf_filename):
"""
Extract a precipitation timeseries from a netCDF file given a lat, lon point.
Parameters
----------
lon : int
Longitude.
lat : int
Latitude.
netcdf_filename : str
Name of netCDF (.nc) file to extract date from.
Returns
----------
precip_timeseries : list of int
Timeseries of precipitation for the given lat, lon coordinates.
"""
joint_ds = xr.open_dataset(netcdf_filename, engine="rasterio")
print(joint_ds)
print('this is the joint business')
print(joint_ds.sel(x=lon, y = lat, method="nearest").precipitation)
precip_timeseries = joint_ds.sel(x=lon, y = lat, method="nearest").precipitation
# this following line sometimes fails
#precip_timeseries = joint_ds.sel(x=lon, y = lat, method="nearest").precipitation.to_numpy().ravel()
return precip_timeseries
def output_precipitation_raster(time_to_slice, netcdf_filename):
"""
Slice a netCDF file from a timeslice and create new netCDF file with the sliced data.
Parameters
----------
time_to_slice : datetime
Date and time to slice from the data.
netcdf_filename : str
Name of netCDF (.nc) file to extract date from.
Returns
----------
None
"""
# could potentially increase functionality by adding output data format: netcdf or raster
joint_ds = xr.open_dataset(netcdf_filename, engine="rasterio")
sliced_joint_ds = joint_ds.sel(time=time_to_slice).precipitation
date_string_name = time_to_slice.strftime('%Y%m%d-%H%M%S')
sliced_joint_ds.to_netcdf(f'output_precipitation_raster_{date_string_name}.nc', mode='w', format='NETCDF3_64BIT')
#return sliced_joint_ds
def concatenate_raster_files(dataset_names, output_joint_file_name):
"""
Read from a list of raster files, concatenate them along the time direction\
and create a netCDF file.
Parameters
----------
dataset_names : list of str
Names of the raster files to concatenate.
output_joint_file_name : str
Name of output file.
Returns
----------
joint_ds : xarray dataset
Concatenated raster files.
date_list : list of datetime
Dates corresponding to the raster files.
"""
joint_ds_list = []
date_list = []
for i in range(len(dataset_names)):
date_file = re.search("([0-9]{8})", dataset_names[i])
hour_file = re.search("(S[0-9]{6})", dataset_names[i])
date_file = extract_datetime_from_file(dataset_names[i])
xds = xr.open_dataset(dataset_names[i], engine="rasterio")
# the spatial reference is the coordinate system information
expanded_ds = xds.expand_dims("time").assign_coords(time=("time", [date_file]))
expanded_ds = expanded_ds.drop('band')
expanded_ds = expanded_ds.rename({'band_data':'precipitation'})
joint_ds_list.append(expanded_ds)
date_list.append(date_file)
joint_ds = xr.concat(joint_ds_list, dim='time')
joint_ds['precipitation'].attrs = {'description':'precipitation amount in mm/s'}
joint_ds.to_netcdf(output_joint_file_name, mode='w', format='NETCDF3_64BIT')
return joint_ds, date_list
def convert_crs_point(point_x, point_y, in_proj, out_proj):
"""
Change the coordinate system of a lat, lon point.
Parameters
----------
point_x : int
Longitude coordinate.
point_y : int
Latitude coordinate.
in_proj : str
Coordinate system to transform from.
out_proj : str
Coordinate system to transform to.
Returns
----------
AoI_point : shapely Point
Lat, lon point in new coordinate system.
"""
in_pt = Point(point_x, point_y)
in_proj = pyproj.CRS(in_proj)
out_proj = pyproj.CRS(out_proj)
project = pyproj.Transformer.from_crs(in_proj, out_proj, always_xy=True).transform
AoI_point = transform(project, in_pt)
return AoI_point
file_names_sorted = sort_file_list(file_names)
print(f'These are the files I am going to concatenate: {file_names}')
output_joint_file_name = 'joint_ds_with_all_times.nc'
joint_ds, date_list = concatenate_raster_files(file_names_sorted, output_joint_file_name)
print(f'I have concatenated all your files and created a time series')
print(f'This is the date list: {date_list}')
# need to make this better as this is not necessarily the closest point
time_selected = joint_ds.sel(time=time_to_slice)
# first we need to convert the point to the coordinate system that we want.
# need to first check what the coordinate system of the area is
joint_ds = xr.open_dataset('joint_ds_with_all_times.nc', engine="rasterio")
raster_crs = joint_ds.rio.crs
converted_lat_lon = convert_crs_point(x_lon_to_slice, y_lat_to_slice, coordinate, raster_crs)
x_converted = round(converted_lat_lon.x, 2)
y_converted = round(converted_lat_lon.y,2)
timeseries=output_precipitation_timeseries(x_converted, y_converted, output_joint_file_name)
timeseries_df = pd.DataFrame(timeseries, columns=['precipitation_mm/s'])
# need to add time datetime column
    timeseries_df['date'] = pd.to_datetime(date_list)
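    # Hedged completion sketch: the original script is truncated at this point. A
    # plausible ending persists the extracted point timeseries; the output filename
    # below is an assumption, not taken from the original code.
    timeseries_df.to_csv('output_precipitation_timeseries.csv', index=False)
    print('Point timeseries written to output_precipitation_timeseries.csv')


# Entry-point guard, assumed from the argparse/welcome-screen structure above.
if __name__ == "__main__":
    main()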
import auxilary_functions as f
cfg = f.get_actual_parametrization("../src/config-human.json")
#cfg = f.update_cfg("../src/config.json", "NETWORK_TO_SEARCH_IN", "yeast")
import psutil
import os
import numpy as np
import pandas as pd
import sys
import joblib
sys.path.insert(0, "../src")
ART_NET_PATH = "../networks"
import auxilary_functions as f
from generation_algorithm import *
from copy import deepcopy
import networkx as nx
from collections import namedtuple
from itertools import product, combinations
from matplotlib import pyplot as plt
from datetime import datetime
from tqdm import tqdm
from time import sleep
import multiprocessing as mp
import tracemalloc
def get_network_nucleus(
interaction_matrix, motifs, motifs_network, min_size, random_seed):
"""
Getting subsample from real network as a nucleus for artificial network
________________________________________________________________________
interaction_matrix (numpy.array) - binary interaction matrix for genes
motifs (numpy.array) - list of unique identifiers for condidered motifs (FFL triads)
motifs_network (numpy.array) - vertex-based motifs network (linkage by shared nodes)
min_size (int) - minimal required size of resulting nucleus (may be slightly higher eventually)
random_seed (int) - reproducibility parameter
"""
np.random.seed(random_seed)
substrate_motif_idxs = [np.random.randint(len(motifs))]
substrate_motifs = np.array([motifs[i] for i in substrate_motif_idxs])
substrate_size = len(set(sum([f.split_motif(motif) for motif in substrate_motifs], [])))
# grow network nucleus while required size obtained
while substrate_size < min_size:
neighbors = np.where(motifs_network[:, substrate_motif_idxs].sum(axis=1) != 0)[0]
neighbors = np.array(list(set(neighbors) - set(substrate_motif_idxs)))
# assignment of weights to candidate motifs by their connectivity
# with already selected motifs grown substrate network
weights = motifs_network[neighbors, :][:, substrate_motif_idxs].sum(axis=1)
weights /= sum(weights)
substrate_motif_idxs.append(np.random.choice(neighbors, size=1, p=weights)[0])
substrate_motifs = np.array([motifs[i] for i in substrate_motif_idxs])
substrate_size = len(set(sum([f.split_motif(motif) for motif in substrate_motifs], [])))
# interaction matrix building
G = nx.DiGraph()
for motif in substrate_motifs:
nodes = f.split_motif(motif)
M = nx.DiGraph(interaction_matrix[nodes, :][:, nodes])
M = nx.relabel_nodes(M, mapping={i: node for i, node in enumerate(nodes)})
G = nx.compose(G, M)
substrate_matrix = nx.convert_matrix.to_numpy_array(G)
return substrate_matrix
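# Hedged usage sketch (illustrative only, not part of the original module). It assumes
# motifs are encoded as underscore-joined node indices (e.g. "0_1_2"), consistent with
# f.split_motif above, and that `motifs_network` is the vertex-based FFL network
# (motifs linked when they share nodes) built elsewhere in the HPnex pipeline; motifs
# themselves can come from f.motif_search(...)['030T'] as in get_network_params below.
def example_get_network_nucleus(interaction_matrix, motifs, motifs_network):
    # Grow a nucleus of at least 50 genes; the size and seed are arbitrary choices.
    return get_network_nucleus(interaction_matrix, motifs, motifs_network,
                               min_size=50, random_seed=42)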
#### Parametrization
#Motif types and number of shared nodes distributions inference.
#The support set for FFL motif type by TF/TG content is {TTT, TTG} where T and G are for TF and TG respectively.
#The support set for the number of shared nodes is {1, 2}. We are not considering 0 as we focus only on the largest connected component of FFL VMN which actually contains all of the FFLs in the yeast Tnet and nearly all (99%) in E.coli Tnet
def get_network_params(interaction_matrix, config_file, verbose=False,motif_search=True,known_motifs=False):
# motif search
if motif_search:
motifs, counter = f.motif_search(
config_file, interaction_matrix, batch_size=10000, verbose=False
)
motifs = motifs["030T"]
else:
motifs = known_motifs
# TF/TG recognition
tf_nodes = np.where(interaction_matrix.sum(axis=0) != 0)[0]
tg_nodes = np.where(interaction_matrix.sum(axis=0) == 0)[0]
# motif type distribution
n_tg_nodes_list = np.array(
[len(set(f.split_motif(motif)) - set(tf_nodes)) for motif in motifs]
)
mtype_probs = pd.Series(n_tg_nodes_list).value_counts(normalize=True).sort_index()
if verbose:
prob = len(tf_nodes)/interaction_matrix.shape[0]
print(f"TF content: {prob}")
print("Number of TG in motif distribution:")
print(mtype_probs)
print()
# nodes participation in FFL
node_part = np.zeros(interaction_matrix.shape[0])
for triad in motifs:
for x in map(int, triad.split("_")):
node_part[x] += 1
node_part = pd.Series(node_part)
if verbose:
print("Node patricipation distribution:")
print(node_part.value_counts(normalize=True).head())
print()
# Distribution of X-unique nodes motifs number
edges_1 = []
motifs_0 = []
types = {i: 0 for i in range(3)}
for triad in motifs:
res = 0
i = 0
for x in map(int, triad.split("_")):
res += node_part[x] == 1
if node_part[x] == 1:
i = x
#####
try:
types[res] += 1
if res == 1:
edges_1.append(set(f.split_motif(triad))-set([i]))
if res == 0:
motifs_0.append(triad)
except KeyError:
pass
####
types = pd.Series(types)
unique_nodes = types/sum(types)
if verbose:
print("Unique nodes number distribution")
print(unique_nodes)
print()
# Is edge unique? (for 1-unique node motifs)
edges_1_part = {"_".join(map(str, sorted(edge))): 0 for edge in edges_1}
for triad in motifs:
for x in combinations(f.split_motif(triad), 2):
edge_1 = "_".join(map(str, sorted(x)))
try:
edges_1_part[edge_1] += 1
except KeyError:
pass
    edges_1_part = pd.Series(edges_1_part)
import json
import pandas as pd
import os
import re
def create_entry(raw_entry,hashfunction,encoding):
return_dict = {}
app_metadata = {'is_god':raw_entry['is_admin']}
if not pd.isna(raw_entry['organisation_id']):
app_metadata['organisation_id'] = round(raw_entry['organisation_id'])
if not pd.isna(raw_entry['base_ids']):
app_metadata['base_ids']=str(raw_entry['base_ids']).split(',')
return_dict['user_id']=str(raw_entry['id'])
return_dict['name']=raw_entry['naam']
if not pd.isna(raw_entry['deleted']) and raw_entry['deleted'] != '0000-00-00 00:00:00':
return_dict['email']=re.sub(r'\.deleted\.\d+', '',raw_entry['email'])
app_metadata['last_blocked_date']=raw_entry['deleted']
return_dict['blocked']=True
else:
return_dict['email']=raw_entry['email']
return_dict['email_verified']=False
return_dict['custom_password_hash']= {
"algorithm":hashfunction,
"hash":{
"value":raw_entry['pass'],
"encoding":encoding
}
}
if not pd.isna(raw_entry['cms_usergroups_id']):
app_metadata['usergroup_id']=int(round(raw_entry['cms_usergroups_id']))
if not pd.isna(raw_entry['valid_firstday']) and raw_entry['valid_firstday'] != '0000-00-00':
app_metadata['valid_firstday']=raw_entry['valid_firstday']
if not pd.isna(raw_entry['valid_lastday']) and raw_entry['valid_lastday'] != '0000-00-00':
app_metadata['valid_lastday']=raw_entry['valid_lastday']
print(return_dict)
print(app_metadata)
return_dict['app_metadata'] =app_metadata
return return_dict
# set wd to path of current file
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# load users for auth0 connection
users = pd.read_csv('users.csv')
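# Hedged usage sketch (not in the original script): one plausible way to turn the
# loaded rows into an Auth0 bulk-import JSON file via create_entry() above. The
# 'bcrypt'/'utf8' arguments and the output filename are assumptions and should match
# whatever algorithm the legacy password hashes actually use.
entries = [create_entry(row, 'bcrypt', 'utf8') for row in users.to_dict(orient='records')]
with open('auth0_users_import.json', 'w') as out_file:
    json.dump(entries, out_file, indent=2)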
import json
from django.http import HttpResponse
from .models import (
Invoice,
Seller,
Receiver,
)
from .serializers import (
InvoiceSerializer,
SellerSerializer,
ReceiverSerializer,
)
import re
from django.views import View
from django.http import Http404
import pandas as pd
import datetime as dt
def get_object_invoice(pk):
try:
return Invoice.objects.get(pk=pk)
except Invoice.DoesNotExist:
raise Http404
def get_object_seller(pk):
try:
return Seller.objects.get(pk=pk)
except Seller.DoesNotExist:
raise Http404
def get_object_receiver(pk):
try:
return Receiver.objects.get(pk=pk)
except Receiver.DoesNotExist:
raise Http404
class InvoiceShowDelete(View):
def get(self, request, pk):
invoice = get_object_invoice(pk)
serializer = InvoiceSerializer(invoice)
return HttpResponse(json.dumps(serializer.data), status=200)
def delete(self, request, pk):
invoice = get_object_invoice(pk)
invoice.delete()
return HttpResponse(status=204)
class InvoiceCreateList(View):
def get(self, request):
invoices = Invoice.objects.all()
serializer = InvoiceSerializer(invoices, many=True)
return HttpResponse(json.dumps(serializer.data))
def post(self, request):
dict_invoice = {}
dict_seller = {}
dict_receiver = {}
json_dict = None
if request.body:
json_dict = json.loads(request.body)
elif request.POST:
json_dict = request.POST
# access_key, uf_code_seller, cnpj_seller, number
access_key = json_dict['main_access_key'].replace(' ', '')
uf_code_seller = access_key[0:2]
cnpj_seller = access_key[6:20]
number = access_key[25:34]
dict_invoice['access_key'] = access_key
dict_invoice['number'] = number
dict_seller['uf_code'] = uf_code_seller
dict_seller['cnpj'] = cnpj_seller
# cpf_cnpj_receiver
cpf_cnpj_receiver = json_dict['sender_cnpj_cpf']
cpf_cnpj_receiver = re.search(
r'\d{11}|\d{14}|\d{3}\.\d{3}\.\d{3}\-\d{2}|\d{2}\.\d{3}\.\d{3}\/\d{4}\-\d{2}',
cpf_cnpj_receiver,
re.M | re.I
)
cpf_cnpj_receiver = str(cpf_cnpj_receiver.group())
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('-', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('.', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace('/', '')
cpf_cnpj_receiver = cpf_cnpj_receiver.replace(' ', '')
dict_receiver['cpf_cnpj'] = cpf_cnpj_receiver
# operation_nature
dict_invoice['operation_nature'] = json_dict['main_nature_operation']
# authorization_protocol
dict_invoice['authorization_protocol'] = json_dict['main_protocol_authorization_use']
# state_registration
dict_invoice['state_registration'] = json_dict['main_state_registration']
# emission_date
emission_date = json_dict['sender_emission_date']
emission_date = re.search(r'\d{2}\/\d{2}\/\d{4}', emission_date, re.M | re.I)
emission_date = str(emission_date.group())
emission_date = emission_date.split('/')
emission_date = emission_date[2] + '-' + emission_date[1] + '-' + emission_date[0]
dict_invoice['emission_date'] = emission_date
# entry_exit_datetime
entry_exit_datetime = json_dict['sender_out_input_date']
entry_exit_datetime = entry_exit_datetime.split('/')
time = json_dict['sender_output_time']
u = entry_exit_datetime[2] + '-' + entry_exit_datetime[1] + '-' + entry_exit_datetime[0] + 'T' + time
entry_exit_datetime = u
dict_invoice['entry_exit_datetime'] = entry_exit_datetime
# total_products_value
total_products_value = json_dict['tax_total_cost_products']
total_products_value = total_products_value.replace('.', '')
total_products_value = total_products_value.replace(',', '.')
dict_invoice['total_products_value'] = float(total_products_value)
# total_invoice_value
total_invoice_value = json_dict['tax_cost_total_note']
total_invoice_value = total_invoice_value.replace('.', '')
total_invoice_value = total_invoice_value.replace(',', '.')
dict_invoice['total_invoice_value'] = float(total_invoice_value)
# basis_calculation_icms
basis_calculation_icms = json_dict['tax_icms_basis']
basis_calculation_icms = basis_calculation_icms.replace('.', '')
basis_calculation_icms = basis_calculation_icms.replace(',', '.')
dict_invoice['basis_calculation_icms'] = float(basis_calculation_icms)
# freight_value
freight_value = json_dict['tax_cost_freight']
freight_value = freight_value.replace('.', '')
freight_value = freight_value.replace(',', '.')
dict_invoice['freight_value'] = float(freight_value)
# insurance_value
insurance_value = json_dict['tax_cost_insurance']
insurance_value = insurance_value.replace('.', '')
insurance_value = insurance_value.replace(',', '.')
dict_invoice['insurance_value'] = float(insurance_value)
# icms_value
icms_value = json_dict['tax_cost_icms']
icms_value = icms_value.replace('.', '')
icms_value = icms_value.replace(',', '.')
dict_invoice['icms_value'] = float(icms_value)
# discount_value
discount_value = json_dict['tax_discount']
discount_value = discount_value.replace('.', '')
discount_value = discount_value.replace(',', '.')
dict_invoice['discount_value'] = float(discount_value)
# basis_calculation_icms_st
basis_calculation_icms_st = json_dict['tax_icms_basis_st']
basis_calculation_icms_st = basis_calculation_icms_st.replace('.', '')
basis_calculation_icms_st = basis_calculation_icms_st.replace(',', '.')
dict_invoice['basis_calculation_icms_st'] = float(basis_calculation_icms_st)
# icms_value_st
icms_value_st = json_dict['tax_cost_icms_replacement']
icms_value_st = icms_value_st.replace('.', '')
icms_value_st = icms_value_st.replace(',', '.')
dict_invoice['icms_value_st'] = float(icms_value_st)
# other_expenditure
other_expenditure = json_dict['tax_other_expenditure']
other_expenditure = other_expenditure.replace('.', '')
other_expenditure = other_expenditure.replace(',', '.')
dict_invoice['other_expenditure'] = float(other_expenditure)
# ipi_value
ipi_value = json_dict['tax_cost_ipi']
ipi_value = ipi_value.replace('.', '')
ipi_value = ipi_value.replace(',', '.')
dict_invoice['ipi_value'] = float(ipi_value)
# receiver
dict_receiver['name'] = json_dict['sender_name_social']
dict_receiver['address'] = json_dict['sender_address']
dict_receiver['neighborhood'] = json_dict['sender_neighborhood_district']
dict_receiver['cep'] = json_dict['sender_cep'].replace('-', '')
dict_receiver['county'] = json_dict['sender_county']
dict_receiver['uf'] = json_dict['sender_uf']
dict_receiver['phone'] = json_dict['sender_phone_fax']
# ------------------------
if Receiver.objects.filter(cpf_cnpj=cpf_cnpj_receiver).count() == 1:
receiver = Receiver.objects.get(cpf_cnpj=cpf_cnpj_receiver)
dict_invoice['receiver'] = receiver.pk
else:
receiver_serializer = ReceiverSerializer(data=dict_receiver)
if receiver_serializer.is_valid():
receiver_serializer.save()
else:
return HttpResponse(
json.dumps([
receiver_serializer.errors,
]),
status=400
)
dict_invoice['receiver'] = receiver_serializer.data['id']
if Seller.objects.filter(cnpj=cnpj_seller).count() == 1:
seller = Seller.objects.get(cnpj=cnpj_seller)
dict_invoice['seller'] = seller.pk
else:
seller_serializer = SellerSerializer(data=dict_seller)
if seller_serializer.is_valid():
seller_serializer.save()
else:
return HttpResponse(
json.dumps([
seller_serializer.errors,
]),
status=400
)
dict_invoice['seller'] = seller_serializer.data['id']
invoice_serializer = InvoiceSerializer(data=dict_invoice)
if invoice_serializer.is_valid():
invoice_serializer.save()
else:
return HttpResponse(
json.dumps(
invoice_serializer.errors
),
status=400
)
return HttpResponse(
json.dumps([
invoice_serializer.data,
]),
status=200
)
def sellerShow(request, pk):
if request.method == 'GET':
seller = get_object_seller(pk)
serializer = SellerSerializer(seller)
return HttpResponse(json.dumps(serializer.data), status=200)
return HttpResponse(status=400)
def receiverShow(request, pk):
if request.method == 'GET':
receiver = get_object_receiver(pk)
serializer = ReceiverSerializer(receiver)
return HttpResponse(json.dumps(serializer.data), status=200)
return HttpResponse(status=400)
def sellerList(request):
if request.method == 'GET':
seller = Seller.objects.all()
serializer = SellerSerializer(seller, many=True)
return HttpResponse(json.dumps(serializer.data))
return HttpResponse(status=400)
def receiverList(request):
if request.method == 'GET':
receiver = Receiver.objects.all()
serializer = ReceiverSerializer(receiver, many=True)
return HttpResponse(json.dumps(serializer.data))
return HttpResponse(status=400)
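# Hedged routing sketch (illustrative; this project's real urls.py is not shown here,
# so every path string below is an assumption). A configuration along these lines
# would expose the views defined in this module:
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('invoices/', views.InvoiceCreateList.as_view()),
#     path('invoices/<int:pk>/', views.InvoiceShowDelete.as_view()),
#     path('sellers/', views.sellerList),
#     path('sellers/<int:pk>/', views.sellerShow),
#     path('receivers/', views.receiverList),
#     path('receivers/<int:pk>/', views.receiverShow),
#     path('charts/total-value-per-time/', views.chart_total_value_per_time),
# ]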
def chart_total_value_per_time(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
total = []
for invoice in invoices:
date.append(invoice.emission_date)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'date': date, 'total': total})
df = df.sort_values(by='date')
sf = df.groupby('date')['total'].sum()
df = pd.DataFrame({'date': sf.index, 'total': sf.values})
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
df['total'] = pd.to_numeric(df['total'].apply(lambda x: round(x, 2)))
data = df.to_dict('list')
df = pd.DataFrame({'dateM': date, 'totalM': total})
df = df.sort_values(by='dateM')
df['dateM'] = pd.to_datetime(df['dateM']).apply(lambda x: x.strftime('%Y-%m'))
sf = df.groupby('dateM')['totalM'].sum()
df = pd.DataFrame({'dateM': sf.index, 'totalM': sf.values})
df['dateM'] = pd.to_datetime(df['dateM']).apply(lambda x: x.strftime('%m/%Y'))
df['totalM'] = pd.to_numeric(df['totalM'].apply(lambda x: round(x, 2)))
data['dateM'] = df.to_dict('list')['dateM']
data['totalM'] = df.to_dict('list')['totalM']
df = pd.DataFrame({'dateY': date, 'totalY': total})
df = df.sort_values(by='dateY')
df['dateY'] = pd.to_datetime(df['dateY']).apply(lambda x: x.strftime('%Y'))
sf = df.groupby('dateY')['totalY'].sum()
df = pd.DataFrame({'dateY': sf.index, 'totalY': sf.values})
df['totalY'] = pd.to_numeric(df['totalY'].apply(lambda x: round(x, 2)))
data['dateY'] = df.to_dict('list')['dateY']
data['totalY'] = df.to_dict('list')['totalY']
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
def chart_qtd_per_time(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
for invoice in invoices:
date.append(invoice.emission_date)
df = pd.DataFrame({'date': date})
df = df.sort_values(by='date')
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%Y-%m'))
sf = df.groupby('date').size()
df = pd.DataFrame({'date': sf.index, 'count': sf.values})
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%m/%Y'))
data = df.to_dict('list')
dfY = pd.DataFrame({'dateY': date})
dfY = dfY.sort_values(by='dateY')
dfY['dateY'] = pd.to_datetime(dfY['dateY']).apply(lambda x: x.strftime('%Y'))
sf = dfY.groupby('dateY').size()
dfY = pd.DataFrame({'dateY': sf.index, 'countY': sf.values})
data['dateY'] = dfY.to_dict('list')['dateY']
data['countY'] = dfY.to_dict('list')['countY']
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
def chart_total_value_per_chosen_date(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
total = []
for invoice in invoices:
date.append(invoice.emission_date)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'date': date, 'total': total})
df = df.sort_values(by='date')
sf = df.groupby('date')['total'].sum()
df = pd.DataFrame({'date': sf.index, 'total': sf.values})
week = dt.datetime.now().date() - dt.timedelta(days=7)
month = dt.datetime.now().date() - dt.timedelta(days=30)
semester = dt.datetime.now().date() - dt.timedelta(days=182)
year = dt.datetime.now().date() - dt.timedelta(days=365)
yearDf = df.groupby('date').filter(lambda x: (x['date'] > year))
semesterDf = yearDf.groupby('date').filter(lambda x: (x['date'] > semester))
monthDf = semesterDf.groupby('date').filter(lambda x: (x['date'] > month))
weekDf = monthDf.groupby('date').filter(lambda x: (x['date'] > week))
sf = weekDf.groupby('date')['total'].mean()
weekDf = pd.DataFrame({'date': sf.index, 'total': sf.values})
sf = monthDf.groupby('date')['total'].mean()
monthDf = pd.DataFrame({'date': sf.index, 'total': sf.values})
sf = semesterDf.groupby('date')['total'].mean()
semesterDf = pd.DataFrame({'date': sf.index, 'total': sf.values})
sf = yearDf.groupby('date')['total'].mean()
yearDf = pd.DataFrame({'date': sf.index, 'total': sf.values})
weekDf['date'] = pd.to_datetime(weekDf['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
monthDf['date'] = pd.to_datetime(monthDf['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
semesterDf['date'] = pd.to_datetime(semesterDf['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
yearDf['date'] = pd.to_datetime(yearDf['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
weekDf['total'] = pd.to_numeric(weekDf['total'].apply(lambda x: round(x, 2)))
monthDf['total'] = pd.to_numeric(monthDf['total'].apply(lambda x: round(x, 2)))
semesterDf['total'] = pd.to_numeric(semesterDf['total'].apply(lambda x: round(x, 2)))
yearDf['total'] = pd.to_numeric(yearDf['total'].apply(lambda x: round(x, 2)))
data = {}
data['dateW'] = weekDf.to_dict('list')['date']
data['totalW'] = weekDf.to_dict('list')['total']
data['dateM'] = monthDf.to_dict('list')['date']
data['totalM'] = monthDf.to_dict('list')['total']
data['dateS'] = semesterDf.to_dict('list')['date']
data['totalS'] = semesterDf.to_dict('list')['total']
data['dateY'] = yearDf.to_dict('list')['date']
data['totalY'] = yearDf.to_dict('list')['total']
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
def chart_total_value_current(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
total = []
for invoice in invoices:
date.append(invoice.emission_date)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'date': date, 'total': total})
df = df.sort_values(by='date')
sf = df.groupby('date')['total'].sum()
df = pd.DataFrame({'date': sf.index, 'total': sf.values})
df['total'] = pd.to_numeric(df['total'].apply(lambda x: round(x, 2)))
current_year = dt.date(
dt.datetime.now().year,
1,
1
)
current_month = dt.date(
dt.datetime.now().year,
dt.datetime.now().month,
1
)
df = df.groupby('date').filter(lambda x: (x['date'] > current_year))
sf = df.groupby('date')['total'].mean()
df = pd.DataFrame({'date': sf.index, 'total': sf.values})
dfM = df.groupby('date').filter(lambda x: (x['date'] > current_month))
sf = dfM.groupby('date')['total'].mean()
dfM = pd.DataFrame({'date': sf.index, 'total': sf.values})
df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
dfM['date'] = pd.to_datetime(dfM['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
data = df.to_dict('list')
data['dateM'] = dfM.to_dict('list')['date']
data['totalM'] = dfM.to_dict('list')['total']
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
def chart_total_value_per_category(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
category = []
total = []
for invoice in invoices:
category.append(invoice.operation_nature)
total.append(invoice.total_invoice_value)
df = pd.DataFrame({'category': category, 'total': total})
df = df.sort_values(by='category')
sf = df.groupby('category')['total'].sum()
df = pd.DataFrame({'category': sf.index, 'total': sf.values})
df['total'] = pd.to_numeric(df['total'].apply(lambda x: round(x, 2)))
data = df.to_dict('list')
return HttpResponse(json.dumps(data))
return HttpResponse(status=400)
def chart_freight_value_per_date(request):
if request.method == 'GET':
invoices = Invoice.objects.all()
date = []
freight = []
for invoice in invoices:
date.append(invoice.emission_date)
freight.append(invoice.freight_value)
df = pd.DataFrame({'date': date, 'freight': freight})
df = df.sort_values(by='date')
sf = df.groupby('date')['freight'].sum()
        df = pd.DataFrame({'date': sf.index, 'freight': sf.values})
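        # Hedged completion sketch: the original function is cut off here. Mirroring
        # chart_total_value_per_time above, a plausible ending formats the dates,
        # rounds the freight totals and returns the dict as JSON.
        df['date'] = pd.to_datetime(df['date']).apply(lambda x: x.strftime('%d/%m/%Y'))
        df['freight'] = pd.to_numeric(df['freight'].apply(lambda x: round(x, 2)))
        data = df.to_dict('list')
        return HttpResponse(json.dumps(data))
    return HttpResponse(status=400)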
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import pandas as pd
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.tensor as mt
import mars.dataframe as md
from mars.executor import register, Executor
from mars.tensor.core import TensorOrder
from mars.tensor.datasource import ArrayDataSource
from mars.tiles import get_tiled
from mars.session import new_session, Session
class Test(unittest.TestCase):
def setUp(self):
new_session().as_default()
def testSessionExecute(self):
a = mt.random.rand(10, 20)
res = a.sum().to_numpy()
self.assertTrue(np.isscalar(res))
self.assertLess(res, 200)
def testSessionAsyncExecute(self):
raw_a = np.random.RandomState(0).rand(10, 20)
a = mt.tensor(raw_a)
expected = raw_a.sum()
res = a.sum().to_numpy(wait=False).result()
self.assertEqual(expected, res)
res = a.sum().execute(wait=False)
res = res.result().fetch()
self.assertEqual(expected, res)
raw_df = pd.DataFrame(raw_a)
expected = raw_df.sum()
df = md.DataFrame(a)
res = df.sum().to_pandas(wait=False).result()
pd.testing.assert_series_equal(expected, res)
res = df.sum().execute(wait=False)
res = res.result().fetch()
        pd.testing.assert_series_equal(expected, res)
import json
import networkx as nx
import numpy as np
import os
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from config import logger, config
def read_profile_data():
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_df = pd.read_csv(config.profile_file)
profile_na.columns = profile_df.columns
profile_df = profile_df.append(profile_na)
return profile_df
def merge_raw_data():
tr_queries = pd.read_csv(config.train_query_file, parse_dates=['req_time'])
te_queries = pd.read_csv(config.test_query_file, parse_dates=['req_time'])
tr_plans = pd.read_csv(config.train_plan_file, parse_dates=['plan_time'])
te_plans = pd.read_csv(config.test_plan_file, parse_dates=['plan_time'])
tr_click = pd.read_csv(config.train_click_file)
trn = tr_queries.merge(tr_click, on='sid', how='left')
trn = trn.merge(tr_plans, on='sid', how='left')
trn = trn.drop(['click_time'], axis=1)
trn['click_mode'] = trn['click_mode'].fillna(0)
tst = te_queries.merge(te_plans, on='sid', how='left')
tst['click_mode'] = -1
df = pd.concat([trn, tst], axis=0, sort=False)
df = df.drop(['plan_time'], axis=1)
df = df.reset_index(drop=True)
df['weekday'] = df['req_time'].dt.weekday
df['day'] = df['req_time'].dt.day
df['hour'] = df['req_time'].dt.hour
df = df.drop(['req_time'], axis=1)
logger.info('total data size: {}'.format(df.shape))
logger.info('data columns: {}'.format(', '.join(df.columns)))
return df
def extract_plans(df):
plans = []
for sid, plan in tqdm(zip(df['sid'].values, df['plans'].values)):
try:
p = json.loads(plan)
for x in p:
x['sid'] = sid
plans.extend(p)
except:
pass
return pd.DataFrame(plans)
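# Hedged illustration (not part of the original pipeline): the 'plans' column holds a
# JSON string listing route alternatives with 'distance', 'price', 'eta' and
# 'transport_mode' keys, as consumed by gen_plan_feas below. The toy values here are
# invented purely to show the expected shape of extract_plans' output.
def example_extract_plans():
    toy = pd.DataFrame({
        'sid': [1],
        'plans': ['[{"distance": 3220, "price": 1200, "eta": 2134, "transport_mode": 1}]'],
    })
    # Returns one row per plan, tagged with its originating sid.
    return extract_plans(toy)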
def generate_od_features(df):
feat = df[['o','d']].drop_duplicates()
feat = feat.merge(df.groupby('o')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='o')
feat.rename(columns={'day': 'o_nunique_day',
'hour': 'o_nunique_hour',
'pid': 'o_nunique_pid',
'click_mode': 'o_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby('d')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='d')
feat.rename(columns={'day': 'd_nunique_day',
'hour': 'd_nunique_hour',
'pid': 'd_nunique_pid',
'click_mode': 'd_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby(['o', 'd'])[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on=['o', 'd'])
feat.rename(columns={'day': 'od_nunique_day',
'hour': 'od_nunique_hour',
'pid': 'od_nunique_pid',
'click_mode': 'od_nunique_click'}, inplace=True)
return feat
def generate_pid_features(df):
feat = df.groupby('pid')[['hour', 'day']].nunique().reset_index()
feat.rename(columns={'hour': 'pid_nunique_hour', 'day': 'pid_nunique_day'}, inplace=True)
feat['nunique_hour_d_nunique_day'] = feat['pid_nunique_hour'] / feat['pid_nunique_day']
feat = feat.merge(df.groupby('pid')[['o', 'd']].nunique().reset_index(), how='left', on='pid')
feat.rename(columns={'o': 'pid_nunique_o', 'd': 'pid_nunique_d'}, inplace=True)
feat['nunique_o_d_nunique_d'] = feat['pid_nunique_o'] / feat['pid_nunique_d']
return feat
def generate_od_cluster_features(df):
G = nx.Graph()
G.add_nodes_from(df['o'].unique().tolist())
G.add_nodes_from(df['d'].unique().tolist())
edges = df[['o','d']].apply(lambda x: (x[0],x[1]), axis=1).tolist()
G.add_edges_from(edges)
cluster = nx.clustering(G)
cluster_df = pd.DataFrame([{'od': key, 'cluster': cluster[key]} for key in cluster.keys()])
return cluster_df
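# Hedged illustration (not part of the original pipeline): a toy o/d table showing the
# shape of generate_od_cluster_features' output, i.e. one clustering coefficient per
# origin/destination coordinate string. The coordinates below are invented.
def example_od_cluster_features():
    toy = pd.DataFrame({
        'o': ['116.30,39.90', '116.32,39.91', '116.30,39.90'],
        'd': ['116.32,39.91', '116.35,39.95', '116.35,39.95'],
    })
    # Returns a DataFrame with 'od' (node label) and 'cluster' columns.
    return generate_od_cluster_features(toy)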
def gen_od_feas(data):
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
data = data.drop(['o', 'd'], axis=1)
return data
def gen_plan_feas(data):
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
logger.info('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
if row['hour'] > 7 and row['hour'] < 18: # 7:00 - 18:00
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
else: # 6:00 - 7:00
h = 2
return str(w) + '_' + str(h)
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv(config.pid_feature_file)
data = data.merge(feat, how='left', on='pid')
return data
def gen_od_feat(data):
feat = pd.read_csv(config.od_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
logger.info('sid shape={}'.format(sid.shape))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
logger.info('feature shape={}'.format(feat.shape))
logger.info('feature columns={}'.format(feat.columns))
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv(config.od_cluster_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
data['o1-m-o2'] = np.abs(data['o1'] - data['o2'])
data['d1-m-d2'] = np.abs(data['d1'] - data['d2'])
data['od_area'] = data['o1-m-o2']*data['d1-m-d2']
data['od_ratio'] = data['o1-m-o2']/data['d1-m-d2']
return data
def gen_od_mode_cnt_feat(data):
feat = pd.read_csv(config.od_mode_cnt_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weekday_hour_cnt_feat(data):
feat = pd.read_csv(config.weekday_hour_feature_file)
    tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','req_time'])
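    # Hedged completion sketch: the original function is truncated here. Following the
    # pattern of gen_od_mode_cnt_feat above, a plausible ending derives weekday/hour
    # from req_time for the train and test queries, joins the precomputed counts and
    # merges them back onto `data` by sid; the exact join keys are an assumption.
    te_sid = pd.read_csv(config.test_query_file, usecols=['sid', 'req_time'])
    sid = pd.concat((tr_sid, te_sid))
    sid['req_time'] = pd.to_datetime(sid['req_time'])
    sid['weekday'] = sid['req_time'].dt.weekday
    sid['hour'] = sid['req_time'].dt.hour
    feat = sid.merge(feat, how='left', on=['weekday', 'hour']).drop(
        ['req_time', 'weekday', 'hour'], axis=1)
    data = data.merge(feat, how='left', on='sid')
    return data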
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
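# Hedged illustration (added; not from the pandas test suite itself): the helper above
# re-expands an `observed=True` groupby result onto the full cartesian product of its
# categorical groupers, filling the unobserved combinations.
def example_cartesian_product_for_groupers():
    cat = Categorical(["a", "a"], categories=["a", "b"])
    df = DataFrame({"A": cat, "B": [1, 2], "v": [10, 20]})
    result = df.groupby(["A", "B"], observed=True).sum()
    # Rows for the unobserved ("b", 1) and ("b", 2) combinations appear filled with 0.
    return cartesian_product_for_groupers(
        result, [df["A"].values, [1, 2]], ["A", "B"], fill_value=0
    )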
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
    # use same data as test_groupby_sort_categorical, whose categories
    # correspond to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("<KEY>")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
# GH 13420
df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
cat = Categorical.from_codes(code, categories=list("abc"))
result = df.groupby(cat, axis=1).mean()
expected = df.T.groupby(cat, axis=0).mean().T
tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(DataFrame.sum, skipna=True)
.reset_index()
)
tm.assert_frame_equal(result, expected)
def test_get_nonexistent_category():
# Accessing a Category that is not in the dataframe
df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
with pytest.raises(KeyError, match="'vau'"):
df.groupby("var").apply(
lambda rows: DataFrame(
{"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
)
)
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABCD")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
"value": [0.1] * 4,
}
)
args = {"nth": [0]}.get(reduction_func, [])
expected_length = 4 if observed else 16
series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
assert len(result) == expected_length
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
reduction_func, request
):
# GH 17605
# Tests whether the unobserved categories in the result contain 0 or NaN
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
"value": [0.1] * 4,
}
)
unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
args = {"nth": [0]}.get(reduction_func, [])
series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
for idx in unobserved:
val = result.loc[idx]
assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)
# If we expect unobserved values to be zero, we also expect the dtype to be int.
# Except for .sum(). If the observed categories sum to dtype=float (i.e. their
# sums have decimals), then the zeros for the missing categories should also be
# floats.
if zero_or_nan == 0 and reduction_func != "sum":
assert np.issubdtype(result.dtype, np.integer)
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# does not return the categories that are not in df when observed=True
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
for cat in unobserved_cats:
assert cat not in res.index
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
reduction_func, observed, request
):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# returns the categories that are not in df when observed=False/None
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
expected = _results_for_groupbys_with_missing_categories[reduction_func]
if expected is np.nan:
assert res.loc[unobserved_cats].isnull().all().all()
else:
assert (res.loc[unobserved_cats] == expected).all().all()
def test_series_groupby_categorical_aggregation_getitem():
# GH 8870
d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=True, sort=True)
result = groups["foo"].agg("mean")
expected = groups.agg("mean")["foo"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, expected_values",
[(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
# 31256
df = DataFrame(
{
"id": [0, 1, 2, 3, 4],
"groups": [0, 1, 1, 2, 2],
"value": Categorical([0, 0, 0, 0, 1]),
}
).set_index("id")
result = df.groupby("groups").agg(func)
expected = DataFrame(
{"value": expected_values}, index=Index([0, 1, 2], name="groups")
)
tm.assert_frame_equal(result, expected)
def test_groupby_agg_non_numeric():
df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
expected = DataFrame({"A": [2, 1]}, index=[1, 2])
result = df.groupby([1, 2, 1]).agg(Series.nunique)
tm.assert_frame_equal(result, expected)
result = df.groupby([1, 2, 1]).nunique()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupy_first_returned_categorical_instead_of_dataframe(func):
# GH 28641: groupby drops index, when grouping over categorical column with
    # first/last. Previously a Categorical was returned instead of a DataFrame.
df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()})
df_grouped = df.groupby("A")["B"]
result = getattr(df_grouped, func)()
expected = Series(["b"], index= | Index([1997], name="A") | pandas.Index |
"""
Market data import and transformation functions
"""
import calendar
from collections import Counter
import copy
from datetime import date
import time
from urllib.request import FancyURLopener
import warnings
import datetime as dt
from bs4 import BeautifulSoup
from lxml import html
import numpy as np
import pandas as pd
import pytz
from volvisualizer.utils import ImpliedVol
# pylint: disable=invalid-name
# Class used to open urls for financial data
class UrlOpener(FancyURLopener):
"""
Extract data from Yahoo Finance URL
"""
version = 'w3m/0.5.3+git20180125'
class Data():
"""
Market data import and transformation functions
"""
@classmethod
def create_option_data(cls, params, tables):
"""
Extract the URL for each of the listed option on Yahoo Finance
for the given ticker. Extract option data from each URL.
Filter / transform the data and calculate implied volatilities for
specified put and call strikes.
Parameters
----------
start_date : Str
Date from when to include prices (some of the options
won't have traded for days / weeks and therefore will
have stale prices).
ticker : Str
The ticker identifier used by Yahoo for the chosen stock. The
default is '^SPX'.
ticker_label : Str
The ticker label used in charts.
wait : Int
Number of seconds to wait between each url query
lastmins : Int, Optional
Restrict to trades within number of minutes since last
trade time recorded
mindays : Int, Optional
Restrict to options greater than certain option expiry
minopts : Int, Optional
Restrict to minimum number of options to include that
option expiry
volume : Int, Optional
Restrict to minimum Volume
openint : Int, Optional
Restrict to minimum Open Interest
monthlies : Bool
Restrict expiries to only 3rd Friday of the month. Default
is False.
spot : Float
Underlying reference level.
put_strikes : List
Range of put strikes to calculate implied volatility for.
call_strikes : List
Range of call strikes to calculate implied volatility for.
strike_limits : Tuple
min and max strikes to use expressed as a decimal
percentage. The default is (0.5, 2.0).
divisor : Int
Distance between each strike in dollars. The default is 25 for SPX
and 10 otherwise.
r : Float
Interest Rate. The default is 0.005.
q : Float
Dividend Yield. The default is 0.
epsilon : Float
Degree of precision to return implied vol. The default
is 0.001.
method : Str
Implied Vol method; 'nr', 'bisection' or 'naive'. The
default is 'nr'.
Returns
-------
DataFrame
DataFrame of Option data.
"""
# Extract URLs and option data
params, tables = cls.extractoptions(params=params, tables=tables)
print("Options data extracted")
# Filter / transform data
params, tables = cls.transform(params=params, tables=tables)
print("Data transformed")
# Calculate implied volatilities and combine
params, tables = cls.combine(params=params, tables=tables)
print("Data combined")
return params, tables
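    # Illustrative usage sketch (not part of the original module); the exact
    # keys and values in `params` below are assumptions based on the docstring above:
    #
    #   params = {'ticker': '^SPX', 'ticker_label': 'SPX', 'wait': 5,
    #             'start_date': '2021-01-01', 'monthlies': True}
    #   tables = {}
    #   params, tables = Data.create_option_data(params=params, tables=tables)
    #   print(tables['data'].head())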
@classmethod
def extractoptions(cls, params, tables):
"""
Extract the URL for each of the listed option on Yahoo Finance
for the given ticker. Extract option data from each URL.
Parameters
----------
ticker : Str
The ticker identifier used by Yahoo for the chosen stock. The
default is '^SPX'.
wait : Int
Number of seconds to wait between each url query
Returns
-------
DataFrame
All option data from each of the supplied urls.
"""
# Extract dictionary of option dates and urls
params = cls._extracturls(params=params)
print("URL's extracted")
params['raw_web_data'] = cls._extract_web_data(params=params)
params = cls._read_web_data(params=params)
# Create an empty DataFrame
tables['full_data'] = pd.DataFrame()
# Make a list of all the dates of the DataFrames just stored
# in the default dictionary
params['date_list'] = list(params['option_dict'].keys())
params, tables = cls._process_options(params=params, tables=tables)
return params, tables
@staticmethod
def _extracturls(params):
"""
Extract the URL for each of the listed option on Yahoo Finance
for the given ticker.
Parameters
----------
ticker : Str
Yahoo ticker (Reuters RIC) for the stock.
Returns
-------
Dict
Dictionary of dates and URLs.
"""
# Define the stock root webpage
url = 'https://finance.yahoo.com/quote/'+params['ticker']\
+'/options?p='+params['ticker']
# Create a UrlOpener object to extract data from the url
urlopener = UrlOpener()
response = urlopener.open(url)
# Collect the text from this object
params['html_doc'] = response.read()
# Use Beautiful Soup to parse this
soup = BeautifulSoup(params['html_doc'], features="lxml")
# Create a list of all the option dates
option_dates = [a.get_text() for a in soup.find_all('option')]
# Convert this list from string to datetimes
dates_list = [dt.datetime.strptime(date, "%B %d, %Y").date() for date
in option_dates]
# Convert back to strings in the required format
str_dates = [date_obj.strftime('%Y-%m-%d') for date_obj in dates_list]
# Create a list of all the unix dates used in the url for each
# of these dates
option_pages = [a.attrs['value'] for a in soup.find_all('option')]
# Combine the dates and unixdates in a dictionary
optodict = dict(zip(str_dates, option_pages))
# Create an empty dictionary
params['url_dict'] = {}
# For each date and unixdate in the first dictionary
for date_val, page in optodict.items():
# Create an entry with the date as key and the url plus
# unix date as value
params['url_dict'][date_val] = str(
'https://finance.yahoo.com/quote/'
+params['ticker']+'/options?date='+page)
return params
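    # For reference, params['url_dict'] ends up mapping expiry strings to option
    # chain URLs, e.g. {'2021-06-18': 'https://finance.yahoo.com/quote/^SPX/options?date=1623974400', ...}
    # (the unix timestamp shown is illustrative, not a real quoted value).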
@staticmethod
def _extract_web_data(params):
raw_web_data = {}
# each url needs to have an option expiry date associated with
# it in the url dict
for input_date, url in params['url_dict'].items():
# UrlOpener function downloads the data
urlopener = UrlOpener()
weburl = urlopener.open(url)
raw_web_data[input_date] = weburl.read()
# wait between each query so as not to overload server
time.sleep(params['wait'])
return raw_web_data
@staticmethod
def _read_web_data(params):
# Create an empty dictionary
params['option_dict'] = {}
params['url_except_dict'] = {}
for input_date, url in params['url_dict'].items():
# if data exists
try:
# read html data into a DataFrame
option_frame = pd.read_html(params['raw_web_data'][input_date])
# Add this DataFrame to the default dictionary, named
# with the expiry date it refers to
params['option_dict'][input_date] = option_frame
# otherwise collect dictionary of exceptions
except ValueError:
params['url_except_dict'][input_date] = url
return params
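    # After this step params['option_dict'] maps each expiry date string to the
    # list of DataFrames pd.read_html found on that page (typically calls first,
    # then puts), while params['url_except_dict'] records any pages that failed.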
@staticmethod
def _process_options(params, tables):
# Create list to store exceptions
params['opt_except_list'] = []
# For each of these dates
for input_date in params['date_list']:
try:
# The first entry is 'calls'
calls = params['option_dict'][input_date][0]
# Create a column designating these as calls
calls['Option Type'] = 'call'
try:
# The second entry is 'puts'
puts = params['option_dict'][input_date][1]
# Create a column designating these as puts
puts['Option Type'] = 'put'
# Concatenate these two DataFrames
options = pd.concat([calls, puts])
# Add an 'Expiry' column with the expiry date
options['Expiry'] = pd.to_datetime(input_date).date()
# Add this DataFrame to 'full_data'
tables['full_data'] = pd.concat(
[tables['full_data'], options])
except IndexError:
# Add an 'Expiry' column with the expiry date
calls['Expiry'] = pd.to_datetime(input_date).date()
# Add this DataFrame to 'full_data'
tables['full_data'] = pd.concat(
[tables['full_data'], calls])
except IndexError:
try:
# The second entry is 'puts'
puts = params['option_dict'][input_date][1]
# Create a column designating these as puts
puts['Option Type'] = 'put'
# Add an 'Expiry' column with the expiry date
puts['Expiry'] = pd.to_datetime(input_date).date()
# Add this DataFrame to 'full_data'
tables['full_data'] = pd.concat(
[tables['full_data'], puts])
except IndexError:
params['opt_except_list'].append(input_date)
return params, tables
@classmethod
def transform(cls, params, tables):
"""
Perform some filtering / transforming of the option data
Parameters
----------
start_date : Str
Date from when to include prices (some of the options
won't have traded for days / weeks and therefore will
have stale prices).
lastmins : Int, Optional
Restrict to trades within number of minutes since last
trade time recorded
mindays : Int, Optional
Restrict to options greater than certain option expiry
minopts : Int, Optional
Restrict to minimum number of options to include that
option expiry
volume : Int, Optional
Restrict to minimum Volume
openint : Int, Optional
Restrict to minimum Open Interest
monthlies : Bool
Restrict expiries to only 3rd Friday of the month. Default
is False.
Returns
-------
DataFrame
Creates a new DataFrame as a modification of 'full_data'.
"""
# Make a copy of 'full_data'
tables['data'] = copy.deepcopy(tables['full_data'])
# Set timezone
est = pytz.timezone('US/Eastern')
# Convert 'Last Trade Date' to a DateTime variable
tables['data']['Last Trade Date Raw'] = (
tables['data']['Last Trade Date'])
# Format date based on Eastern Daylight or Standard Time
try:
tables['data']['Last Trade Date'] = pd.to_datetime(
tables['data']['Last Trade Date'],
format='%Y-%m-%d %I:%M%p EDT')
        except ValueError:
tables['data']['Last Trade Date'] = pd.to_datetime(
tables['data']['Last Trade Date'],
format='%Y-%m-%d %I:%M%p EST')
tables['data']['Last Trade Date'] = (
tables['data']['Last Trade Date'].apply(
lambda x: x.replace(tzinfo=est)))
# Create columns of expiry date as datetime object and str
tables['data']['Expiry_datetime'] = pd.to_datetime(
tables['data']['Expiry'], format='%Y-%m-%d')
tables['data']['Expiry_str'] = (
tables['data']['Expiry_datetime'].dt.strftime('%Y-%m-%d'))
# Filter data from start date
tables['data'] = (
tables['data'][tables['data']['Last Trade Date']>=str(
| pd.to_datetime(params['start_date']) | pandas.to_datetime |
import pandas as pd
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
import h5py
import os
###########################################
def match_profile_coords():
# After applying profile mask, the masked df_profile should match the df_beads on both coordinates and seq.
amino_acids = pd.read_csv('amino_acids.csv')
vocab = {y.upper(): x for x, y in zip(amino_acids.AA, amino_acids.AA3C)}
# profile_dir = 'training_100_profile'
profile_dir = 'validation_profile'
bead_dir = 'proteinnet_beads'
pdb1 = pd.read_csv(f'{profile_dir}/flist.txt')['fname']
# pdb2 = pdb1.apply(lambda x: x.split('_')[0] + '_' + x.split('_')[2])
pdb2 = pdb1.apply(lambda x: x.split('_')[0][3:] + '_' + x.split('_')[2])
bad = []
good = []
for p1, p2 in tqdm(zip(pdb1, pdb2)):
p2_path = f'{bead_dir}/{p2}_bead.csv'
if not os.path.exists(p2_path):
continue
df1 = | pd.read_csv(f'{profile_dir}/{p1}') | pandas.read_csv |
import pandas as pd
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
data= | pd.read_excel(r"C:\Users\PIYUSH\Desktop\data\maindata.xlsx",sheet_name=None) | pandas.read_excel |
# Importing Libraries:
import pandas as pd
import numpy as np
import pickle
# for displaying all features from the dataset:
| pd.pandas.set_option('display.max_columns', None) | pandas.pandas.set_option |
import pandas as pd
import numpy as np
import time
import bs4
import string
import os
from bs4 import BeautifulSoup
from selenium import webdriver
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
#-----GLOBAL-VARIABLES--------
# List of relevant tags
medium_tags_df = pd.read_csv('medium_tag_1000.csv')
tag_list = list(medium_tags_df['Tags'])
#-----MISC-FUNCTIONS----------
# Finding common elements of two lists
# def common_member(a, b):
# c = [value for value in a if value in b]
# if c != []:
# return c
# else:
# return 'None'
# Processing text (nltk) for removing stopwords
def text_processor(text):
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(text)
    filtered_sentence = [w for w in word_tokens if w not in stop_words]
    return filtered_sentence
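# Example (illustrative): text_processor("this is a test of the scraper")
# returns ['test', 'scraper'] once NLTK's English stopwords are removed.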
#----SCRAPER------------------
# Scraper function
def scrapeURL(url):
# Getting Pages
#driver = webdriver.PhantomJS("/dependency/phantomjs-2.1.1-windows/bin/phantomjs.exe")
driver = webdriver.PhantomJS()#(os.getcwd() + "/dependency/phantomjs-2.1.1-linux-x86_64/bin/phantomjs.exe")
driver.get(url)
res = driver.execute_script("return document.documentElement.outerHTML")
driver.quit()
# Parse Page
soup = BeautifulSoup(res, 'lxml')
wok = True
try:
# Name
try:
name = soup.find('h1').getText()
except:
name = 'None'
# print(name)
# Tags
tags = []
_li = soup.findAll('ul')
li = []
for _l in _li:
for l in _l.findAll('li'):
li.append(l.getText())
for l in li:
if l in tag_list:
tags.append(l)
# Text
para = soup.findAll('p')
text = ''
for p in para:
text = text + ' ' + p.getText()
# text = text_processor(text)
claps = []
# Return each row data
eachDict = {'Name': name, 'Url': url, 'Text': text, 'Tags': tags}
except:
wok = False
if wok:
return eachDict
else:
return -1
#-----Iterative-MAIN----------------
# Can run multiple times | Saves after each scrape
# List of URLs to Scrape
tot_df = pd.read_csv('Export-Medium-Data-950.csv')
url_list = list(tot_df['Url'])
# Check if file exists
try:
done_df = pd.read_csv('article-database.csv')
done_url_list = list(done_df.Url)
isEx = True
except:
isEx = False
# If file exists then check for done URLs
if(isEx):
this_url_list = list(set(url_list)-set(done_url_list))
else:
this_url_list = url_list
this_run = len(this_url_list)
# how many left
print(this_run)
if this_run >= 1:
for i in range(0, this_run):
main_db = {'Name': [], 'Url': [], 'Text': [], 'Tags': []}
dataIns = scrapeURL(this_url_list[i])
if dataIns != -1:
main_db['Name'].append(dataIns['Name'])
main_db['Url'].append(dataIns['Url'])
main_db['Text'].append(dataIns['Text'])
main_db['Tags'].append(dataIns['Tags'])
if(isEx):
pd.DataFrame(data=main_db).to_csv('article-database.csv', mode='a', header=False)
else:
| pd.DataFrame(data=main_db) | pandas.DataFrame |
import codecademylib3_seaborn
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Import the CSV files and create the DataFrames:
user_data = pd.read_csv("user_data.csv")
pop_data = pd.read_csv("pop_data.csv")
# Paste print code here:
print(user_data.head(15))
# Paste merge code here:
new_df = pd.merge(user_data, pop_data)
# Paste location code here:
print(new_df.head(15))
new_df.loc[new_df.population_proper < 100000, "location"] = "rural"
new_df.loc[new_df.population_proper >= 100000, "location"] = "urban"
print(new_df.head(15))
import codecademylib3_seaborn
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Import CSVs:
user_data = | pd.read_csv("user_data.csv") | pandas.read_csv |
# Build the dataset for the boosting model prediction
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% marzo
marzo = pd.read_csv(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\Project MELI\Dataset_limpios\marzo_limpio.csv.gz')
marzo = marzo.loc[marzo['day']>=4].reset_index(drop=True)
marzo['day']=marzo['day']-3
# Rework the price: min-max scale it within each currency
marzo = marzo.assign(current_price=marzo.groupby('currency')['current_price'].transform(lambda x: (x - x.min()) / (x.max() - x.min())))
subtest1 = marzo[['sku', 'day', 'sold_quantity']]
subtest1= subtest1.pivot_table(index = 'sku', columns= 'day', values = 'sold_quantity').add_prefix('sales')
subtest2 = marzo[['sku', 'day', 'current_price']]
subtest2= subtest2.pivot_table(index = 'sku', columns= 'day', values = 'current_price').add_prefix('price')
subtest3 = marzo[['sku', 'day', 'minutes_active']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'minutes_active').add_prefix('active_time')
subtest4 = marzo[['sku', 'day', 'listing_type']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'listing_type').add_prefix('listing_type')
subtest6 = marzo[['sku', 'day', 'shipping_logistic_type']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'shipping_logistic_type').add_prefix('shipping_logistic_type')
subtest7 = marzo[['sku', 'day', 'shipping_payment']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'shipping_payment').add_prefix('shipping_payment')
final = pd.merge(subtest1, subtest2, left_index=True, right_index=True )
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
del subtest1,subtest2,subtest3,subtest4,subtest6, subtest7
#%% Rolling averages over 3, 7, 15 and 20 days
marzo_test = marzo.sort_values(['sku','day']).reset_index(drop=True).copy()
marzo_test['promedio_3'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(3, min_periods=3).mean().reset_index(drop=True)
marzo_test['promedio_7'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(7, min_periods=7).mean().reset_index(drop=True)
marzo_test['promedio_15'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(15, min_periods=15).mean().reset_index(drop=True)
marzo_test['promedio_20'] = marzo_test.groupby(['sku'])['sold_quantity'].rolling(20, min_periods=20).mean().reset_index(drop=True)
# Pivot and merge
subtest3 = marzo_test[['sku', 'day', 'promedio_3']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'promedio_3', dropna=False).add_prefix('promedio_3')
subtest4 = marzo_test[['sku', 'day', 'promedio_7']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'promedio_7', dropna=False).add_prefix('promedio_7')
subtest6 = marzo_test[['sku', 'day', 'promedio_15']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'promedio_15', dropna=False).add_prefix('promedio_15')
subtest7 = marzo_test[['sku', 'day', 'promedio_20']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'promedio_20', dropna=False).add_prefix('promedio_20')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = | pd.merge(final, subtest7, left_index=True, right_index=True) | pandas.merge |
from datetime import datetime
import numpy as np
import pandas as pd
import pygsheets
import json
with open('./config.json') as config:
creds = json.load(config)['google']
def convert_int(value):
value = str(value).lower()
value = value.replace('fewer than five', '0')
value = value.replace('fewer than 5', '0')
value = value.replace('<5', '0')
value = value.replace('approximately', '')
value = value.replace('approx.', '').strip()
value = value.replace(',', '').replace('.0', '').replace('.', '')
return int(value)
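# Examples (illustrative): convert_int('Fewer than 5') -> 0,
# convert_int('approximately 1,200') -> 1200, convert_int('34.0') -> 34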
def split_high_low(df, col):
df = df.copy()
data = df[col].str.split(" to ", n=1, expand=True).fillna(0)
df[f'{col}_low'] = data[0].apply(convert_int)
df[f'{col}_high'] = data[1].apply(convert_int)
df[f'{col}_avg'] = (df[f'{col}_low'] + df[f'{col}_high'])/2
return df
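# Example (illustrative): a value of '10 to 14' in `col` yields col_low=10,
# col_high=14 and col_avg=12.0; a plain '7' has no ' to ' part, so the high
# side is filled with 0 and col_avg becomes 3.5.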
def clean_facility_city(string):
    if string is None:
return
string = string.replace(')','')
string = string.split('-')[0]
return string.strip()
def sync_sheets(df, sheet_name):
print(f'[status] syncing google sheet {sheet_name}')
# google sheets authentication
api = pygsheets.authorize(service_file=creds, retries=5)
wb = api.open('ri-covid-19')
# open the google spreadsheet
sheet = wb.worksheet_by_title(f'{sheet_name}')
sheet.set_dataframe(df, (1,1))
def clean_general(fname):
    print('[status] cleaning statewide general info')
df = pd.read_csv(f'./data/raw/{fname}.csv', parse_dates=['date'])
# remove total causing errors
df = df[df['metric'] != 'Cumulative people who tested positive (counts first positive lab per person) plus cumulative negative tests (may count people more than once)']
# re name metrics to shorten them
df.loc[(df['metric'].str.contains('positive')) & (df['date'] < '2020-07-13'), 'metric'] = 'RI positive cases'
df.loc[(df['metric'].str.contains('negative')) & (df['date'] < '2020-07-13'), 'metric'] = 'RI negative results'
df.loc[(df['metric'].str.contains('self-quarantine')) & (df['date'] < '2020-07-13'), 'metric'] = 'instructed to self-quarantine'
df.loc[(df['metric'].str.contains('hospitalized')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently hospitalized'
df.loc[(df['metric'].str.contains('die')) & (df['date'] < '2020-07-13'), 'metric'] = 'total deaths'
df.loc[(df['metric'].str.contains('fatalities')) & (df['date'] < '2020-07-13'), 'metric'] = 'total deaths'
df.loc[(df['metric'].str.contains('ventilators')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently on ventilator'
df.loc[(df['metric'].str.contains('on a vent')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently on ventilator'
df.loc[(df['metric'].str.contains('intensive care')) & (df['date'] < '2020-07-13'), 'metric'] = 'currently in icu'
df.loc[(df['metric'].str.contains('discharged')) & (df['date'] < '2020-07-13'), 'metric'] = 'total discharged'
df.loc[df['metric'].str.contains('Cumulative people who tested positive '), 'metric'] = 'people positive'
df.loc[df['metric'].str.contains('Cumulative people tested '), 'metric'] = 'people tested'
df.loc[df['metric'].str.contains('New people who tested positive'), 'metric'] = 'new positive'
df.loc[df['metric'].str.contains('Cumlative people who tested positive'), 'metric'] = 'RI positive cases'
df.loc[df['metric'].str.contains('Cumlative people who have only tested negative'), 'metric'] = 'RI negative results'
df.loc[df['metric'].str.contains('Currently hospitalized'), 'metric'] = 'currently hospitalized'
df.loc[df['metric'].str.contains('Currently in ICU'), 'metric'] = 'currently in icu'
df.loc[df['metric'].str.contains('Currently vented'), 'metric'] = 'currently on ventilator'
df.loc[df['metric'].str.contains('Total deaths'), 'metric'] = 'total deaths'
# convert types count -> int, date -> datetime str
df['count'] = df['count'].apply(convert_int)
# pivot to get total tests given out then un-pivot
df = df.pivot_table(index='date', columns='metric', values='count').reset_index()
df['RI total tests'] = df['RI positive cases'] + df['RI negative results']
df = df.melt(col_level=0, id_vars=['date'], value_name='count').sort_values(by=['date', 'metric'])
# get daily changes
df['count'] = df['count'].fillna(0)
df['new_cases'] = df.groupby('metric')['count'].diff().fillna(0).astype(int)
df['change_%'] = df.groupby('metric')['count'].pct_change().replace(np.inf, 0).fillna(0)
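    # e.g. a metric whose count goes from 10 one day to 15 the next gets
    # new_cases=5 and change_%=0.5 on the later day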
# add date format
df['date'] = pd.to_datetime(df['date']).dt.strftime('%m/%d/%Y')
# save & sync to google sheets
df.to_csv('./data/clean/ri-covid-19-clean.csv', index=False)
sync_sheets(df, 'statewide')
def clean_geographic(fname):
print('[status] cleaning city/town info')
df = | pd.read_csv(f'./data/raw/{fname}.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 21 23:30:41 2019
@author:
used to select events from semantic lines and visualize LDA to check consistency
"""
import os
#import sys
import argparse
import json
import numpy as np
from LDA import lda_model, corp_dict
#import random as rd
#from gensim.models import CoherenceModel
import gensim
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from sklearn import preprocessing
#from datetime import Datetime
import datetime
import matplotlib as mpl
if __name__=='__main__':
print('begin')
parser = argparse.ArgumentParser()
parser.add_argument("-k","--k",type = int,default = 8)#topic number
parser.add_argument('--tfidf', dest='tf_idf', action='store_true')
parser.add_argument('--no-tfidf', dest='tf_idf', action='store_false')
parser.set_defaults(tf_idf=True)
parser.add_argument("-st_time",'--start_time',type = str,default = None)
parser.add_argument('-ed_time','--end_time',type = str,default = None)
parser.add_argument('-granu','--granularity',type = str,default = 'day')
parser.add_argument("-cksz","--chunksize",type = int,default = 32)
parser.add_argument("-ps","--passes",type = int,default = 10)
parser.add_argument("-ite","--iteration",type = int,default = 5)
parser.add_argument("-db","--dictionary_below",type = int,default = 15)
parser.add_argument("-da","--dictionary_above",type = float,default = 0.9)
parser.add_argument("-al","--alpha",type = str,default = 'asymmetric')
parser.add_argument("-dc","--decay",type = float,default = 0.5)
args = parser.parse_args()
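    # Example invocation (hypothetical script name; the flags are the ones defined above):
    #   python select_event_lda.py -k 8 --tfidf -granu hour -st_time "2019-09-01" -ed_time "2019-09-07"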
# def process_data(path = 'test.json'):
# inp = open(path,'rb')
# data = json.load(inp)
# data = pd.DataFrame(data)
# data = data.fillna('') #na request
# inp.close()
# data['time'] = pd.to_datetime(data.time.values)
# #sort time 1st
# data = data.sort_index(by = 'time',ascending = True)
# data = data.drop_duplicates(subset=['passage'], keep=False)
inp = open('test.json','rb')
data = json.load(inp)
data = pd.DataFrame(data)
print(data.head())
data['time'] = pd.to_datetime(data.time.values)
#get data prepared and LDA model ready
labels = data['label'].values
passages = data['passage'].values
headlines = data['headline'].values
str_time = data['time'].values
#get semantic-scaled
#change neg's sign
semantic_value = data['semantic_value'].values
semantic_value = np.array([np.array(x) for x in semantic_value])
semantic_arr = semantic_value.max(1) #get semantic value ready
neg_idx = np.where(labels==0)#0 represent neg, 1 represent pos
pos_idx = np.where(labels==1)
semantic_arr[neg_idx] = -semantic_arr[neg_idx]#get full representative semantics
data['semantic_arr'] = semantic_arr
#scale
    #scale the data so the plot is more obvious / scale the pos and neg parts separately to widen the gap
neg_semantic = semantic_arr[neg_idx].reshape(-1, 1)
pos_semantic = semantic_arr[pos_idx].reshape(-1,1)
pos_scaler = preprocessing.StandardScaler().fit(pos_semantic)
neg_scaler = preprocessing.StandardScaler().fit(neg_semantic)
pos_semantic = pos_scaler.transform(pos_semantic)
pos_semantic = np.array([float(x) for x in pos_semantic])
neg_semantic = neg_scaler.transform(neg_semantic)
neg_semantic = np.array([float(x) for x in neg_semantic])
scale_semantic = np.zeros(len(semantic_arr))
scale_semantic[neg_idx] = neg_semantic
scale_semantic[pos_idx] = pos_semantic
data['scale_semantic'] = scale_semantic
str_time = data['time'].values
str_time = [str(x).split('.')[0] for x in str_time]
#print(str_time[:100])
myFormat = "%Y-%m-%d %H:%M"
datetime_arr = [datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S').strftime(myFormat) for x in str_time]
datetime_arr = [datetime.datetime.strptime(x,myFormat) for x in datetime_arr] #change to datetime obj
all_time_arr = pd.to_datetime(datetime_arr)
#set granu
gran = args.granularity
if gran=='day':
time_index = [x.replace(hour = 0,minute = 0) for x in datetime_arr]#according to granularity choose the suitable time index
time_index = pd.to_datetime(time_index)
elif gran=='hour':
time_index = [x.replace(minute = 0) for x in datetime_arr]#according to granularity choose the suitable time index
time_index = | pd.to_datetime(time_index) | pandas.to_datetime |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def object_creation():
s = pd.Series([1, np.nan])
dates = pd.date_range('20130101', periods=2)
df = pd.DataFrame(np.random.randn(2, 3), index=dates, columns=list('ABC'))
df2 = pd.DataFrame({'A': pd.Timestamp('20130102'),
'B': pd.Series(1, index=list(range(2)), dtype='float32'),
'C': np.array([3] * 2, dtype='int32'),
'D': pd.Categorical(['test', 'train'])})
print(df2.dtypes)
return df
def viewing_data():
print(df.head())
print(df.tail(1))
print(df.index)
print(df.columns)
# DataFrame.to_numpy() can be an expensive operation when df has columns with different data types
print(df.to_numpy())
print(df.describe())
print(df.T)
print(df.sort_index(axis=1, ascending=False))
print(df.sort_values(by='B'))
def selection():
# Getting
    print(df['A']) # Selecting a single column; equivalent to df.A
print(df[:2]) # Selecting via [], which slices the rows
print(df[:'20130102'])
# Selection by label
print(df.loc['20130101'])
print(df.loc[:, ['A', 'B']])
# Selection by position
print(df.iloc[1])
print(df.iloc[:1, 1:2])
print(df.iloc[[0, 1], [0, 2]])
print(df.iat[1, 1]) # For getting fast access to a scalar
# Boolean indexing
print(df[df['A'] > 0])
print(df[df > 0])
df2 = df.copy()
df2['D'] = ['one', 'two']
print(df2[df2['D'].isin(['two'])])
# Setting
df.at['20130101', 'A'] = 0
df.iat[0, 1] = 0
df.loc[:, 'C'] = np.array([5] * len(df))
print(df)
df2 = df.copy()
df2[df2 > 0] = -df2
print(df2)
def missing_data():
# pandas uses np.nan to represent missing data
df1 = df.reindex(index=df.index[:2], columns=list(df.columns) + ['D'])
df1.loc[:df.index[0], 'D'] = 1
print(df1)
print(df1.dropna(how='any'))
print(df1.fillna(value=5))
print(pd.isna(df1))
def operations():
print(df.mean()) # operations in general exclude missing data
print(df.mean(1)) # same operation on the other axis
s = pd.Series([1, np.nan], index=df.index).shift(1)
print(df)
print(df.sub(s, axis='index'))
print(df.apply(np.cumsum))
print(df.apply(lambda x: x.max() - x.min()))
print(pd.Series(np.random.randint(0, 7, size=10)).value_counts()) # histogramming
print(pd.Series(['Aaba', np.nan]).str.lower())
def merge():
# Adding a column to a DataFrame is relatively fast. However, adding a row requires a copy, and may be expensive.
print(pd.concat([df[:1], df[1:]]))
left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
print(pd.merge(left, right, on='key')) # join, SQL style merges
def grouping():
df0 = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
print(df0.groupby(['A', 'B']).sum())
def reshaping():
tuples = list(zip(*[['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']]))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
df0 = pd.DataFrame(np.random.randn(4, 2), index=index, columns=['A', 'B'])
print(df0)
stacked = df0.stack()
print(stacked)
print(stacked.unstack()) # by default unstacks the last level
print(stacked.unstack(0))
df0 = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 3,
'B': ['A', 'B', 'C'] * 4,
'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2,
'D': np.random.randn(12),
'E': np.random.randn(12)})
print(pd.pivot_table(df0, values='D', index=['A', 'B'], columns=['C']))
def categoricals():
df0 = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df0["grade"] = df0["raw_grade"].astype("category") # convert to a categorical data type
print(df0["grade"])
df0["grade"].cat.categories = ["very good", "good", "very bad"] # rename the categories
print(df0["grade"])
# Reorder the categories and simultaneously add the missing categories
df0["grade"] = df0["grade"].cat.set_categories(["very bad", "bad", "medium", "good", "very good"])
print(df0["grade"])
# Sorting is per order in the categories, not lexical order
print(df0.sort_values(by="grade"))
def plotting():
index = pd.date_range('1/1/2000', periods=1000)
df0 = pd.DataFrame(np.random.randn(1000, 4), index=index, columns=['A', 'B', 'C', 'D'])
df0 = df0.cumsum()
df0.plot()
plt.legend(loc='best')
# plt.show()
def getting_data_in_out():
if not os.path.exists('tmp'):
os.mkdir('tmp')
df.to_csv('tmp/foo.csv')
print( | pd.read_csv('tmp/foo.csv') | pandas.read_csv |
import pandas as pd
import os
from configparser import ConfigParser, NoOptionError, NoSectionError
from datetime import datetime
import statistics
import numpy as np
import glob
from simba.drop_bp_cords import *
from simba.rw_dfs import *
def time_bins_movement(configini,binLength):
dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
config = ConfigParser()
configFile = str(configini)
config.read(configFile)
projectPath = config.get('General settings', 'project_path')
csv_dir_in = os.path.join(projectPath, 'csv', 'outlier_corrected_movement_location')
vidLogFilePath = os.path.join(projectPath, 'logs', 'video_info.csv')
vidinfDf = pd.read_csv(vidLogFilePath)
vidinfDf["Video"] = vidinfDf["Video"].astype(str)
try:
wfileType = config.get('General settings', 'workflow_file_type')
except NoOptionError:
wfileType = 'csv'
noAnimals = config.getint('General settings', 'animal_no')
columnHeaders, shifted_columnHeaders, logList = [], [], []
logDf = pd.DataFrame(columns=['Videos_omitted_from_time_bin_analysis'])
for animal in range(noAnimals):
animalBp = config.get('process movements', 'animal_' + str(animal + 1) + '_bp')
columnHeaders.append([animalBp + '_x', animalBp + '_y'])
shifted_columnHeaders.append([animalBp + '_x_shifted', animalBp + '_y_shifted'])
columnHeaders_flat = [item for sublist in columnHeaders for item in sublist]
shifetcolheaders_flat = [item for sublist in shifted_columnHeaders for item in sublist]
VideoNo_list, columnNames1, fileCounter = [], [], 0
try:
multiAnimalIDList = config.get('Multi animal IDs', 'id_list')
multiAnimalIDList = multiAnimalIDList.split(",")
if multiAnimalIDList[0] != '':
multiAnimalStatus = True
print('Applying settings for multi-animal tracking...')
else:
multiAnimalStatus = False
multiAnimalIDList = []
for animal in range(noAnimals):
multiAnimalIDList.append('Animal ' + str(animal + 1) + ' ')
print('Applying settings for classical tracking...')
except NoSectionError:
multiAnimalIDList = []
for animal in range(noAnimals):
multiAnimalIDList.append('Animal ' + str(animal + 1) + ' ')
multiAnimalStatus = False
print('Applying settings for classical tracking...')
########### FIND CSV FILES ###########
filesFound = glob.glob(csv_dir_in + '/*.' + wfileType)
print('Processing movement data for ' + str(len(filesFound)) + ' files...')
for currentFile in filesFound:
mov_and_vel_headers, distanceHeaders = [], []
frameCounter, readHeadersList, finalList, concatList, finalList, distanceCols = 0, [], [], [], [], []
currVideoName = os.path.basename(currentFile).replace('.' + wfileType, '')
videoSettings = vidinfDf.loc[vidinfDf['Video'] == currVideoName]
try:
fps = int(videoSettings['fps'])
currPixPerMM = float(videoSettings['pixels/mm'])
except TypeError:
print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
csv_df = read_df(currentFile, wfileType)
try:
csv_df = csv_df.set_index('scorer')
except KeyError:
pass
colHeaders = getBpHeaders(configini)
csv_df.columns = colHeaders
csv_df = csv_df[columnHeaders_flat]
csv_df_shifted = csv_df.shift(-1, axis=0)
csv_df_shifted.columns = shifetcolheaders_flat
csv_df = | pd.concat([csv_df, csv_df_shifted], axis=1) | pandas.concat |
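        # Hedged sketch (not in the original file): with the current and shifted coordinates
        # side by side, per-frame movement for one body part is typically the Euclidean
        # distance converted to millimetres, e.g.
        #   dx = csv_df[animalBp + '_x'] - csv_df[animalBp + '_x_shifted']
        #   dy = csv_df[animalBp + '_y'] - csv_df[animalBp + '_y_shifted']
        #   movement_mm = np.sqrt(dx ** 2 + dy ** 2) / currPixPerMM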
import numpy as np
import pandas as pd
url = 'Reordered Linescan_nro 31 label JORDAN_234_P1_201901271901_MGA94_55.csv'
dfdos = | pd.read_csv(url) | pandas.read_csv |
import pandas as pd
#import arrow
def fips_glue(row):
x = int(str(row['STATE']) + str(row['COUNTY']).zfill(3))
return x
def pop_mortality(row):
x = float("{:.4f}".format(row['Deaths'] / row['POPESTIMATE2019']* 100))
return x
def case_mortality(row):
if row['Confirmed'] == 0:
return 0
x = float("{:.4f}".format(row['Deaths'] / row['Confirmed']* 100))
return x
def pop_confirmed(row):
x = float("{:.4f}".format(row['Confirmed'] / row['POPESTIMATE2019']* 100))
return x
def x_in_y(row):
if row['Confirmed'] == 0:
return 0
x = int(row['POPESTIMATE2019'] / row['Confirmed'])
return x
def case_per_1k(row):
if row['Confirmed'] == 0:
return 0
x = float("{:.2f}".format(row['Confirmed'] / (row['POPESTIMATE2019'] / 1000)))
return x
def death_per_1k(row):
if row['Confirmed'] == 0:
return 0
x = float("{:.2f}".format(row['Deaths'] / (row['POPESTIMATE2019'] / 1000)))
return x
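# Illustrative only: the row-wise helpers above applied to one hypothetical county
# record (all values are made up).
def _demo_county_metrics():
    row = pd.Series({'STATE': 44, 'COUNTY': 7, 'POPESTIMATE2019': 179883,
                     'Confirmed': 3600, 'Deaths': 120})
    return {
        'FIPS': fips_glue(row),                 # 44007
        'case_mortality_%': case_mortality(row),
        'pop_mortality_%': pop_mortality(row),
        'cases_per_1k': case_per_1k(row),
        'deaths_per_1k': death_per_1k(row),
    }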
def covid_daily_report(d):
cvd_data = pd.read_csv("../COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/{0}.csv".format(d))
cvd_df = pd.DataFrame(cvd_data, columns = ['FIPS', 'Admin2', 'Province_State', 'Confirmed', 'Deaths'])
dem_data = pd.read_csv("./data/CO-EST2019-alldata.csv")
dem_df = pd.DataFrame(dem_data, columns = [ 'STATE', 'COUNTY', 'POPESTIMATE2019','DEATHS2019'])
dem_df['FIPS'] = dem_df.apply(fips_glue, axis = 1)
df3 = | pd.merge(dem_df,cvd_df, on='FIPS') | pandas.merge |
import pandas as pd
import numpy as np
svy18 = | pd.read_csv('Survey_2018.csv') | pandas.read_csv |
from collections import Counter
import pandas as pd
import networkx as nx
from biometrics.utils import get_logger
logger = get_logger()
class Cluster:
def __init__(self, discordance_threshold=0.05):
self.discordance_threshold = discordance_threshold
def cluster(self, comparisons):
assert comparisons is not None, "There is no fingerprint comparison data available."
if len(comparisons) < 1:
logger.warning('There are not enough comparisons to cluster.')
return None
sample2group = dict(zip(
comparisons['ReferenceSample'], comparisons['ReferenceSampleGroup']))
sample2group.update(dict(zip(
comparisons['QuerySample'], comparisons['QuerySampleGroup'])))
comparisons['is_same_group'] = comparisons['DiscordanceRate'].map(
lambda x: 1 if ((x <= self.discordance_threshold) & (~ | pd.isna(x) | pandas.isna |
import json
import requests
import pandas as pd
import websocket
# Get Alpaca API Credential
endpoint = "https://data.alpaca.markets/v2"
headers = json.loads(open("key.txt", 'r').read())
def hist_data(symbols, start="2021-01-01", timeframe="1Hour", limit=50, end=""):
"""
    returns historical bar data for an iterable of ticker symbols
    symbols should be a list/tuple of ticker strings, e.g. symbols = ["MSFT", "AMZN", "GOOG"]
"""
df_data_tickers = {}
for symbol in symbols:
bar_url = endpoint + "/stocks/{}/bars".format(symbol)
params = {"start":start, "limit" :limit, "timeframe":timeframe}
data = {"bars": [], "next_page_token":'', "symbol":symbol}
while True:
r = requests.get(bar_url, headers = headers, params = params)
r = r.json()
if r["next_page_token"] == None:
data["bars"]+=r["bars"]
break
else:
params["page_token"] = r["next_page_token"]
data["bars"]+=r["bars"]
data["next_page_token"] = r["next_page_token"]
df_data = | pd.DataFrame(data["bars"]) | pandas.DataFrame |
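        # Hypothetical usage sketch (assumes valid Alpaca credentials in key.txt and that the
        # function collects each symbol's frame into df_data_tickers before returning it):
        #   dfs = hist_data(["MSFT", "AMZN", "GOOG"], start="2021-01-01", timeframe="1Day", limit=1000)
        #   dfs["MSFT"].head()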
import datetime
try:
import pandas as pd
from pandas.testing import assert_index_equal
except ImportError:
pd = None
import numpy as np
import bsonnumpy
from test import client_context, unittest
def to_dataframe(seq, dtype, n):
data = bsonnumpy.sequence_to_ndarray(seq, dtype, n)
if '_id' in dtype.fields:
return pd.DataFrame(data, index=data['_id'])
else:
return | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import joblib
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import mutual_info_classif
from dl_omics import create_l1000_df
from utils import create_umap_df, scatter_plot
matplotlib.use('Agg')
sns.set(style='whitegrid')
sns.set_context('paper', font_scale=1.3)
current_file_path = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.join(current_file_path, os.path.pardir)
def save_figure(image_path, fig=None):
if fig is None:
fig = plt.gcf()
fig.savefig(image_path, dpi=300, bbox_inches='tight', pad_inches=0.01,
pil_kwargs={'compression': 'tiff_lzw'})
plt.close()
def create_grid_search_plots(grid_search_file, image_folder):
search = joblib.load(grid_search_file)
search_result_df = pd.DataFrame(search.cv_results_)
param_name = 'param_selector__k'
param_display_name = 'k'
# find configuration with best test score for each k
best_score_per_k_index = search_result_df.groupby(param_name)['mean_test_score']\
.idxmax()
search_result_df = search_result_df.loc[best_score_per_k_index, :]
# convert results to long format
param_names = ['param_scaler', 'param_selector__k', 'param_svc__C']
train_split_names = [c for c in search_result_df.columns if
c.startswith('split') and c.endswith('train_score')]
test_split_names = [c for c in search_result_df.columns if
c.startswith('split') and c.endswith('test_score')]
data = []
for index, row in search_result_df.iterrows():
param_values = row[param_names].tolist()
train_scores = row[train_split_names].tolist()
test_scores = row[test_split_names].tolist()
for train_score in train_scores:
data.append(param_values + ['train', train_score, row.mean_train_score, index])
for test_score in test_scores:
data.append(param_values + ['test', test_score, row.mean_test_score, index])
plot_data = pd.DataFrame(
data, columns=['scaler', 'k', 'C', 'split', 'MCC', 'mean', 'index'])
plot_data['scaler'] = plot_data['scaler'].astype(str)
plot_data = plot_data.rename(columns={'split': 'Split'})
fig, ax = plt.subplots(figsize=(9, 4))
sns.lineplot(
data=plot_data,
x=param_display_name, y='MCC', hue='Split', hue_order=['train', 'test'], ax=ax
)
x_ticks = sorted(plot_data[param_display_name].unique())
x_ticks = x_ticks[::2]
ax.set_xticks(x_ticks)
x = search.best_params_[param_name.replace('param_', '')]
y = search.best_score_
ax.plot(x, y, '*k', markersize=15, zorder=-1, alpha=0.8,
color=ax.lines[1].get_color())
ax.set_xlim(plot_data[param_display_name].min(), plot_data[param_display_name].max())
ax.set_xlabel('Number of features')
ax.set_ylabel('Model performance (MCC)')
image_path = os.path.join(image_folder, f'figure03_grid_search_{param_display_name}.tiff')
save_figure(image_path)
def create_data_plots(image_folder):
df, genes, meta_columns = create_l1000_df()
target_name = 'DILI'
df[target_name] = df['dili'].replace({0: 'negative', 1: 'positive'})
reduced_df = create_umap_df(df, features=genes, densmap=True)
fig, ax = plt.subplots(figsize=(9, 6))
scatter_plot(reduced_df, hue='DILI', alpha=0.8, s=70, ax=ax)
image_path = os.path.join(image_folder, 'figure01_umap_DILI.tiff')
save_figure(image_path)
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(9, 3))
corr_df = df[genes].corr(method='spearman').abs()
np.fill_diagonal(corr_df.values, np.nan)
sns.histplot(corr_df.max(axis=1), bins=20, ax=ax1)
ax1.set_xlabel('Maximum absolute Spearman\'s\ncorrelation between features')
mi = mutual_info_classif(df[genes], df['dili'])
sns.histplot(mi, bins=10, ax=ax2)
ax2.set_xlabel('Mutual information with DILI class')
ax2.set_ylabel(None)
image_path = os.path.join(image_folder, 'figure02_corr_mi.tiff')
save_figure(image_path)
def create_coeff_boxplot(image_folder):
df, genes, meta_columns = create_l1000_df()
models_file = os.path.join(base_dir, 'models.pkl')
models = joblib.load(models_file)
coefficient_list = []
for model in models:
selected_features = model.named_steps['selector'].get_support()
selected_features = np.array(genes)[selected_features]
coefficients = np.abs(model.named_steps['svc'].coef_)[0]
coefficient_list.append(
| pd.DataFrame({'Gene': selected_features, 'Coefficient': coefficients}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# this file. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
#
import os
import subprocess
import json
import random
import numpy as np
import pandas as pd
from FetchOpenSHA import *
def create_earthquake_scenarios(scenario_info, stations):
# Number of scenarios
source_num = scenario_info.get('Number', 1)
if source_num == 'All':
# Large number to consider all sources in the ERF
source_num = 10000000
# Directly defining earthquake ruptures
if scenario_info['Generator'] == 'Simulation':
# TODO:
print('Physics-based earthquake simulation is under development.')
return 1
# Searching earthquake ruptures that fulfill the request
elif scenario_info['Generator'] == 'Selection':
# Collecting all possible earthquake scenarios
lat = []
lon = []
for s in stations['Stations']:
lat.append(s['Latitude'])
lon.append(s['Longitude'])
# Reference location
lat = np.mean(lat)
lon = np.mean(lon)
ref_station = [lat, lon]
# Getting earthquake rupture forecast data
source_type = scenario_info['EqRupture']['Type']
if source_type == 'ERF':
source_model = scenario_info['EqRupture']['Model']
source_name = scenario_info['EqRupture'].get('Name', None)
min_M = scenario_info['EqRupture'].get('min_Mag', 5.0)
max_M = scenario_info['EqRupture'].get('max_Mag', 9.0)
max_R = scenario_info['EqRupture'].get('max_Dist', 1000.0)
eq_source = getERF(source_model, True)
erf_data = export_to_json(eq_source, ref_station, outfile = None, \
EqName = source_name, minMag = min_M, \
maxMag = max_M, maxDistance = max_R, \
maxSources = np.max([500, source_num]))
# Parsing data
feat = erf_data['features']
tag = []
for i, cur_f in enumerate(feat):
if source_name and (source_name not in cur_f['properties']['Name']):
continue
if min_M > cur_f['properties']['Magnitude']:
continue
tag.append(i)
# Abstracting desired ruptures
s_tag = random.sample(tag, min(source_num, len(tag)))
erf_data['features'] = list(feat[i] for i in s_tag)
scenario_data = dict()
for i, rup in enumerate(erf_data['features']):
scenario_data.update({i: {
'Type': source_type,
'RuptureForecast': source_model,
'SourceIndex': rup['properties']['Source'],
'RuptureIndex': rup['properties']['Rupture']
}})
# Cleaning tmp outputs
del erf_data
elif source_type == 'PointSource':
scenario_data = dict()
try:
magnitude = scenario_info['EqRupture']['Magnitude']
location = scenario_info['EqRupture']['Location']
average_rake = scenario_info['EqRupture']['AverageRake']
average_dip = scenario_info['EqRupture']['AverageDip']
scenario_data.update({0: {
'Type': source_type,
'Magnitude': magnitude,
'Location': location,
'AverageRake': average_rake,
'AverageDip': average_dip
}})
except:
print('Please check point-source inputs.')
# return
return scenario_data
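# Hedged example (not taken from the repository): a minimal scenario_info dict that
# exercises the PointSource branch above. The structure of 'Location' is an assumption;
# create_earthquake_scenarios() only passes it through.
_example_point_source_scenario_info = {
    'Generator': 'Selection',
    'Number': 1,
    'EqRupture': {
        'Type': 'PointSource',
        'Magnitude': 6.5,
        'Location': {'Latitude': 37.9, 'Longitude': -122.3, 'Depth': 10.0},
        'AverageRake': 0.0,
        'AverageDip': 90.0
    }
}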
def create_wind_scenarios(scenario_info, stations, data_dir):
# Number of scenarios
source_num = scenario_info.get('Number', 1)
# Directly defining earthquake ruptures
if scenario_info['Generator'] == 'Simulation':
# Collecting site locations
lat = []
lon = []
for s in stations['Stations']:
lat.append(s['Latitude'])
lon.append(s['Longitude'])
# Save Stations.csv
df = pd.DataFrame({
'lat': lat,
'lon': lon
})
df.to_csv(data_dir + 'Stations.csv', index = False, header = False)
# Save Lat_w.csv
lat_w = np.linspace(min(lat) - 0.5, max(lat) + 0.5, 100)
df = pd.DataFrame({'lat_w': lat_w})
df.to_csv(data_dir + 'Lat_w.csv', index = False, header = False)
# Parsing Terrain info
df = pd.read_csv(data_dir + scenario_info['Terrain']['Longitude'],
header = None, index_col = None)
df.to_csv(data_dir + 'Long_wr.csv', header = False, index = False)
df = pd.read_csv(data_dir + scenario_info['Terrain']['Latitude'],
header = None, index_col = None)
df.to_csv(data_dir + 'Lat_wr.csv', header = False, index = False)
df = pd.read_csv(data_dir + scenario_info['Terrain']['Size'],
header = None, index_col = None)
df.to_csv(data_dir + 'wr_sizes.csv', header = False, index = False)
df = pd.read_csv(data_dir + scenario_info['Terrain']['z0'],
header = None, index_col = None)
df.to_csv(data_dir + 'z0r.csv', header = False, index = False)
# Parsing storm properties
param = []
param.append(scenario_info['Storm']['Landfall']['Latitude'])
param.append(scenario_info['Storm']['Landfall']['Longitude'])
param.append(scenario_info['Storm']['LandingAngle'])
param.append(scenario_info['Storm']['Pressure'])
param.append(scenario_info['Storm']['Speed'])
param.append(scenario_info['Storm']['Radius'])
df = pd.DataFrame({'param': param})
df.to_csv(data_dir + 'param.csv', index = False, header = False)
df = pd.read_csv(data_dir + scenario_info['Storm']['Track'],
header = None, index_col = None)
df.to_csv(data_dir + 'Track.csv', header = False, index = False)
# Saving del_par.csv
del_par = [0, 0, 0] # default
df = | pd.DataFrame({'del_par': del_par}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 16:42:21 2019
@author: owen.henry
"""
from __future__ import division
#Pyodbc is used to connect to various databases
from pyodbc import connect
#CespanarVariables is my own script to track variables like database names and
#drivers between scripts. For general use this needs to be converted to something
#far more secure, but works for the moment
import Cespanar_variables as cv
#Pandas combines the data and runs analysis
import pandas
import datetime as dt
import sys
import matplotlib.pyplot as plt
import jinja2
#Initialize variables for time periods, as these will be used globally
this_period_start = ''
this_period_end = ''
last_period_start = ''
last_period_end = ''
today = dt.date.today()
yesterday = dt.date.today() + dt.timedelta(-1)
day = today.day
month = today.month
year = today.year
current_quarter = ''
#This tracks various values per table/piece of market source data
#This is called frequently in loops
ms_values_dict = {'audiences': ['audlong','audShort','audiences'],
'campaigns' : ['camplong', 'campShort', 'campaigns'],
'creatives' : ['creativelong', 'creativeShort', 'creatives'],
'fiscal_year' : ['fylong', 'fyShort', 'fiscal_years'],
'media' : ['mediumLong', 'mediumShort','media'],
'platforms' : ['platLong','platShort','platforms'],
'programs' : ['progLong', 'progShort','programs']
}
form_and_revenue_query = '''
SELECT COF.ContactsOnlineFormID,
COF.VanID,
COF.OnlineFormID,
CONVERT(date, COF.DateCreated) as DateCreated,
IsNewContact,
BatchEmailJobDistributionID,
MarketSource,
Amount,
LOWER(SUBSTRING(MarketSource, 1, 3)) as 'progShort',
LOWER(SUBSTRING(MarketSource, 4, 3)) as 'platShort',
LOWER(SUBSTRING(MarketSource, 7, 2)) as 'mediumShort',
LOWER(SUBSTRING(MarketSource, 9, 2)) as 'fyShort',
LOWER(SUBSTRING(MarketSource, 11, 3)) as 'campShort',
LOWER(SUBSTRING(MarketSource, 14, 2)) as 'audShort',
LOWER(SUBSTRING(MarketSource, 16, 3)) as 'creativeShort',
LOWER(SUBSTRING(MarketSource, 19, 2)) as 'Iteration'
FROM [dbo].[CRS_ContactsOnlineForms] COF
LEFT JOIN [dbo].[CRS_ContactsContributions] CC ON COF.ContactsOnlineFormID = CC.ContactsOnlineFormID
LEFT JOIN [dbo].[CRS_ContactsContributionsCodes] CCC ON CC.ContactsContributionID = CCC.COntactsContributionID
LEFT JOIN [dbo].[CRS_Codes] C on C.CodeID = CCC.CodeID
'''
ms_where_clause = '''
MarketSource IS NOT NULL
AND LEN(MarketSource) = 20
'''
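# Worked example of the 20-character MarketSource layout parsed above (the code value
# itself is hypothetical): 'crsfbkem19abcgndon01' decomposes into
#   program 'crs' (chars 1-3), platform 'fbk' (4-6), medium 'em' (7-8), fiscal year '19' (9-10),
#   campaign 'abc' (11-13), audience 'gn' (14-15), creative 'don' (16-18), iteration '01' (19-20)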
email_query = '''
SELECT A.EmailMessageID,
EmailMessageDistributionID,
DateSent,
DateScheduled,
EmailMessageName,
EmailSubject
FROM CRS_EmailMessageDistributions A
INNER JOIN CRS_EmailMessages B ON A.EmailMessageID = B.EmailMessageID
WHERE A.EmailMessageID NOT IN (
884,
885
)
'''
def set_time_period(period, verbose = False):
try:
global this_period_start
global this_period_end
global last_period_start
global last_period_end
global current_quarter
if period == 'week':
this_period_start = today - dt.timedelta(dt.date.weekday(today))
this_period_end = this_period_start + dt.timedelta(6)
last_period_start = this_period_start - dt.timedelta(7)
last_period_end = this_period_end - dt.timedelta(7)
if verbose == True:
print("This week starts: %s"%this_period_start)
print("This week ends: %s"%this_period_end)
print("Last week started: %s"%last_period_start )
print("Last week ended: %s" %last_period_end )
if period == 'month':
next_month = today.replace(day=28) + dt.timedelta(days=4)
this_period_start = dt.date(year, month, 1)
this_period_end = next_month - dt.timedelta(next_month.day)
last_period_end = this_period_start - dt.timedelta(1)
last_period_start = last_period_end - dt.timedelta(last_period_end.day -1)
if verbose == True:
print("This month starts: %s" %this_period_start )
print("This month ends: %s" %this_period_end )
print("Last month started: %s" %last_period_start )
print("Last month ended: %s" %last_period_end )
if period == 'quarter':
cur_month = int(today.month -1)
cur_quarter = int(cur_month//3)
if cur_quarter == 0:
current_quarter = "Q2"
this_period_start = dt.date(year, 1, 1)
this_period_end = dt.date(year, 3, 31)
last_period_start = dt.date(year - 1, 10, 1)
last_period_end = dt.date(year-1, 12, 31)
elif cur_quarter == 1:
current_quarter = "Q3"
this_period_start = dt.date(year, 4, 1)
this_period_end = dt.date(year, 6, 30)
last_period_start = dt.date(year - 1, 1, 1)
last_period_end = dt.date(year-1, 3, 31)
elif cur_quarter == 2:
current_quarter = "Q4"
this_period_start = dt.date(year, 7, 1)
this_period_end = dt.date(year, 9, 30)
last_period_start = dt.date(year - 1, 4, 1)
                last_period_end = dt.date(year - 1, 6, 30)
elif cur_quarter == 3:
current_quarter = "Q1"
this_period_start = dt.date(year, 10, 1)
this_period_end = dt.date(year, 12, 31)
last_period_start = dt.date(year - 1, 7, 1)
last_period_end = dt.date(year-1, 9, 30)
else:
raise ValueError('Set Quarter Fail')
if verbose == True:
print("This quarter started: %s" %this_period_start )
print("This quarter ended: %s" %this_period_end )
print("Last quarter started: %s" %last_period_start )
print("Last quarter ended: %s" %last_period_end )
if period == 'year':
this_period_start = dt.date(year, 1, 1)
this_period_end = today
last_period_start = dt.date(year - 1, 1, 1)
last_period_end = dt.date(year - 1, 12, 31)
if verbose == True:
print("This year starts: %s" %this_period_start )
print("This year ends: %s" %this_period_end )
print("Last year started: %s" %last_period_start )
print("Last year ended: %s" %last_period_end )
except Exception as e:
print(e)
sys.exit()
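#Example (hypothetical): populate the module-level period boundaries for the current month
#   set_time_period('month', verbose=True)
#   print(this_period_start, this_period_end, last_period_start, last_period_end)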
#Sets the week
def set_week():
try:
global this_period_start
global this_period_end
global last_period_start
global last_period_end
this_period_start = today - dt.timedelta(dt.date.weekday(today))
this_period_end = this_period_start + dt.timedelta(6)
last_period_start = this_period_start - dt.timedelta(7)
last_period_end = this_period_end - dt.timedelta(7)
except Exception as e:
print(e)
sys.exit()
#Sets the month
def set_month():
next_month = today.replace(day=28) + dt.timedelta(days=4)
global this_period_start
global this_period_end
global last_period_start
global last_period_end
this_period_start = dt.date(year, month, 1)
this_period_end = next_month - dt.timedelta(next_month.day)
last_period_end = this_period_start - dt.timedelta(1)
last_period_start = last_period_end - dt.timedelta(last_period_end.day -1)
#Sets the quarter, which at CRS runs October to September
def set_quarter():
try:
global current_quarter
global this_quarter_start
global this_quarter_end
global last_quarter_start
global last_quarter_end
cur_day = dt.date.today()
cur_month = int(cur_day.month -1)
cur_quarter = int(cur_month//3)
if cur_quarter == 0:
current_quarter = "Q2"
this_quarter_start = dt.date(year, 1, 1)
this_quarter_end = dt.date(year, 3, 31)
last_quarter_start = dt.date(year - 1, 10, 1)
last_quarter_end = dt.date(year-1, 12, 31)
elif cur_quarter == 1:
current_quarter = "Q3"
this_quarter_start = dt.date(year, 4, 1)
this_quarter_end = dt.date(year, 6, 30)
last_quarter_start = dt.date(year - 1, 1, 1)
last_quarter_end = dt.date(year-1, 3, 31)
elif cur_quarter == 2:
current_quarter = "Q4"
this_quarter_start = dt.date(year, 7, 1)
this_quarter_end = dt.date(year, 9, 30)
last_quarter_start = dt.date(year - 1, 4, 1)
            last_quarter_end = dt.date(year - 1, 6, 30)
elif cur_quarter == 3:
current_quarter = "Q1"
this_quarter_start = dt.date(year, 10, 1)
this_quarter_end = dt.date(year, 12, 31)
last_quarter_start = dt.date(year - 1, 7, 1)
last_quarter_end = dt.date(year-1, 9, 30)
else:
raise ValueError('Set Quarter Fail')
except Exception as e:
print(e)
sys.exit()
#Sets the year
def set_year():
global this_year_start
global this_year_end
global last_year_start
global last_year_end
this_year_start = dt.date(year, 1, 1)
this_year_end = dt.date(year, 12, 31)
last_year_start = dt.date(year - 1, 1, 1)
last_year_end = dt.date(year - 1, 12, 31)
#Test method for making sure that the dates are right
def time_period_test():
print("This week starts: %s"%this_week_start)
print("This week ends: %s"%this_week_end)
print("Last week started: %s"%last_week_start )
print("Last week ended: %s" %last_week_end )
print("This month starts: %s" %this_month_start )
print("This month ends: %s" %this_month_end )
print("Last month started: %s" %last_month_start )
print("Last month ended: %s" %last_month_end )
print("This quarter started: %s" %this_quarter_start )
print("This quarter ended: %s" %this_quarter_end )
print("Last quarter started: %s" %last_quarter_start )
print("Last quarter ended: %s" %last_quarter_end )
print("This year starts: %s" %this_year_start )
print("This year ends: %s" %this_year_end )
print("Last year started: %s" %last_year_start )
print("Last year ended: %s" %last_year_end )
print("The current quarter is %s" %current_quarter)
#This method creates a database connection given the requisite variables
def db_connect(driver, server, port, database, username, password):
connect_statement='DRIVER='+driver+';SERVER='+server+';PORT='+str(port)
connect_statement+=';DATABASE='+database+';UID='+username+';PWD='+password
cnxn = connect(connect_statement)
return cnxn
#This method creates a dataframe from a database connection and a query.
#If a second dataframe and mergecolumn is provided, it updates the frame based
#on the data in the query. This is useful for when you have matching data in
#multiple databases.
def frame_assembler(sql_query, cnxn, update_type = None,
dataframe = None, mergecol = None):
#print("Assembling dataframe based on query: ")
#print("")
#print(sql_query)
#try:
new_dataframe = pandas.read_sql(sql_query, cnxn)
if update_type == 'merge':
if dataframe is not None and mergecol is not None:
updated_frame = pandas.merge(dataframe,
new_dataframe,
on = mergecol,
how = 'left'
)
return updated_frame
elif dataframe is not None and mergecol is None:
raise ValueError('No merge column provided.')
sys.exit()
elif dataframe is None:
raise ValueError('Dataframe parameter cannot be empty!')
else:
raise ValueError('Problem assembling dataframe')
elif update_type == 'append':
if dataframe is not None:
updated_frame = dataframe.append(new_dataframe,
ignore_index = True)
return updated_frame
elif dataframe is None:
print('Error - dataframe parameter cannot be empty!')
else:
print('Error - problem assembling frame')
else:
return new_dataframe
#except Exception as e:
#print(e)
#sys.exit()
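#Hedged usage sketch (the queries and column names here are placeholders, not the real ones):
#   base_df = frame_assembler("SELECT MarketSource, Amount FROM transactions", ea_dw_cnxn)
#   merged_df = frame_assembler("SELECT progShort, progLong FROM programs", ms_db_cnxn,
#                               update_type='merge', dataframe=base_df, mergecol='progShort')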
def get_ms_data():
try:
#Connect to the EA Data Warehouse in Azure to pull transactions with codes
ea_dw_cnxn = db_connect(cv.az_driver,
cv.az_server,
cv.az_port,
cv.az_database,
cv.az_username,
cv.az_password)
#Connect to the code generator database to pull metadata on codes
ms_db_cnxn = db_connect(cv.cg_driver,
cv.cg_server,
cv.cg_port,
cv.cg_database,
cv.cg_username,
cv.cg_password)
ms_data_query = form_and_revenue_query + " WHERE " + ms_where_clause
ms_df = frame_assembler(ms_data_query, ea_dw_cnxn)
#This handles some small discrepancies in the data - some of the column
#names are upper case, some are lower case, and some are numbers.
#This difference in naming conventions created a small challenge when
#matching between Azure and the Code Generator, so this method enforces
#lower case to maintain a good match between both data sources.
#It then takes the existing EA Dataframe of transactions and puts it
#together with the existing metadata to produce a dataframe with all values
#necessary for reporting
for value in ms_values_dict.keys():
if value == 'fiscal_year' :
ms_query = 'SELECT ' + ms_values_dict[value][0] + ', ' + ms_values_dict[value][1]
ms_query += ' from ' + ms_values_dict[value][2]
else:
ms_query = 'SELECT ' + ms_values_dict[value][0] + ', ' + 'LCASE(' + ms_values_dict[value][1] + ')'
ms_query += ' as ' + ms_values_dict[value][1] + ' from ' + ms_values_dict[value][2]
ms_df = frame_assembler(ms_query, ms_db_cnxn, 'merge', ms_df, ms_values_dict[value][1])
return ms_df
except Exception as e:
print(e)
#This method takes a dataframe and other information and outputs a graph as
#a file. This will eventually be converted to add images to a pdf.
def figure_maker(dataframe, group_col, name, agg_method = 'count', plot_kind = 'bar'):
#try:
plot_base = dataframe.groupby([group_col])[group_col].agg(agg_method).sort_values(ascending = False)
plot = plot_base.plot(kind=plot_kind, figsize = (10,10))
fig = plot.get_figure()
fig.savefig(name)
plt.show()
#except Exception as e:
#print(e)
#print('%tb')
#else:
#return fig
#This creates my graphs by time period
def period_figure_maker(df, datecolumn, xlab, ylab, legend1, legend2, title):
x_count = (last_period_end - last_period_start)
plt.xlim(0, x_count.days)
#print(x_count.days)
#try:
#print('Setting datetime indexes...')
df['DateUnsubscribed'] = | pandas.to_datetime(df[datecolumn]) | pandas.to_datetime |
# coding=utf-8
# Author: <NAME>
# Date: Jun 30, 2019
#
# Description: Indexes certain genes and exports their list.
#
#
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import argparse
from utils import ensurePathExists
from scipy.stats import ks_2samp
from itertools import combinations
import swifter
# Separating by At Least One Match
def select_by_at_least_one_match(ilist, keeplist):
# Only keep genes that are found in any of our gene list
return [i for i in ilist if i in keeplist]
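# Illustrative only (made-up identifiers):
#   select_by_at_least_one_match(['ENSG01', 'ENSG02', 'ENSG03'], ['ENSG02'])  # -> ['ENSG02']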
if __name__ == '__main__':
parser = argparse.ArgumentParser()
celltypes = ['spermatocyte', 'spermatogonia', 'spermatid', 'enterocyte', 'neuron', 'muscle']
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=celltypes, help="Cell type. Defaults to spermatocyte")
parser.add_argument("--biotype", default='protein_coding', type=str, choices=['protein_coding'], help="Filter nodes by biotype (e.g., protein-coding)")
parser.add_argument("--attribute", default='TPM', type=str, help="Which attribute to plot. Defaults to 'TPM'.")
# parser.add_argument("--log", default=True, type=bool, help="Transforms attribute into log2(attribute).")
parser.add_argument("--minTPM", default=1, type=int, help="minLogTPM = math.log2(x). Defaults to 1.")
args = parser.parse_args()
celltype = args.celltype # spermatocyte or enterocyte
biotype = args.biotype
attribute = args.attribute
# log = args.log
minTPM = args.minTPM
    print('Exporting {celltype:s}-{biotype:s}-{attribute:s}'.format(celltype=celltype, biotype=biotype, attribute=attribute))
print('Loading {celltype:s} Files'.format(celltype=celltype))
path = '../../02-core_genes/results/'
df_HS = pd.read_csv(path + 'FPKM/HS/HS-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_gene')
df_MM = pd.read_csv(path + 'FPKM/MM/MM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_gene')
df_DM = pd.read_csv(path + 'FPKM/DM/DM-FPKM-{celltype:s}.csv.gz'.format(celltype=celltype), index_col='id_gene')
# Remove Duplicates
df_HS = df_HS.loc[~df_HS.index.duplicated(keep='first'), :]
df_MM = df_MM.loc[~df_MM.index.duplicated(keep='first'), :]
df_DM = df_DM.loc[~df_DM.index.duplicated(keep='first'), :]
# minTPM
if minTPM:
df_HS = df_HS.loc[(df_HS['TPM'] >= minTPM), :]
df_MM = df_MM.loc[(df_MM['TPM'] >= minTPM), :]
df_DM = df_DM.loc[(df_DM['TPM'] >= minTPM), :]
# Meta Genes
print('Loading {celltype:s} meta genes'.format(celltype=celltype))
dfM = pd.read_csv(path + 'meta-genes/meta-{celltype:s}-genes.csv.gz'.format(celltype=celltype), index_col='id_eggnog', usecols=['id_eggnog', 'id_string_HS', 'id_string_MM', 'id_string_DM'])
dfM['id_string_HS'] = dfM['id_string_HS'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_string_MM'] = dfM['id_string_MM'].apply(lambda x: x.split(',') if not pd.isnull(x) else [])
dfM['id_string_DM'] = dfM['id_string_DM'].apply(lambda x: x.split(',') if not | pd.isnull(x) | pandas.isnull |
import itertools
import json
import operator
import os
import re
import string
import subprocess as sp
import sys
import time
import warnings
from functools import reduce
from itertools import combinations
from math import sqrt
from time import sleep
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State
import numpy
import numpy as np
import pandas as pd
import scipy
import scipy.cluster.hierarchy as shc
import scipy.stats
from scipy import *
from scipy import stats
import matplotlib
matplotlib.use('Agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.lines import Line2D
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import chart_studio.plotly as py
import plotly
#plotly.offline.init_notebook_mode() # To embed plots in the output cell of the notebook
import plotly.graph_objs as go
import metabolyze as met
#import RNAseq
result = met.Analysis('skeleton_output.tsv','Groups.csv')
def register_callbacks(dashapp):
COLORS20 = [
'#1f77b4',
'#aec7e8',
'#ff7f0e',
'#ffbb78',
'#2ca02c',
'#98df8a',
'#d62728',
'#ff9896',
'#9467bd',
'#c5b0d5',
'#8c564b',
'#c49c94',
'#e377c2',
'#f7b6d2',
'#7f7f7f',
'#c7c7c7',
'#bcbd22',
'#dbdb8d',
'#17becf',
'#9edae5',
]
@dashapp.callback(
dash.dependencies.Output('intermediate-value', 'children'),
[dash.dependencies.Input('button', 'n_clicks')],
[dash.dependencies.State('my-dropdown', 'value')])
def update_output(n_clicks,value):
standard = pd.read_table(result.data)
detection_column_index = standard.columns.get_loc("detections")
standard = standard.iloc[:,0:detection_column_index]
standard.index = standard['Metabolite']
del standard['Metabolite']
matrices = []
sample_groups = result.get_groups()
#print (comparison[0])
#value = value.replace("'", "")
value_list = value.split(',')
#value_list_strip = [value.replace(" ", "") for value in value_list]
value_list_final = [val[1:-1] for val in value_list]
#value_list = value_list.replace("'", "")
#test_condition = 'GPI1'
#ids = (sample_groups[test_condition])
#print('wtf')
comparison_ids = []
matrices = []
for condition in value_list_final:
#print("condition",condition)
if condition in sample_groups:
test = condition
#real_condition = condition.replace("'", "")
ids = (sample_groups[test])
#print (ids)
matrices.append((result.get_imputed_full_matrix(result.get_matrix(ids=ids),param='detected')))
comparison_ids.append(ids)
#print(matrices)
#print("yowtf",matrices[0].shape[1])
group_sample_number = int((matrices[0].shape)[1])
group_sample_number_2 = int(group_sample_number+ ((matrices[1].shape)[1]))
df_m = reduce(lambda left,right: pd.merge(left,right,left_index=True, right_index=True), matrices)
intensity_matrix = reduce(lambda left,right: pd.merge(left,right,left_index=True, right_index=True), matrices)
blank_matrix = pd.DataFrame(result.get_matrix(result.get_ids('Blank')))
#blank_matrix.to_csv(results_folder+'Tables/'+'blank_intensity.csv')
blank_threshold = pd.DataFrame(blank_matrix.mean(axis=1)*3)+10000
blank_threshold['Metabolite'] = blank_threshold.index
blank_threshold.columns = ['blank_threshold','Metabolite']
#print(df_m.head())
df_m['ttest_pval'] = ((scipy.stats.ttest_ind(df_m.iloc[:, :group_sample_number], df_m.iloc[:, group_sample_number:group_sample_number_2], axis=1))[1])
df_m['1/pvalue'] = float(1)/df_m['ttest_pval']
group_1_df = (pd.DataFrame(df_m.iloc[:, :group_sample_number]))
group_2_df = (pd.DataFrame(df_m.iloc[:, group_sample_number:group_sample_number_2]))
df_m[value_list_final[0]+'_Mean'] = (group_1_df.mean(axis=1))
df_m[value_list_final[1]+'_Mean'] = (group_2_df.mean(axis=1))
df_m['Log2FoldChange'] = np.log2(((group_1_df.mean(axis=1)))/((group_2_df.mean(axis=1))))
df_m['LogFoldChange'] = (((group_1_df.mean(axis=1)))/((group_2_df.mean(axis=1))))
# df_m['Metabolite'] = df_m.index
final_df_m = pd.merge(standard, df_m, left_index=True, right_index=True)
final_df_m = pd.merge(final_df_m,blank_threshold,left_index=True, right_index=True)
# # Add detection column
for col in blank_matrix.columns:
final_df_m[col] = blank_matrix[col].values
final_df_m['combined_mean'] = (final_df_m[value_list_final[0]+'_Mean']+final_df_m[value_list_final[1]+'_Mean'])/2
final_df_m['impact_score'] = (((2**abs(final_df_m['Log2FoldChange']))*final_df_m['combined_mean'])/final_df_m['ttest_pval'])/1000000
final_df_m.impact_score = final_df_m.impact_score.round()
final_df_m['impact_score'] = final_df_m['impact_score'].fillna(0)
sig_genes = final_df_m.loc[final_df_m['ttest_pval'] < 0.05].index
# data = cluster_plot(intensity_matrix,sig_genes)
detection_dict = {}
comparison_matrix = group_1_df.join(group_2_df, how='outer')
for index, row in comparison_matrix.iterrows():
test_list = []
#print (row)
#print(index)
row_intensity = ( | pd.DataFrame(row) | pandas.DataFrame |
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = | TimedeltaIndex([], freq='D') | pandas.TimedeltaIndex |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 17:55:40 2020
@author: Erick
"""
import pandas as pd
import numpy as np
from scipy import optimize
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.linalg import svd
import matplotlib.gridspec as gridspec
import os
import matplotlib.ticker as mticker
from matplotlib.ticker import ScalarFormatter
from matplotlib.ticker import EngFormatter
from epydemics import seir_model, confidence as cf
import datetime
from scipy.optimize import OptimizeResult
# Source https://ourworldindata.org/coronavirus-source-data
# csv_data = './full_data.csv'
csv_data = 'https://covid.ourworldindata.org/data/full_data.csv'
# Source: https://worldpopulationreview.com/
csv_population = './population_by_country.csv'
results_folder = './SEIR'
csv_results = 'fitting_results.csv'
df_confd = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv')
df_death = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv')
df_recvd = pd.read_csv(
'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv')
data_color = 'C0'
location = 'Italy'
add_days = 365
plot_pbands = True
xpos = -100
ypos = 50
start_idx = 15
before_day = 0
xfmt = ScalarFormatter(useMathText=True)
xfmt.set_powerlimits((-3, 3))
engfmt = EngFormatter(places=1, sep=u"\N{THIN SPACE}") # U+2009
if location == 'United States':
alt_loc = 'US'
elif location == 'South Korea':
alt_loc = 'Korea, South'
else:
alt_loc = location
removed_color = 'C4'
def fobj_seir(p: np.ndarray, time: np.ndarray, infected_: np.ndarray,
removed_: np.ndarray, population_: float, I0_: int, R0_: int = 0):
p = np.power(10, p)
sol = seir_model.seir_model(time, N=population_, beta=p[0], gamma=p[1],
sigma=p[2], I0=I0_, R0=R0_, E0=p[3])
y = sol.sol(time)
S, E, I, R = y
n = len(infected_)
residual = np.empty(n * 2)
for j in range(n):
residual[j] = I[j] - infected_[j] # np.log10(I[i]+1) - np.log10(infected[i]+1)
residual[j + n] = R[j] - removed_[j] # np.log10(R[i]+1) - np.log10(removed[i]+1)
return residual
def seir(time: np.ndarray, p: np.ndarray, population_: float, I0_: int, R0_: int = 0):
p = np.power(10, p)
sol = seir_model.seir_model(time, N=population_, beta=p[0], gamma=p[1],
sigma=p[2], I0=I0_, R0=R0_, E0=p[3])
y = sol.sol(time)
S, E, I, R = y
points = len(time)
result = np.zeros((points, 2), dtype=np.float)
for n, j, r in zip(range(points), I, R):
result[n] = (j, r)
return result
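# Hedged sketch (not part of the original script): fobj_seir() returns stacked residuals,
# so the log10-scaled parameters (beta, gamma, sigma, E0) could be fitted with SciPy's
# least-squares driver; the initial guesses below are illustrative only.
#   p0 = np.log10([0.5, 0.1, 0.2, 10.0])
#   fit = optimize.least_squares(fobj_seir, p0,
#                                args=(time_days, infected, removed, population, I0))
#   beta, gamma, sigma, E0 = np.power(10, fit.x)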
defaultPlotStyle = {'font.size': 14,
'font.family': 'Arial',
'font.weight': 'regular',
'legend.fontsize': 14,
'mathtext.fontset': 'stix',
# 'mathtext.rm': 'Times New Roman',
# 'mathtext.it': 'Times New Roman:italic',#'Arial:italic',
# 'mathtext.cal': 'Times New Roman:italic',#'Arial:italic',
# 'mathtext.bf': 'Times New Roman:bold',#'Arial:bold',
'xtick.direction': 'in',
'ytick.direction': 'in',
'xtick.major.size': 4.5,
'xtick.major.width': 1.75,
'ytick.major.size': 4.5,
'ytick.major.width': 1.75,
'xtick.minor.size': 2.75,
'xtick.minor.width': 1.0,
'ytick.minor.size': 2.75,
'ytick.minor.width': 1.0,
'ytick.right': False,
'lines.linewidth': 2.5,
'lines.markersize': 10,
'lines.markeredgewidth': 0.85,
'axes.labelpad': 5.0,
'axes.labelsize': 16,
'axes.labelweight': 'regular',
'axes.linewidth': 1.25,
'axes.titlesize': 16,
'axes.titleweight': 'bold',
'axes.titlepad': 6,
'figure.titleweight': 'bold',
'figure.dpi': 100}
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except Exception as e:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def latex_format(x: float, digits: int = 2):
if np.isinf(x):
return r'$\infty$'
digits = abs(digits)
fmt_dgts = '%%.%df' % digits
fmt_in = '%%.%dE' % digits
x_str = fmt_in % x
    x_sci = (np.array(x_str.split('E'))).astype(float)
if abs(x_sci[1]) <= 3:
fmt_dgts = '%%.%df' % digits
return fmt_dgts % x
if digits == 0:
return r'$\mathregular{10^{%d}}$' % x_sci[1]
else:
ltx_str = fmt_dgts % x_sci[0]
ltx_str += r'$\mathregular{\times 10^{%d}}$' % x_sci[1]
return ltx_str
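# For reference, based on the logic above:
#   latex_format(0.0523, 2)   -> '0.05'                                 (|exponent| <= 3)
#   latex_format(1.23e-05, 2) -> '1.23$\mathregular{\times 10^{-5}}$'   (|exponent| > 3)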
covid_type = np.dtype([('date', 'M8[ns]'),
('confirmed', 'u8'),
('recovered', 'u8'),
('dead', 'u8'),
('infected', 'u8')])
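# Illustrative use of the structured dtype (assumed, not taken from this script):
#   arr = np.zeros(3, dtype=covid_type)
#   arr['confirmed']  # -> array([0, 0, 0], dtype=uint64)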
if __name__ == '__main__':
if not os.path.exists(results_folder):
os.makedirs(results_folder)
column_msk = np.zeros(len(df_confd.columns), dtype=bool)
for i in range(len(df_confd.columns)):
if i == 0 or i > 3:
column_msk[i] = True
df_confd_country = df_confd[df_confd['Country/Region'] == alt_loc].groupby("Country/Region").sum()
df_confd_country = df_confd_country[df_confd_country.columns[2::]].T.reset_index()
df_confd_country = df_confd_country.rename(columns={alt_loc: 'confirmed',
'index': 'date'})
df_death_country = df_death[df_death['Country/Region'] == alt_loc].groupby("Country/Region").sum()
df_death_country = df_death_country[df_death_country.columns[2::]].T.reset_index()
df_death_country = df_death_country.rename(columns={alt_loc: 'dead',
'index': 'date'})
df_recvd_country = df_recvd[df_recvd['Country/Region'] == alt_loc].groupby("Country/Region").sum()
df_recvd_country = df_recvd_country[df_recvd_country.columns[2::]].T.reset_index()
df_recvd_country = df_recvd_country.rename(columns={alt_loc: 'recovered',
'index': 'date'})
df_full = pd.merge(df_confd_country, df_recvd_country,
how="outer").fillna(0)
df_full = pd.merge(df_full, df_death_country,
how="outer").fillna(0)
df_full = df_full.eval('infected = confirmed - recovered - dead')
df_full['date'] = pd.to_datetime(df_full['date'], format='%m/%d/%y')
df_full = df_full[df_full['infected'] > 0]
df = | pd.read_csv(csv_data) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
import pandas as pd
import pandas.testing as pdtest
from allfreqs import AlleleFreqs
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.tests.constants import (
REAL_ALG_X_FASTA, REAL_ALG_X_NOREF_FASTA, REAL_RSRS_FASTA,
REAL_ALG_L6_FASTA, REAL_ALG_L6_NOREF_FASTA,
SAMPLE_MULTIALG_FASTA, SAMPLE_MULTIALG_NOREF_FASTA, SAMPLE_REF_FASTA,
SAMPLE_MULTIALG_CSV, SAMPLE_MULTIALG_NOREF_CSV, SAMPLE_REF_CSV,
sample_sequences_df, SAMPLE_SEQUENCES_DICT, sample_sequences_freqs,
sample_sequences_freqs_amb, SAMPLE_FREQUENCIES,
SAMPLE_FREQUENCIES_AMB, REAL_ALG_X_DF, REAL_X_FREQUENCIES, REAL_ALG_L6_DF,
REAL_L6_FREQUENCIES, TEST_CSV
)
class TestBasic(unittest.TestCase):
def setUp(self) -> None:
ref = Reference("AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT")
alg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
self.af = AlleleFreqs(multialg=alg, reference=ref)
self.af_amb = AlleleFreqs(multialg=alg, reference=ref, ambiguous=True)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_frequencies_ambiguous(self):
# Given/When
exp_freqs = sample_sequences_freqs_amb()
# Then
pdtest.assert_frame_equal(self.af_amb.frequencies, exp_freqs)
def test__get_frequencies(self):
# Given
test_freq = pd.Series({'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3,
'-': 0.1, 'N': 0.1})
exp_freq = {'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3, 'gap': 0.1,
'oth': 0.1}
# When
result = self.af._get_frequencies(test_freq)
# Then
        self.assertTrue(self._dict_almost_equal(exp_freq, result))
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
def test_to_csv_ambiguous(self):
# Given/When
self.af_amb.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES_AMB)
# Then
pdtest.assert_frame_equal(result, expected)
@staticmethod
def _dict_almost_equal(expected: dict, result: dict, acc=10**-8) -> bool:
"""Compare to dictionaries and ensure that all their values are the
same, accounting for some fluctuation up to the given accuracy value.
Args:
expected: expected dictionary
result: resulting dictionary
acc: accuracy to use [default: 10**-8]
"""
        if expected.keys() != result.keys():
            return False
        for key in expected.keys():
            if abs(expected[key] - result[key]) >= acc:
                return False
        return True
# From Fasta
class TestFromFasta(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromFastaNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_NOREF_FASTA,
reference=SAMPLE_REF_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# From Csv
class TestFromCsv(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromCsvNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_NOREF_CSV,
reference=SAMPLE_REF_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
| pdtest.assert_frame_equal(self.af.frequencies, exp_freqs) | pandas.testing.assert_frame_equal |
import matplotlib
matplotlib.use('agg')  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import os
import pandas as pd
def get_whole_tumor_mask(data):
return data > 0
def get_tumor_core_mask(data):
return np.logical_or(data == 1, data == 4)
def get_enhancing_tumor_mask(data):
return data == 4
def dice_coefficient(truth, prediction):
if np.sum(truth) == 0:
# if true mask should be 0 and is 0, dice will be 1
if np.sum(prediction) == 0:
return 1.
else:
return 0
return 2 * np.sum(truth * prediction)/(np.sum(truth) + np.sum(prediction))
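# Worked example (illustrative arrays, not from this script):
#   truth      = np.array([1, 1, 0, 0])
#   prediction = np.array([1, 0, 0, 0])
#   dice_coefficient(truth, prediction) -> 2 * 1 / (2 + 1) ~= 0.667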
def main():
prediction_dir = 'prediction'
ext = '.png'
filename_truth = 'truth.nii.gz'
filename_predict = 'prediction.nii.gz'
header = ("DiceCoeff", "TruthSize", "PredictSize")
rows = list()
subject_ids = list()
dir_iterator = next(os.walk(prediction_dir))[1]
for case_folder in dir_iterator:
case_folder = os.path.join(prediction_dir, case_folder)
if not os.path.isdir(case_folder):
continue
subject_ids.append(os.path.basename(case_folder))
truth_file = os.path.join(case_folder, filename_truth)
truth_image = nib.load(truth_file)
truth = truth_image.get_fdata()
prediction_file = os.path.join(case_folder, filename_predict)
prediction_image = nib.load(prediction_file)
prediction = prediction_image.get_fdata()
truth[truth > 0] = 1
rows.append([dice_coefficient(get_whole_tumor_mask(truth),
get_whole_tumor_mask(prediction)),
np.sum(truth), np.sum(prediction)])
df = | pd.DataFrame.from_records(rows, columns=header, index=subject_ids) | pandas.DataFrame.from_records |
import json
from datetime import datetime
import os
import shutil
import re
import pandas as pd
import seaborn as sns
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
import configuration as c
from scipy.stats import mannwhitneyu
import numpy as np
all_apps_permissions_counts = []
permission_counts_covid_apps = []
permission_counts_non_covid_apps = []
permission_frequencies_covid = dict()
permission_frequencies_non_covid = dict()
permission_protection_levels = dict()
protection_level_app_frequencies_covid = dict()
protection_level_app_frequencies_non_covid = dict()
protection_level_app_permission_frequencies_covid = dict()
protection_level_app_permission_frequencies_non_covid = dict()
permissions_stats_file = open(c.figures_path + "permissions_stats.txt", "w")
def sort_dictionary(dictionary, key_or_value):
return {k: v for k, v in sorted(dictionary.items(), key=lambda item: item[key_or_value])}
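# e.g. sort_dictionary({'b': 1, 'a': 2}, 0) -> {'a': 2, 'b': 1}   (sorted by key)
#      sort_dictionary({'b': 1, 'a': 2}, 1) -> {'b': 1, 'a': 2}   (sorted by value)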
def count_permissions_per_app(app):
global all_apps_permissions_counts
global permission_counts_covid_apps
global permission_counts_non_covid_apps
all_apps_permissions_counts.append(app['permission_count'])
if app['is_covid']:
permission_counts_covid_apps.append(app['permission_count'])
else:
permission_counts_non_covid_apps.append(app['permission_count'])
def count_apps_per_permission(app):
global permission_frequencies_covid
global permission_frequencies_non_covid
for permission in app['permissions']:
permission_name = permission[permission.rindex('.')+1:]
if app['is_covid']:
if permission_name in permission_frequencies_covid.keys():
permission_frequencies_covid[permission_name] += 1
else:
permission_frequencies_covid[permission_name] = 1
else:
if permission_name in permission_frequencies_non_covid.keys():
permission_frequencies_non_covid[permission_name] += 1
else:
permission_frequencies_non_covid[permission_name] = 1
def compute_median_number_of_permissions(df):
permissions_stats_file.write("Median # of permissions:\n" + str(df.groupby(['app_type'])['permission_count'].median()))
permissions_stats_file.write("\n-------------------------------------\n")
def generate_boxplots_of_permission_counts_per_app(df):
#### Boxplot of the number of permissions of COVID and Non-COVID apps ####
boxplot_num_permissions = sns.boxplot(data=df, x="app_type", y="permission_count", palette="Set3")
boxplot_num_permissions.yaxis.set_major_locator(ticker.MultipleLocator(1))
boxplot_num_permissions.set(ylim=(0, max(all_apps_permissions_counts)), xlabel='Apps', ylabel='# of permissions')
fig = boxplot_num_permissions.get_figure()
fig.set_size_inches(6, 8)
fig.savefig(c.figures_path + 'num_permissions.pdf')
fig.clf()
# Run Mann-Whitney U test
mann_test = mannwhitneyu(list(df[df['app_type'] == 'COVID']['permission_count']), list(df[df['app_type'] == 'Non-COVID']['permission_count']))
permissions_stats_file.write("App level:\n Mann-Whitney U test p-value:" + str(mann_test.pvalue))
permissions_stats_file.write("\n-------------------------------------\n")
def generate_separate_bar_charts_of_permission_fequencies(top):
sorted_permission_frequencies_covid = sort_dictionary(permission_frequencies_covid, 1)
sorted_permission_frequencies_non_covid = sort_dictionary(permission_frequencies_non_covid, 1)
# COVID permissions
plt.barh(range(top), list(sorted_permission_frequencies_covid.values())[-top:])
plt.yticks(range(top), list(sorted_permission_frequencies_covid.keys())[-top:])
plt.xlabel('Frequency (# of apps)')
plt.ylabel('Permission')
plt.gcf().set_size_inches(8, 5)
plt.savefig(c.figures_path + 'permission_frequencies_covid.pdf', bbox_inches='tight')
plt.clf()
# Non-COVID permissions
plt.barh(range(top), list(sorted_permission_frequencies_non_covid.values())[-top:])
plt.yticks(range(top), list(sorted_permission_frequencies_non_covid.keys())[-top:])
plt.xlabel('Frequency (# of apps)')
plt.ylabel('Permission')
plt.gcf().set_size_inches(8, 5)
plt.savefig(c.figures_path + 'permission_frequencies_non_covid.pdf', bbox_inches='tight')
plt.clf()
def generate_combined_bar_chart_of_permission_fequencies(top='all'):
permission_frequencies_df = pd.DataFrame({'covid':pd.Series(permission_frequencies_covid),'non_covid':pd.Series(permission_frequencies_non_covid)}).fillna(0)
permission_frequencies_df.covid = (permission_frequencies_df.covid / total_number_of_covid_apps * 100)
permission_frequencies_df.non_covid = (permission_frequencies_df.non_covid / total_number_of_non_covid_apps * 100)
permission_frequencies_df = permission_frequencies_df.sort_values('covid', ascending=False)
displayed_permissions = permission_frequencies_df if top=='all' else permission_frequencies_df.head(top)
positions = list(range(len(displayed_permissions.index)))
width = 0.35
fig, ax = plt.subplots(figsize=(5, 4 + len(displayed_permissions.index)/6))
ax.bar([pos + width/2 for pos in positions], displayed_permissions['covid'], width, label='COVID', color=['#95cbc1'])
ax.bar([pos - width/2 for pos in positions], displayed_permissions['non_covid'], width, label='Non-COVID', color=['#f6f6bd'])
ax.set_ylabel('Frequency (% of apps)')
ax.set_xlabel('Permission')
ax.set_yticks(np.arange(0, 101, 10))
ax.set_xticks(positions)
ax.set_xticklabels(list(displayed_permissions.index), rotation=30, ha="right")
ax.legend()
fig.savefig(c.figures_path + str(top) + '_permission_frequencies_percentages_covid_and_non_covid.pdf', bbox_inches='tight')
fig.clf()
def identify_permissions_only_in_covid_or_non_covid():
permissions_only_in_covid = permission_frequencies_covid.keys() - permission_frequencies_non_covid.keys()
    permissions_only_in_non_covid = permission_frequencies_non_covid.keys() - permission_frequencies_covid.keys()
permissions_stats_file.write("Permissions only in COVID:\n")
for permission in permissions_only_in_covid:
permissions_stats_file.write("\t" + permission + ": " + str(permission_frequencies_covid[permission]) + "\n")
permissions_stats_file.write("\nPermissions only in Non-COVID:\n")
    for permission in permissions_only_in_non_covid:
permissions_stats_file.write("\t" + permission + ": " + str(permission_frequencies_non_covid[permission]) + "\n")
permissions_stats_file.write("-------------------------------------\n")
def measure_difference_in_permission_frequencies():
permissions_only_in_covid = permission_frequencies_covid.keys() - permission_frequencies_non_covid.keys()
    permissions_only_in_non_covid = permission_frequencies_non_covid.keys() - permission_frequencies_covid.keys()
# Add permissions that do not exist in the other category of apps with zero frequency
all_permissions_covid = permission_frequencies_covid
all_permissions_non_covid = permission_frequencies_non_covid
for permission in permissions_only_in_covid:
all_permissions_non_covid[permission] = 0
    for permission in permissions_only_in_non_covid:
all_permissions_covid[permission] = 0
# Sort permissions based name
all_permissions_covid = sort_dictionary(all_permissions_covid, 0)
all_permissions_non_covid = sort_dictionary(all_permissions_non_covid, 0)
# Run Mann-Whitney U test
mann_test = mannwhitneyu(list(all_permissions_covid.values()), list(all_permissions_non_covid.values()))
permissions_stats_file.write("Permission level:\n Mann-Whitney U test p-value:" + str(mann_test.pvalue))
permissions_stats_file.write("\n-------------------------------------\n")
def extract_protection_levels_of_permissions():
all_unique_permissions = set(list(permission_frequencies_covid.keys()) + list(permission_frequencies_non_covid.keys()))
android_permissions = json.load(open(c.STATIC_RESOURCES_PATH + 'android_permissions.json', 'r'))
permission_details = android_permissions['permissions']
for permission in permission_details:
protection_level = permission_details[permission]['protection_level']
protection_level = 'Not for third-party apps' if 'Not for use by third-party applications' in protection_level else protection_level # very long name to plot
protection_level = 'signature | [others]' if 'signature|' in protection_level else protection_level # very long name to plot
permission_protection_levels[permission] = protection_level
def get_app_protecition_levels(app_permissions):
app_protecition_levels = []
for permission in app_permissions:
if permission in permission_protection_levels.keys():
protection_level = permission_protection_levels[permission]
protection_level = 'Not for third-party apps' if 'Not for use by third-party applications' in protection_level else protection_level # very long name to plot
protection_level = 'signature | [others]' if 'signature|' in protection_level else protection_level # very long name to plot
else:
protection_level = 'undefined'
app_protecition_levels.append(protection_level)
return app_protecition_levels
def count_apps_per_protection_level(app):
global protection_level_app_frequencies_covid
global protection_level_app_frequencies_non_covid
unique_protection_levels = set(list(permission_protection_levels.values()) + ['undefined'])
for protection_level in unique_protection_levels:
protection_level_count = app['protection_levels'].count(protection_level)
if app['is_covid']:
if protection_level in protection_level_app_permission_frequencies_covid:
protection_level_app_permission_frequencies_covid[protection_level].append(protection_level_count)
else:
protection_level_app_permission_frequencies_covid[protection_level] = [protection_level_count]
else:
if protection_level in protection_level_app_permission_frequencies_non_covid:
protection_level_app_permission_frequencies_non_covid[protection_level].append(protection_level_count)
else:
protection_level_app_permission_frequencies_non_covid[protection_level] = [protection_level_count]
if protection_level in app['protection_levels']:
if app['is_covid']:
if protection_level in protection_level_app_frequencies_covid:
protection_level_app_frequencies_covid[protection_level] += 1
else:
protection_level_app_frequencies_covid[protection_level] = 1
else:
if protection_level in protection_level_app_frequencies_non_covid:
protection_level_app_frequencies_non_covid[protection_level] += 1
else:
protection_level_app_frequencies_non_covid[protection_level] = 1
def count_permissions_per_app_per_protection_level(app):
global protection_level_app_frequencies_covid
global protection_level_app_frequencies_non_covid
unique_protection_levels = set(permission_protection_levels.values())
for protection_level in unique_protection_levels:
if protection_level in app['protection_levels']:
if app['is_covid']:
if protection_level in protection_level_app_frequencies_covid:
protection_level_app_frequencies_covid[protection_level] += 1
else:
protection_level_app_frequencies_covid[protection_level] = 1
else:
if protection_level in protection_level_app_frequencies_non_covid:
protection_level_app_frequencies_non_covid[protection_level] += 1
else:
protection_level_app_frequencies_non_covid[protection_level] = 1
for protection_level in app['protection_levels']:
#permission_name = permission[permission.rindex('.')+1:]
if app['is_covid']:
if protection_level in protection_level_app_permission_frequencies_covid.keys():
protection_level_app_permission_frequencies_covid[protection_level] += 1
else:
protection_level_app_permission_frequencies_covid[protection_level] = 1
else:
if protection_level in protection_level_app_permission_frequencies_non_covid.keys():
protection_level_app_permission_frequencies_non_covid[protection_level] += 1
else:
protection_level_app_permission_frequencies_non_covid[protection_level] = 1
def generate_combined_bar_chart_of_app_protection_levels():
global total_number_of_covid_apps
global total_number_of_non_covid_apps
protection_level_frequencies_df = pd.DataFrame({'covid': | pd.Series(protection_level_app_frequencies_covid) | pandas.Series |
from flask import Response, url_for, current_app, request
from flask_restful import Resource, reqparse
import pandas as pd
import os
from pathlib import Path
from flask_mysqldb import MySQL
from datetime import datetime
import random
import string
from flask_mail import Mail, Message
db = MySQL()
parser = reqparse.RequestParser()
class ApproveCovid(Resource):
def approve(self, update_id, aid):
cur = db.connection.cursor()
cur.execute(
"""
INSERT INTO Administrator_approved_covid_update (Update_id, AID)
VALUES (%s, %s);
""", (update_id, aid)
)
db.connection.commit()
def getUpdate(self, update_id):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Covid_update where Update_id = %s
""", (update_id,)
)
data = cur.fetchall()
return data[0]
def post(self):
params = ['update_id', 'aid', 'approve']
for elem in params:
parser.add_argument(elem)
args = parser.parse_args()
print(args['update_id'], args['aid'])
_, recovered, death, confirmed, _, countie = self.getUpdate(args['update_id'])
self.approve(args['update_id'], args['aid'])
print(confirmed, death, recovered, countie)
if (args['approve']):
cur = db.connection.cursor()
cur.execute(
"""
UPDATE covid_data
SET Confirmed = %s, Deaths = %s, Recovered = %s
where Admin2 = %s;
""", (confirmed, death, recovered, countie)
)
db.connection.commit()
print(cur.rowcount)
if cur.rowcount > 0:
return Response("Record Sucessfully updated", status=200)
else:
return Response("Update failed", status=500)
else:
return Response("Update Sucessfully Denied", status=500)
class CovidUpdates(Resource):
def administratorExist(self, aid):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Administrators where AID = %s
""", (aid)
)
return True if cur.rowcount > 0 else False
def get(self):
parser.add_argument('aid')
args = parser.parse_args()
if self.administratorExist(args['aid']) == False:
return Response("Administrators does not exist", 400)
cur = db.connection.cursor()
cur.execute(
"""
Select * from Covid_update where Update_id not in (
Select Update_id from Administrator_approved_covid_update
);
"""
)
data = cur.fetchall()
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
df = pd.DataFrame(json_data)
return Response(df.to_json(orient="records"), mimetype='application/json')
class ApproveFire(Resource):
def approve(self, update_id, aid):
cur = db.connection.cursor()
cur.execute(
"""
INSERT INTO Administrator_approve_fire_update (Update_id, AID)
VALUES (%s, %s);
""", (update_id, aid)
)
db.connection.commit()
def getUpdate(self, update_id):
cur = db.connection.cursor()
cur.execute(
"""
Select * from fire_update where update_id = %s
""", (update_id,)
)
data = cur.fetchall()
return data[0]
def post(self):
params = ['update_id', 'aid', 'approve']
for elem in params:
parser.add_argument(elem)
args = parser.parse_args()
_, acres, containment, fireName = self.getUpdate(args['update_id'])
self.approve(args['update_id'], args['aid'])
if (args['approve']):
print(args['update_id'])
print(args['approve'])
cur = db.connection.cursor()
cur.execute(
"""
UPDATE fire_data
SET incident_acres_burned = %s, incident_containment = %s
where incident_name = %s;
""", (acres, containment, fireName)
)
db.connection.commit()
if cur.rowcount > 0:
return Response("Record Sucessfully updated", status=200)
else:
return Response("Update failed", status=500)
else:
return Response("Update Sucessfully denied", status=200)
class NewFires(Resource):
def administratorExist(self, aid):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Administrators where AID = %s
""", (aid)
)
return True if cur.rowcount > 0 else False
def get(self):
parser.add_argument('aid')
args = parser.parse_args()
if self.administratorExist(args['aid']) == False:
return Response("Administrators does not exist", 400)
cur = db.connection.cursor()
cur.execute(
"""
Select * from fire_add where fire_add_id not in (
Select fire_add_id from administrator_approve_fire_add
);
"""
)
data = cur.fetchall()
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
df = pd.DataFrame(json_data)
return Response(df.to_json(orient="records"), mimetype='application/json')
class ApproveFireAdd(Resource):
def approve(self, fire_add_id, aid):
cur = db.connection.cursor()
cur.execute(
"""
INSERT INTO administrator_approve_fire_add (fire_add_id, AID)
VALUES (%s, %s);
""", (fire_add_id, aid)
)
db.connection.commit()
def getUpdate(self, update_id):
cur = db.connection.cursor()
cur.execute(
"""
Select * from fire_add where fire_add_id = %s
""", (update_id,)
)
data = cur.fetchall()
return data[0]
def post(self):
params = ['update_id', 'aid', 'approve']
for elem in params:
parser.add_argument(elem)
args = parser.parse_args()
data = self.getUpdate(args['update_id'])
self.approve(args['update_id'], args['aid'])
if (args['approve']):
cur = db.connection.cursor()
cur.execute(
"""
INSERT INTO fire_data (incident_id, incident_name, incident_is_final, incident_date_last_update, incident_date_created, incident_administrative_unit, incident_administrative_unit_url, incident_location, incident_county, incident_control, incident_cooperating_agencies, incident_type, incident_url, incident_date_extinguished, incident_dateonly_extinguished, incident_dateonly_created, is_active, calfire_incident, notification_desired, incident_acres_burned, incident_containment, incident_longitude, incident_latitude)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);
""", data
)
db.connection.commit()
if cur.rowcount > 0:
return Response("Record Sucessfully added", status=200)
else:
return Response("Add failed", status=500)
else:
return Response("Add Sucessfully denied", status=200)
class FireUpdates(Resource):
def administratorExist(self, aid):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Administrators where AID = %s
""", (aid)
)
return True if cur.rowcount > 0 else False
def get(self):
parser.add_argument('aid')
args = parser.parse_args()
if self.administratorExist(args['aid']) == False:
return Response("Administrators does not exist", 400)
cur = db.connection.cursor()
cur.execute(
"""
Select * from fire_update where Update_id not in (
Select Update_id from Administrator_approve_fire_update
);
"""
)
data = cur.fetchall()
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
df = pd.DataFrame(json_data)
return Response(df.to_json(orient="records"), mimetype='application/json')
class CovidAlerts(Resource):
def administratorExist(self, aid):
cur = db.connection.cursor()
cur.execute(
"""
Select * from Administrators where AID = %s
""", (aid)
)
return True if cur.rowcount > 0 else False
def get(self):
parser.add_argument('aid')
args = parser.parse_args()
if self.administratorExist(args['aid']) == False:
return Response("Administrators does not exist", 400)
cur = db.connection.cursor()
cur.execute(
"""
            Select * from Covid_update where Update_id not in (
Select Update_id from Administrator_approved_covid_update
);
"""
)
data = cur.fetchall()
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
df = pd.DataFrame(json_data)
return Response(df.to_json(orient="records"), mimetype='application/json')
class AdministratorLogin(Resource):
def get(self):
username = request.args.get('username')
password = request.args.get('password')
cur = db.connection.cursor()
cur.execute(
"""
Select * from Administrators where Mail = %s and pwd = %s;
""", (username, password)
)
        data = cur.fetchall()
        if cur.rowcount == 0:
            return Response("No account found!!")
row_headers=[x[0] for x in cur.description]
json_data=[]
for result in data:
json_data.append(dict(zip(row_headers,result)))
cur.close()
df = pd.DataFrame(json_data)
return Response(df.to_json(orient="records"), mimetype='application/json')
class GetAlerts(Resource):
def get(self):
parser.add_argument('aid')
aid = parser.parse_args()['aid'] # getting the administrator ID
cur = db.connection.cursor()
cur.execute( # Getting all alerts that did not get approve
"""
Select *
from Alert Al
where Al.alert_id not in (
Select A.alert_id
from Administrators_retrieve_alert A
);
"""
)
data = cur.fetchall()
row_headers = [x[0] for x in cur.description]
json_data = []
for result in data: # Creating the JSON that will contains all the alerts
json_data.append(dict(zip(row_headers, result)))
cur.close()
df = | pd.DataFrame(json_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 21:25:39 2021
@author: alber
"""
import sys, os
import pandas as pd
import numpy as np
import time
import pickle
import six
sys.modules["sklearn.externals.six"] = six
from joblib import Parallel, delayed
from itertools import combinations, permutations, product
from shapely.geometry import Polygon
from sklearn.preprocessing import StandardScaler
# ProtodashExplainer is used in checkStability below; it is assumed to come from
# IBM's aix360 package (the exact import path may differ between versions).
from aix360.algorithms.protodash import ProtodashExplainer
N_JOBS = 1
def checkPointInside(
data_point, df_rules, numerical_cols, categorical_cols, check_opposite=True
):
"""
    Returns 1 for the hypercubes where the data point is inside and 0 where it is not.
    The check is performed differently depending on whether the rules are independent
    for each combination of categorical variables or whether everything is analyzed
    altogether.
Parameters
----------
data_point : TYPE
DESCRIPTION.
df_rules : TYPE
DESCRIPTION.
numerical_cols : TYPE
DESCRIPTION.
categorical_cols : TYPE
DESCRIPTION.
check_opposite : TYPE
It indicates whether to consider datapoints with >=/<= or strict >/<.
Since we will see the rules in a counterfactual way (p.e what should
happen for an outlier to be an inlier) we consider the datapoints of the
target rules with >=/<=, and the ones from the other class as >/< (that
means that we consider rules with P=1 even if they have points from the
other class on the edges) [NOT USED]
Returns
-------
df_plot : TYPE
DESCRIPTION.
"""
df_plot = df_rules.copy()
if len(df_rules) == 0:
df_plot["check"] = 0
return df_plot
# Default value
df_plot["check"] = 1
# Check for categorical
if len(categorical_cols) > 0:
for col in categorical_cols:
value = data_point[col]
df_plot["check"] = df_plot["check"] * (
df_plot.apply(lambda x: 1 if (x[col] == value) else 0, axis=1)
)
# Check for numerical
if len(numerical_cols) > 0:
for col in numerical_cols:
value = data_point[col]
if check_opposite:
df_plot["check"] = df_plot["check"] * (
df_plot.apply(
lambda x: 1
if ((x[col + "_max"] >= value) & (value >= x[col + "_min"]))
else 0,
axis=1,
)
)
else:
df_plot["check"] = df_plot["check"] * (
df_plot.apply(
lambda x: 1
if ((x[col + "_max"] > value) & (value > x[col + "_min"]))
else 0,
axis=1,
)
)
return df_plot[["check"]]
def checkStability(
df_anomalies, df_rules, model, numerical_cols, categorical_cols, using_inliers
):
"""
Function that computes the "stability" metrics of the hypercubes.
First, it obtains the prototypes from the dataset and generates random samples
near them.
Then, it obtains the prediction of the original model for those dummy samples
    and checks whether, when the prediction is inlier/outlier, there is at least one
    rule that includes that datapoint.
It also checks the level of agreement between all the rules. Since the prototypes
belong to the same class, the function checks if the final prediction using all
rules is the same for all the prototypes.
Rules agreement:
- Choose N prototypes that represent the original hyperspace of data
- Generate M samples close to each of those N prototypes; the hypothesis
is that close points should be generally predicted belonging to the same class
- For each of those N*M datapoints (M datapoints per each N prototype) check
whether the rules (all of them) predict them as inliner or outlier; the datapoints
that come into the function are either outliers or inliers. If they are inliers,
then the rules identify an artificial datapoint (of those M*N) as inlier if it
is outside every rule. If the datapoints are outliers it's the same reversed: a
datapoint is an inlier if no rule includes it.
    - Then, the % of each prototype's neighbours labeled as the assumed correct class
      (inliers or outliers) is computed relative to the total neighbours of that prototype.
- All the % for each prototype are averaged into one %.
Model agreement:
- The % of predictions for the artificial datapoints aforementioned that are the same
between the rules and the original OCSVM model.
Parameters
----------
df_anomalies : TYPE
DESCRIPTION.
df_rules : TYPE
DESCRIPTION.
model : TYPE
DESCRIPTION.
numerical_cols : TYPE
DESCRIPTION.
categorical_cols : TYPE
DESCRIPTION.
sc : TYPE
DESCRIPTION.
using_inliers : TYPE
DESCRIPTION.
Returns
-------
df_rules : TYPE
DESCRIPTION.
"""
# Ignore prints in this function
ff = open(os.devnull, "w")
xx = sys.stdout # save sys.stdout
sys.stdout = ff
if len(df_rules) == 0:
df_rules["precision_vs_model"] = 0
df_rules["rules_agreement"] = 0
return df_rules
# Choose the type of datapoints and define params
label = 1 if using_inliers else -1
df_data = df_anomalies[df_anomalies["predictions"] == label].copy()
n_samples = np.round(len(df_rules))
n_samples = n_samples if n_samples > 20 else 20 # at least 20 samples
if n_samples > len(df_data):
n_samples = len(df_data)
df_rules_aux = df_rules.copy()
df_anomalies_aux = df_anomalies.copy()
# Scaling
if len(numerical_cols):
sc = StandardScaler()
sc.fit_transform(df_anomalies[numerical_cols])
df_anomalies_aux[numerical_cols] = sc.transform(
df_anomalies_aux[numerical_cols]
)
cols_max = [x + "_max" for x in numerical_cols]
cols_min = [x + "_min" for x in numerical_cols]
# Generate Prototypes
explainer = ProtodashExplainer()
list_cols = numerical_cols + categorical_cols
(W, S, _) = explainer.explain(
df_data[list_cols].values,
df_data[list_cols].values,
m=n_samples,
kernelType="Gaussian",
sigma=2,
)
df_prototypes = df_anomalies[df_anomalies.index.isin(list(S))][
list_cols
].reset_index(drop=True)
# Generate artificial samples around the prototypes
df_samples_total = | pd.DataFrame() | pandas.DataFrame |
import unittest
import qteasy as qt
import pandas as pd
from pandas import Timestamp
import numpy as np
from numpy import int64
import itertools
import datetime
from qteasy.utilfuncs import list_to_str_format, regulate_date_format, time_str_format, str_to_list
from qteasy.utilfuncs import maybe_trade_day, is_market_trade_day, prev_trade_day, next_trade_day, prev_market_trade_day
from qteasy.utilfuncs import next_market_trade_day
from qteasy.space import Space, Axis, space_around_centre, ResultPool
from qteasy.core import apply_loop
from qteasy.built_in import SelectingFinanceIndicator
from qteasy.history import stack_dataframes
from qteasy.tsfuncs import income, indicators, name_change, get_bar
from qteasy.tsfuncs import stock_basic, trade_calendar, new_share, get_index
from qteasy.tsfuncs import balance, cashflow, top_list, index_indicators, composite
from qteasy.tsfuncs import future_basic, future_daily, options_basic, options_daily
from qteasy.tsfuncs import fund_basic, fund_net_value, index_basic
from qteasy.evaluate import eval_alpha, eval_benchmark, eval_beta, eval_fv
from qteasy.evaluate import eval_info_ratio, eval_max_drawdown, eval_sharp
from qteasy.evaluate import eval_volatility
from qteasy.tafuncs import bbands, dema, ema, ht, kama, ma, mama, mavp, mid_point
from qteasy.tafuncs import mid_price, sar, sarext, sma, t3, tema, trima, wma, adx, adxr
from qteasy.tafuncs import apo, bop, cci, cmo, dx, macd, macdext, aroon, aroonosc
from qteasy.tafuncs import macdfix, mfi, minus_di, minus_dm, mom, plus_di, plus_dm
from qteasy.tafuncs import ppo, roc, rocp, rocr, rocr100, rsi, stoch, stochf, stochrsi
from qteasy.tafuncs import trix, ultosc, willr, ad, adosc, obv, atr, natr, trange
from qteasy.tafuncs import avgprice, medprice, typprice, wclprice, ht_dcperiod
from qteasy.tafuncs import ht_dcphase, ht_phasor, ht_sine, ht_trendmode, cdl2crows
from qteasy.tafuncs import cdl3blackcrows, cdl3inside, cdl3linestrike, cdl3outside
from qteasy.tafuncs import cdl3starsinsouth, cdl3whitesoldiers, cdlabandonedbaby
from qteasy.tafuncs import cdladvanceblock, cdlbelthold, cdlbreakaway, cdlclosingmarubozu
from qteasy.tafuncs import cdlconcealbabyswall, cdlcounterattack, cdldarkcloudcover
from qteasy.tafuncs import cdldoji, cdldojistar, cdldragonflydoji, cdlengulfing
from qteasy.tafuncs import cdleveningdojistar, cdleveningstar, cdlgapsidesidewhite
from qteasy.tafuncs import cdlgravestonedoji, cdlhammer, cdlhangingman, cdlharami
from qteasy.tafuncs import cdlharamicross, cdlhighwave, cdlhikkake, cdlhikkakemod
from qteasy.tafuncs import cdlhomingpigeon, cdlidentical3crows, cdlinneck
from qteasy.tafuncs import cdlinvertedhammer, cdlkicking, cdlkickingbylength
from qteasy.tafuncs import cdlladderbottom, cdllongleggeddoji, cdllongline, cdlmarubozu
from qteasy.tafuncs import cdlmatchinglow, cdlmathold, cdlmorningdojistar, cdlmorningstar
from qteasy.tafuncs import cdlonneck, cdlpiercing, cdlrickshawman, cdlrisefall3methods
from qteasy.tafuncs import cdlseparatinglines, cdlshootingstar, cdlshortline, cdlspinningtop
from qteasy.tafuncs import cdlstalledpattern, cdlsticksandwich, cdltakuri, cdltasukigap
from qteasy.tafuncs import cdlthrusting, cdltristar, cdlunique3river, cdlupsidegap2crows
from qteasy.tafuncs import cdlxsidegap3methods, beta, correl, linearreg, linearreg_angle
from qteasy.tafuncs import linearreg_intercept, linearreg_slope, stddev, tsf, var, acos
from qteasy.tafuncs import asin, atan, ceil, cos, cosh, exp, floor, ln, log10, sin, sinh
from qteasy.tafuncs import sqrt, tan, tanh, add, div, max, maxindex, min, minindex, minmax
from qteasy.tafuncs import minmaxindex, mult, sub, sum
from qteasy.history import get_financial_report_type_raw_data, get_price_type_raw_data
from qteasy.database import DataSource
from qteasy._arg_validators import _parse_string_kwargs, _valid_qt_kwargs
class TestCost(unittest.TestCase):
def setUp(self):
self.amounts = np.array([10000, 20000, 10000])
self.op = np.array([0, 1, -0.33333333])
self.prices = np.array([10, 20, 10])
self.r = qt.Cost()
def test_rate_creation(self):
print('testing rates objects\n')
self.assertIsInstance(self.r, qt.Cost, 'Type should be Rate')
def test_rate_operations(self):
self.assertEqual(self.r['buy_fix'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['sell_fix'], 0.0, 'Item got is wrong')
self.assertEqual(self.r['buy_rate'], 0.003, 'Item got is incorrect')
self.assertEqual(self.r['sell_rate'], 0.001, 'Item got is incorrect')
self.assertEqual(self.r['buy_min'], 5., 'Item got is incorrect')
self.assertEqual(self.r['sell_min'], 0.0, 'Item got is incorrect')
self.assertEqual(self.r['slipage'], 0.0, 'Item got is incorrect')
self.assertEqual(np.allclose(self.r(self.amounts), [0.003, 0.003, 0.003]), True, 'fee calculation wrong')
def test_rate_fee(self):
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nSell result with fixed rate = 0.001 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33299.999667, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.333332999999996, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 33296.67, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33.33, msg='result incorrect')
print('\nSell result with fixed rate = 0.001 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 0., -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], 32967.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 33, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997.00897308, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82053838484547, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 1:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 1))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 997., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -19999.82, msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 59.82, msg='result incorrect')
print('\nPurchase result with fixed rate = 0.003 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_fee_result[1], -18054., msg='result incorrect')
self.assertAlmostEqual(test_rate_fee_result[2], 54.0, msg='result incorrect')
def test_min_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 300
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 985, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_min_fee_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with min fee = 300 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33033.333)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 33030)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
print('\nselling result with fixed cost rate with min fee = 300 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_min_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_min_fee_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_min_fee_result[1], 32700)
self.assertAlmostEqual(test_min_fee_result[2], 300.0)
def test_rate_with_min(self):
"""Test transaction cost calculated by rate with min_fee"""
self.r.buy_rate = 0.0153
self.r.sell_rate = 0.01
self.r.buy_fix = 0.
self.r.sell_fix = 0.
self.r.buy_min = 300
self.r.sell_min = 333
self.r.slipage = 0.
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 984.9305624, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 301.3887520929774, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 10:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 10))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 10)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 980, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -19900.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\npurchase result with fixed cost rate with buy_rate = 0.0153, min fee = 300 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0., 900, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], -18300.0, msg='result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[2], 300.0, msg='result incorrect')
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32999.99967)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.33333)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 1:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 1))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 1)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3333]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32996.7)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.3)
print('\nselling result with fixed cost rate with sell_rate = 0.01, min fee = 333 and moq = 100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_rate_with_min_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_rate_with_min_result[0], [0, 0, -3300]), True, 'result incorrect')
self.assertAlmostEqual(test_rate_with_min_result[1], 32667.0)
self.assertAlmostEqual(test_rate_with_min_result[2], 333.0)
def test_fixed_fee(self):
self.r.buy_rate = 0.
self.r.sell_rate = 0.
self.r.buy_fix = 200
self.r.sell_fix = 150
self.r.buy_min = 0
self.r.sell_min = 0
self.r.slipage = 0
print('\nselling result of fixed cost with fixed fee = 150 and moq=0:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], 33183.333, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150.0, msg='result incorrect')
print('\nselling result of fixed cost with fixed fee = 150 and moq=100:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3300.]), True,
f'result incorrect, {test_fixed_fee_result[0]} does not equal to [0,0,-3400]')
self.assertAlmostEqual(test_fixed_fee_result[1], 32850., msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 150., msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 990., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
print('\npurchase result of fixed cost with fixed fee = 200:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18200.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 200.0, msg='result incorrect')
def test_slipage(self):
self.r.buy_fix = 0
self.r.sell_fix = 0
self.r.buy_min = 0
self.r.sell_min = 0
self.r.buy_rate = 0.003
self.r.sell_rate = 0.001
self.r.slipage = 1E-9
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 0:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 0))
print('\npurchase result of fixed rate = 0.003 and slipage = 1E-10 and moq = 100:')
print(self.r.get_purchase_result(self.prices, self.op, self.amounts, 100))
print('\nselling result with fixed rate = 0.001 and slipage = 1E-10:')
print(self.r.get_selling_result(self.prices, self.op, self.amounts))
test_fixed_fee_result = self.r.get_selling_result(self.prices, self.op, self.amounts)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0, 0, -3333.3333]), True,
f'{test_fixed_fee_result[0]} does not equal to [0, 0, -10000]')
self.assertAlmostEqual(test_fixed_fee_result[1], 33298.88855591,
msg=f'{test_fixed_fee_result[1]} does not equal to 99890.')
self.assertAlmostEqual(test_fixed_fee_result[2], 34.44444409,
msg=f'{test_fixed_fee_result[2]} does not equal to -36.666663.')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 0)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 996.98909294, 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -20000.0, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 60.21814121353513, msg='result incorrect')
test_fixed_fee_result = self.r.get_purchase_result(self.prices, self.op, self.amounts, 100)
self.assertIs(np.allclose(test_fixed_fee_result[0], [0., 900., 0.]), True, 'result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[1], -18054.36, msg='result incorrect')
self.assertAlmostEqual(test_fixed_fee_result[2], 54.36, msg='result incorrect')
class TestSpace(unittest.TestCase):
def test_creation(self):
"""
test if creation of space object is fine
"""
# first group of inputs, output Space with two discr axis from [0,10]
print('testing space objects\n')
# pars_list = [[(0, 10), (0, 10)],
# [[0, 10], [0, 10]]]
#
# types_list = ['discr',
# ['discr', 'discr']]
#
# input_pars = itertools.product(pars_list, types_list)
# for p in input_pars:
# # print(p)
# s = qt.Space(*p)
# b = s.boes
# t = s.types
# # print(s, t)
# self.assertIsInstance(s, qt.Space)
# self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
# self.assertEqual(t, ['discr', 'discr'], 'types incorrect')
#
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = ['foo, bar',
['foo', 'bar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['enum', 'enum'], 'types incorrect')
pars_list = [[(0, 10), (0, 10)],
[[0, 10], [0, 10]]]
types_list = [['discr', 'foobar']]
input_pars = itertools.product(pars_list, types_list)
for p in input_pars:
# print(p)
s = Space(*p)
b = s.boes
t = s.types
# print(s, t)
self.assertEqual(b, [(0, 10), (0, 10)], 'boes incorrect!')
self.assertEqual(t, ['discr', 'enum'], 'types incorrect')
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types='conti, enum')
self.assertEqual(s.types, ['conti', 'enum'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10.0, 2))
self.assertEqual(s.shape, (np.inf, 2))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
pars_list = [(1, 2), (2, 3), (3, 4)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['discr', 'discr', 'discr'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (2, 2, 2))
self.assertEqual(s.shape, (2, 2, 2))
self.assertEqual(s.count, 8)
self.assertEqual(s.boes, [(1, 2), (2, 3), (3, 4)])
pars_list = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
pars_list = [((1, 2, 3), (2, 3, 4), (3, 4, 5))]
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum'])
self.assertEqual(s.dim, 1)
self.assertEqual(s.size, (3,))
self.assertEqual(s.shape, (3,))
self.assertEqual(s.count, 3)
pars_list = ((1, 2, 3), (2, 3, 4), (3, 4, 5))
s = Space(pars=pars_list)
self.assertEqual(s.types, ['enum', 'enum', 'enum'])
self.assertEqual(s.dim, 3)
self.assertEqual(s.size, (3, 3, 3))
self.assertEqual(s.shape, (3, 3, 3))
self.assertEqual(s.count, 27)
self.assertEqual(s.boes, [(1, 2, 3), (2, 3, 4), (3, 4, 5)])
def test_extract(self):
"""
:return:
"""
pars_list = [(0, 10), (0, 10)]
types_list = ['discr', 'discr']
s = Space(pars=pars_list, par_types=types_list)
extracted_int, count = s.extract(3, 'interval')
extracted_int_list = list(extracted_int)
print('extracted int\n', extracted_int_list)
self.assertEqual(count, 16, 'extraction count wrong!')
self.assertEqual(extracted_int_list, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
extracted_rand, count = s.extract(10, 'rand')
extracted_rand_list = list(extracted_rand)
self.assertEqual(count, 10, 'extraction count wrong!')
print('extracted rand\n', extracted_rand_list)
for point in list(extracted_rand_list):
self.assertEqual(len(point), 2)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
extracted_int2, count = s.extract(3, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list2 = list(extracted_int2)
self.assertEqual(extracted_int_list2, [(0, 0), (0, 3), (0, 6), (0, 9), (3, 0), (3, 3),
(3, 6), (3, 9), (6, 0), (6, 3), (6, 6), (6, 9),
(9, 0), (9, 3), (9, 6), (9, 9)],
'space extraction wrong!')
print('extracted int list 2\n', extracted_int_list2)
self.assertIsInstance(extracted_int_list2[0][0], float)
self.assertIsInstance(extracted_int_list2[0][1], (int, np.int64))
extracted_rand2, count = s.extract(10, 'rand')
self.assertEqual(count, 10, 'extraction count wrong!')
extracted_rand_list2 = list(extracted_rand2)
print('extracted rand list 2:\n', extracted_rand_list2)
for point in extracted_rand_list2:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], float)
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], (int, np.int64))
self.assertLessEqual(point[1], 10)
self.assertGreaterEqual(point[1], 0)
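# interval and random extraction from a 2-D enum space mixing numeric bounds and string values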
pars_list = [(0., 10), ('a', 'b')]
s = Space(pars=pars_list, par_types='enum, enum')
extracted_int3, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list3 = list(extracted_int3)
self.assertEqual(extracted_int_list3, [(0., 'a'), (0., 'b'), (10, 'a'), (10, 'b')],
'space extraction wrong!')
print('extracted int list 3\n', extracted_int_list3)
self.assertIsInstance(extracted_int_list3[0][0], float)
self.assertIsInstance(extracted_int_list3[0][1], str)
extracted_rand3, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list3 = list(extracted_rand3)
print('extracted rand list 3:\n', extracted_rand_list3)
for point in extracted_rand_list3:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (float, int))
self.assertLessEqual(point[0], 10)
self.assertGreaterEqual(point[0], 0)
self.assertIsInstance(point[1], str)
self.assertIn(point[1], ['a', 'b'])
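# extraction from a single enum axis whose candidate values are tuples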
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14))]
s = Space(pars=pars_list, par_types='enum')
extracted_int4, count = s.extract(1, 'interval')
self.assertEqual(count, 4, 'extraction count wrong!')
extracted_int_list4 = list(extracted_int4)
# expected items mirror the single enum axis; build the zip as a list so the print loop below does not exhaust it
it = list(zip(extracted_int_list4, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)]))
for item, item2 in it:
print(item, item2)
self.assertTrue(all([tuple(ext_item) == item for ext_item, item in it]))
print('extracted int list 4\n', extracted_int_list4)
self.assertIsInstance(extracted_int_list4[0], tuple)
extracted_rand4, count = s.extract(3, 'rand')
self.assertEqual(count, 3, 'extraction count wrong!')
extracted_rand_list4 = list(extracted_rand4)
print('extracted rand list 4:\n', extracted_rand_list4)
for point in extracted_rand_list4:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], (int, str))
self.assertIn(point[0], [0, 1, 'a'])
self.assertIsInstance(point[1], (int, str))
self.assertIn(point[1], [10, 14, 'b', 'c'])
self.assertIn(point, [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
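# extraction from a mixed space: one enum axis of tuples plus one discr axis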
pars_list = [((0, 10), (1, 'c'), ('a', 'b'), (1, 14)), (1, 4)]
s = Space(pars=pars_list, par_types='enum, discr')
extracted_int5, count = s.extract(1, 'interval')
self.assertEqual(count, 16, 'extraction count wrong!')
extracted_int_list5 = list(extracted_int5)
for item, item2 in extracted_int_list5:
print(item, item2)
# each extracted point should pair one enum item with one value from the discr axis
self.assertIn(item, [(0, 10), (1, 'c'), ('a', 'b'), (1, 14)])
self.assertIn(item2, [1, 2, 3, 4])
print('extracted int list 5\n', extracted_int_list5)
self.assertIsInstance(extracted_int_list5[0], tuple)
extracted_rand5, count = s.extract(5, 'rand')
self.assertEqual(count, 5, 'extraction count wrong!')
extracted_rand_list5 = list(extracted_rand5)
print('extracted rand list 5:\n', extracted_rand_list5)
for point in extracted_rand_list5:
self.assertEqual(len(point), 2)
self.assertIsInstance(point[0], tuple)
print(f'type of point[1] is {type(point[1])}')
self.assertIsInstance(point[1], (int, np.int64))
self.assertIn(point[0], [(0., 10), (1, 'c'), ('a', 'b'), (1, 14)])
print(f'test incremental extraction')
pars_list = [(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)]
s = Space(pars_list)
ext, count = s.extract(64, 'interval')
self.assertEqual(count, 4096)
points = list(ext)
# all points have been extracted; build subspaces around ten of them
# check that each subspace is a Space and lies within s, then extract a point set with grid size 32 and check the resulting point count
for point in points[1000:1010]:
subspace = s.from_point(point, 64)
self.assertIsInstance(subspace, Space)
self.assertTrue(subspace in s)
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
ext, count = subspace.extract(32)
points = list(ext)
self.assertGreaterEqual(count, 512)
self.assertLessEqual(count, 4096)
print(f'\n---------------------------------'
f'\nthe space created around point <{point}> is'
f'\n{subspace.boes}'
f'\nand extracted {count} points, the first 5 are:'
f'\n{points[:5]}')
def test_axis_extract(self):
# test axis object with conti type
axis = Axis((0., 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'conti')
self.assertEqual(axis.axis_boe, (0., 5.))
self.assertEqual(axis.count, np.inf)
self.assertEqual(axis.size, 5.0)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [0., 1., 2., 3., 4.]))
self.assertTrue(np.allclose(axis.extract(0.5, 'int'), [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5]))
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(0 <= item <= 5) for item in extracted]))
# test axis object with discrete type
axis = Axis((1, 5))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'discr')
self.assertEqual(axis.axis_boe, (1, 5))
self.assertEqual(axis.count, 5)
self.assertEqual(axis.size, 5)
self.assertTrue(np.allclose(axis.extract(1, 'int'), [1, 2, 3, 4, 5]))
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 2, 3, 4, 5]) for item in extracted]))
# test axis object with enumerate type
axis = Axis((1, 5, 7, 10, 'A', 'F'))
self.assertIsInstance(axis, Axis)
self.assertEqual(axis.axis_type, 'enum')
self.assertEqual(axis.axis_boe, (1, 5, 7, 10, 'A', 'F'))
self.assertEqual(axis.count, 6)
self.assertEqual(axis.size, 6)
self.assertEqual(axis.extract(1, 'int'), [1, 5, 7, 10, 'A', 'F'])
self.assertRaises(ValueError, axis.extract, 0.5, 'int')
extracted = axis.extract(8, 'rand')
self.assertEqual(len(extracted), 8)
self.assertTrue(all([(item in [1, 5, 7, 10, 'A', 'F']) for item in extracted]))
def test_from_point(self):
"""测试从一个点生成一个space"""
# 生成一个space,指定space中的一个点以及distance,生成一个sub-space
pars_list = [(0., 10), (0, 10)]
s = Space(pars=pars_list, par_types=None)
self.assertEqual(s.types, ['conti', 'discr'])
self.assertEqual(s.dim, 2)
self.assertEqual(s.size, (10., 11))
self.assertEqual(s.shape, (np.inf, 11))
self.assertEqual(s.count, np.inf)
self.assertEqual(s.boes, [(0., 10), (0, 10)])
print('create subspace from a point in space')
p = (3, 3)
distance = 2
subspace = s.from_point(p, distance)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'discr'])
self.assertEqual(subspace.dim, 2)
self.assertEqual(subspace.size, (4.0, 5))
self.assertEqual(subspace.shape, (np.inf, 5))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(1, 5), (1, 5)])
print('create subspace from a 6 dimensional discrete space')
s = Space(pars=[(10, 250), (10, 250), (10, 250), (10, 250), (10, 250), (10, 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['discr', 'discr', 'discr', 'discr', 'discr', 'discr'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 65345616)
self.assertEqual(subspace.size, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.shape, (16, 21, 21, 21, 21, 21))
self.assertEqual(subspace.count, 65345616)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace from a 6 dimensional continuous space')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = 10
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 48000000)
self.assertEqual(subspace.size, (15.0, 20.0, 20.0, 20.0, 20.0, 20.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (190, 210), (140, 160), (140, 160), (140, 160), (140, 160)])
print('create subspace with different distances on each dimension')
s = Space(pars=[(10., 250), (10., 250), (10., 250), (10., 250), (10., 250), (10., 250)])
p = (15, 200, 150, 150, 150, 150)
d = [10, 5, 5, 10, 10, 5]
subspace = s.from_point(p, d)
self.assertIsInstance(subspace, Space)
self.assertEqual(subspace.types, ['conti', 'conti', 'conti', 'conti', 'conti', 'conti'])
self.assertEqual(subspace.dim, 6)
self.assertEqual(subspace.volume, 6000000)
self.assertEqual(subspace.size, (15.0, 10.0, 10.0, 20.0, 20.0, 10.0))
self.assertEqual(subspace.shape, (np.inf, np.inf, np.inf, np.inf, np.inf, np.inf))
self.assertEqual(subspace.count, np.inf)
self.assertEqual(subspace.boes, [(10, 25), (195, 205), (145, 155), (140, 160), (140, 160), (145, 155)])
class TestCashPlan(unittest.TestCase):
def setUp(self):
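# cp1: two investments at a 10% interest rate; cp2: a single investment; cp3: 12 annual investments of increasing amounts at 3.5%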
self.cp1 = qt.CashPlan(['2012-01-01', '2010-01-01'], [10000, 20000], 0.1)
self.cp1.info()
self.cp2 = qt.CashPlan(['20100501'], 10000)
self.cp2.info()
self.cp3 = qt.CashPlan(pd.date_range(start='2019-01-01',
freq='Y',
periods=12),
[i * 1000 + 10000 for i in range(12)],
0.035)
self.cp3.info()
def test_creation(self):
self.assertIsInstance(self.cp1, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp2, qt.CashPlan, 'CashPlan object creation wrong')
self.assertIsInstance(self.cp3, qt.CashPlan, 'CashPlan object creation wrong')
# test __repr__()
print(self.cp1)
print(self.cp2)
print(self.cp3)
# test __str__()
self.cp1.info()
self.cp2.info()
self.cp3.info()
# test assertion errors
self.assertRaises(AssertionError, qt.CashPlan, '2016-01-01', [10000, 10000])
self.assertRaises(KeyError, qt.CashPlan, '2020-20-20', 10000)
def test_properties(self):
self.assertEqual(self.cp1.amounts, [20000, 10000], 'property wrong')
self.assertEqual(self.cp1.first_day, Timestamp('2010-01-01'))
self.assertEqual(self.cp1.last_day, Timestamp('2012-01-01'))
self.assertEqual(self.cp1.investment_count, 2)
self.assertEqual(self.cp1.period, 730)
self.assertEqual(self.cp1.dates, [Timestamp('2010-01-01'), Timestamp('2012-01-01')])
self.assertEqual(self.cp1.ir, 0.1)
self.assertAlmostEqual(self.cp1.closing_value, 34200)
self.assertAlmostEqual(self.cp2.closing_value, 10000)
self.assertAlmostEqual(self.cp3.closing_value, 220385.3483685)
self.assertIsInstance(self.cp1.plan, pd.DataFrame)
self.assertIsInstance(self.cp2.plan, pd.DataFrame)
self.assertIsInstance(self.cp3.plan, pd.DataFrame)
def test_operation(self):
cp_self_add = self.cp1 + self.cp1
cp_add = self.cp1 + self.cp2
cp_add_int = self.cp1 + 10000
cp_mul_int = self.cp1 * 2
cp_mul_float = self.cp2 * 1.5
cp_mul_time = 3 * self.cp2
cp_mul_time2 = 2 * self.cp1
cp_mul_time3 = 2 * self.cp3
cp_mul_float2 = 2. * self.cp3
self.assertIsInstance(cp_self_add, qt.CashPlan)
self.assertEqual(cp_self_add.amounts, [40000, 20000])
self.assertEqual(cp_add.amounts, [20000, 10000, 10000])
self.assertEqual(cp_add_int.amounts, [30000, 20000])
self.assertEqual(cp_mul_int.amounts, [40000, 20000])
self.assertEqual(cp_mul_float.amounts, [15000])
self.assertEqual(cp_mul_float.dates, [Timestamp('2010-05-01')])
self.assertEqual(cp_mul_time.amounts, [10000, 10000, 10000])
self.assertEqual(cp_mul_time.dates, [Timestamp('2010-05-01'),
Timestamp('2011-05-01'),
Timestamp('2012-04-30')])
self.assertEqual(cp_mul_time2.amounts, [20000, 10000, 20000, 10000])
self.assertEqual(cp_mul_time2.dates, [Timestamp('2010-01-01'),
Timestamp('2012-01-01'),
Timestamp('2014-01-01'),
Timestamp('2016-01-01')])
self.assertEqual(cp_mul_time3.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31'),
Timestamp('2031-12-29'),
Timestamp('2032-12-29'),
Timestamp('2033-12-29'),
Timestamp('2034-12-29'),
Timestamp('2035-12-29'),
Timestamp('2036-12-29'),
Timestamp('2037-12-29'),
Timestamp('2038-12-29'),
Timestamp('2039-12-29'),
Timestamp('2040-12-29'),
Timestamp('2041-12-29'),
Timestamp('2042-12-29')])
self.assertEqual(cp_mul_float2.dates, [Timestamp('2019-12-31'),
Timestamp('2020-12-31'),
Timestamp('2021-12-31'),
Timestamp('2022-12-31'),
Timestamp('2023-12-31'),
Timestamp('2024-12-31'),
Timestamp('2025-12-31'),
Timestamp('2026-12-31'),
Timestamp('2027-12-31'),
Timestamp('2028-12-31'),
Timestamp('2029-12-31'),
Timestamp('2030-12-31')])
self.assertEqual(cp_mul_float2.amounts, [20000.0,
22000.0,
24000.0,
26000.0,
28000.0,
30000.0,
32000.0,
34000.0,
36000.0,
38000.0,
40000.0,
42000.0])
class TestPool(unittest.TestCase):
def setUp(self):
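# a ResultPool with capacity 5, sample items with performance scores, and three extra results to push it over capacity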
self.p = ResultPool(5)
self.items = ['first', 'second', (1, 2, 3), 'this', 24]
self.perfs = [1, 2, 3, 4, 5]
self.additional_result1 = ('abc', 12)
self.additional_result2 = ([1, 2], -1)
self.additional_result3 = (12, 5)
def test_create(self):
self.assertIsInstance(self.p, ResultPool)
def test_operation(self):
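# in_pool() appends items with their perf scores; cut() trims the pool back to capacity, keeping the largest perfs by default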
self.p.in_pool(self.additional_result1[0], self.additional_result1[1])
self.p.cut()
self.assertEqual(self.p.item_count, 1)
self.assertEqual(self.p.items, ['abc'])
for item, perf in zip(self.items, self.perfs):
self.p.in_pool(item, perf)
self.assertEqual(self.p.item_count, 6)
self.assertEqual(self.p.items, ['abc', 'first', 'second', (1, 2, 3), 'this', 24])
self.p.cut()
self.assertEqual(self.p.items, ['second', (1, 2, 3), 'this', 24, 'abc'])
self.assertEqual(self.p.perfs, [2, 3, 4, 5, 12])
self.p.in_pool(self.additional_result2[0], self.additional_result2[1])
self.p.in_pool(self.additional_result3[0], self.additional_result3[1])
self.assertEqual(self.p.item_count, 7)
self.p.cut(keep_largest=False)
self.assertEqual(self.p.items, [[1, 2], 'second', (1, 2, 3), 'this', 24])
self.assertEqual(self.p.perfs, [-1, 2, 3, 4, 5])
class TestCoreSubFuncs(unittest.TestCase):
"""Test all functions in core.py"""
def setUp(self):
pass
def test_input_to_list(self):
print('Testing input_to_list() function')
input_str = 'first'
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 3), ['first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 4), ['first', 'first', 'first', 'first'])
self.assertEqual(qt.utilfuncs.input_to_list(input_str, 2, None), ['first', 'first'])
input_list = ['first', 'second']
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 3), ['first', 'second', None])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 4, 'padder'), ['first', 'second', 'padder', 'padder'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, 1), ['first', 'second'])
self.assertEqual(qt.utilfuncs.input_to_list(input_list, -5), ['first', 'second'])
def test_point_in_space(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
p2 = (-1, 3, 10)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
self.assertFalse(p2 in sp)
print(f'point {p2} is not in space {sp}')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)], 'conti, conti, enum')
p1 = (5.5, 3.2, 8)
self.assertTrue(p1 in sp)
print(f'point {p1} is in space {sp}')
def test_space_in_space(self):
print('test if a space is in another space')
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
sp2 = Space([(0., 10.), (0., 10.), (0., 10.)])
self.assertTrue(sp2 in sp)
self.assertTrue(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is in space {sp2}\n'
f'they are equal to each other\n')
sp2 = Space([(0, 5.), (2, 7.), (3., 9.)])
self.assertTrue(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'{sp2} is a sub space of {sp}\n')
sp2 = Space([(0, 5), (2, 7), (3., 9)])
self.assertFalse(sp2 in sp)
self.assertFalse(sp in sp2)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
sp = Space([(0., 10.), (0., 10.), range(40, 3, -2)])
self.assertFalse(sp in sp2)
self.assertFalse(sp2 in sp)
print(f'space {sp2} is not in space {sp}\n'
f'and space {sp} is not in space {sp2}\n'
f'they have different types of axes\n')
def test_space_around_centre(self):
sp = Space([(0., 10.), (0., 10.), (0., 10.)])
p1 = (5.5, 3.2, 7)
ssp = space_around_centre(space=sp, centre=p1, radius=1.2)
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.3, 6.7), (2.0, 4.4), (5.8, 8.2)])
print('\ntest multiple radii:')
ssp = space_around_centre(space=sp, centre=p1, radius=[1, 2, 1])
print(ssp.boes)
self.assertEqual(ssp.boes, [(4.5, 6.5), (1.2000000000000002, 5.2), (6.0, 8.0)])
print('\ntest points on edge:')
p2 = (5.5, 3.2, 10)
ssp = space_around_centre(space=sp, centre=p1, radius=3.9)
print(ssp.boes)
self.assertEqual(ssp.boes, [(1.6, 9.4), (0.0, 7.1), (3.1, 10.0)])
print('\ntest enum spaces')
sp = Space([(0, 100), range(40, 3, -2)], 'discr, enum')
p1 = [34, 12]
ssp = space_around_centre(space=sp, centre=p1, radius=5, ignore_enums=False)
self.assertEqual(ssp.boes, [(29, 39), (22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(ssp.boes)
print('\ntest enum space and ignore enum axis')
ssp = space_around_centre(space=sp, centre=p1, radius=5)
self.assertEqual(ssp.boes, [(29, 39),
(40, 38, 36, 34, 32, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4)])
print(sp.boes)
def test_time_string_format(self):
print('Testing time_str_format() function:')
t = 3.14
self.assertEqual(time_str_format(t), '3s 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '3s ')
self.assertEqual(time_str_format(t, short_form=True), '3"140')
self.assertEqual(time_str_format(t, estimation=True, short_form=True), '3"')
t = 300.14
self.assertEqual(time_str_format(t), '5min 140.0ms')
self.assertEqual(time_str_format(t, estimation=True), '5min ')
self.assertEqual(time_str_format(t, short_form=True), "5'140")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "5'")
t = 7435.0014
self.assertEqual(time_str_format(t), '2hrs 3min 55s 1.4ms')
self.assertEqual(time_str_format(t, estimation=True), '2hrs ')
self.assertEqual(time_str_format(t, short_form=True), "2H3'55\"001")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "2H")
t = 88425.0509
self.assertEqual(time_str_format(t), '1days 33min 45s 50.9ms')
self.assertEqual(time_str_format(t, estimation=True), '1days ')
self.assertEqual(time_str_format(t, short_form=True), "1D33'45\"051")
self.assertEqual(time_str_format(t, estimation=True, short_form=True), "1D")
def test_get_stock_pool(self):
print(f'start testing the stock pool building function\n')
share_basics = stock_basic(fields='ts_code,symbol,name,area,industry,market,list_date,exchange')
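# the checks below cross-reference the selected pool against share basics, so they depend on the available data source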
print(f'\nselect all stocks by area')
stock_pool = qt.get_stock_pool(area='上海')
print(f'{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "上海"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].eq('上海').all())
print(f'\nselect all stocks by multiple areas')
stock_pool = qt.get_stock_pool(area='贵州,北京,天津')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are in list of ["贵州", "北京", "天津"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['贵州',
'北京',
'天津']).all())
print(f'\nselect all stocks by area and industry')
stock_pool = qt.get_stock_pool(area='四川', industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock areas are "四川", and industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(['四川']).all())
print(f'\nselect all stocks by industry')
stock_pool = qt.get_stock_pool(industry='银行, 金融')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stocks industry in ["银行", "金融"]\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(['银行', '金融']).all())
print(f'\nselect all stocks by market')
stock_pool = qt.get_stock_pool(market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
print(f'\nselect all stocks by market and list date')
stock_pool = qt.get_stock_pool(date='2000-01-01', market='主板')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all stock market is "主板", and list date after "2000-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['market'].isin(['主板']).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('2000-01-01').all())
print(f'\nselect all stocks by list date')
stock_pool = qt.get_stock_pool(date='1997-01-01')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all list dates are no later than "1997-01-01"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1997-01-01').all())
print(f'\nselect all stocks by exchange')
stock_pool = qt.get_stock_pool(exchange='SSE')
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['exchange'].eq('SSE').all())
print(f'\nselect all stocks by industry, area and list date')
industry_list = ['银行', '全国地产', '互联网', '环境保护', '区域地产',
'酒店餐饮', '运输设备', '综合类', '建筑工程', '玻璃',
'家用电器', '文教休闲', '其他商业', '元器件', 'IT设备',
'其他建材', '汽车服务', '火力发电', '医药商业', '汽车配件',
'广告包装', '轻工机械', '新型电力', '多元金融', '饲料']
area_list = ['深圳', '北京', '吉林', '江苏', '辽宁', '广东',
'安徽', '四川', '浙江', '湖南', '河北', '新疆',
'山东', '河南', '山西', '江西', '青海', '湖北',
'内蒙', '海南', '重庆', '陕西', '福建', '广西',
'上海']
stock_pool = qt.get_stock_pool(date='19980101',
industry=industry_list,
area=area_list)
print(f'\n{len(stock_pool)} shares selected, first 5 are: {stock_pool[0:5]}\n'
f'check if all exchanges are "SSE"\n'
f'{share_basics[np.isin(share_basics.ts_code, stock_pool)].head()}')
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['list_date'].le('1998-01-01').all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['industry'].isin(industry_list).all())
self.assertTrue(share_basics[np.isin(share_basics.ts_code, stock_pool)]['area'].isin(area_list).all())
self.assertRaises(KeyError, qt.get_stock_pool, industry=25)
self.assertRaises(KeyError, qt.get_stock_pool, share_name='000300.SH')
self.assertRaises(KeyError, qt.get_stock_pool, markets='SSE')
class TestEvaluations(unittest.TestCase):
"""Test all evaluation functions in core.py"""
# the expected results below were calculated manually in an Excel file
def setUp(self):
"""用np.random生成测试用数据,使用cumsum()模拟股票走势"""
self.test_data1 = pd.DataFrame([5.34892759, 5.65768696, 5.79227076, 5.56266871, 5.88189632,
6.24795001, 5.92755558, 6.38748165, 6.31331899, 5.86001665,
5.61048472, 5.30696736, 5.40406792, 5.03180571, 5.37886353,
5.78608307, 6.26540339, 6.59348026, 6.90943801, 6.70911677,
6.33015954, 6.06697417, 5.9752499, 6.45786408, 6.95273763,
6.7691991, 6.70355481, 6.28048969, 6.61344541, 6.24620003,
6.47409983, 6.4522311, 6.8773094, 6.99727832, 6.59262674,
6.59014938, 6.63758237, 6.38331869, 6.09902105, 6.35390109,
6.51993567, 6.87244592, 6.83963485, 7.08797815, 6.88003144,
6.83657323, 6.97819483, 7.01600276, 7.12554256, 7.58941523,
7.61014457, 7.21224091, 7.48174399, 7.66490854, 7.51371968,
7.11586198, 6.97147399, 6.67453301, 6.2042138, 6.33967015,
6.22187938, 5.98426993, 6.37096079, 6.55897161, 6.26422645,
6.69363762, 7.12668015, 6.83232926, 7.30524081, 7.4262041,
7.54031383, 7.17545919, 7.20659257, 7.44886016, 7.37094393,
6.88011022, 7.08142491, 6.74992833, 6.5967097, 6.21336693,
6.35565105, 6.82347596, 6.44773408, 6.84538053, 6.47966466,
6.09699528, 5.63927014, 6.01081024, 6.20585303, 6.60528206,
7.01594726, 7.03684251, 6.76574977, 7.08740846, 6.65336462,
7.07126686, 6.80058956, 6.79241977, 6.47843472, 6.39245474],
columns=['value'])
self.test_data2 = pd.DataFrame([5.09276527, 4.83828592, 4.6000911, 4.63170487, 4.63566451,
4.50546921, 4.96390044, 4.64557907, 4.25787855, 3.76585551,
3.38826334, 3.76243422, 4.06365426, 3.87084726, 3.91400935,
4.13438822, 4.27064542, 4.56776104, 5.03800296, 5.31070529,
5.39902276, 5.21186286, 5.05683114, 4.68842046, 5.11895168,
5.27151571, 5.72294993, 6.09961056, 6.26569635, 6.48806151,
6.16058885, 6.2582459, 6.38934791, 6.57831057, 6.19508831,
5.70155153, 5.20435735, 5.36538825, 5.40450056, 5.2227697,
5.37828693, 5.53058991, 6.02996797, 5.76802181, 5.66166713,
6.07988994, 5.61794367, 5.63218151, 6.10728013, 6.0324168,
6.27164431, 6.27551239, 6.52329665, 7.00470007, 7.34163113,
7.33699083, 7.67661334, 8.09395749, 7.68086668, 7.58341161,
7.46219819, 7.58671899, 7.19348298, 7.40088323, 7.47562005,
7.93342043, 8.2286081, 8.3521632, 8.43590025, 8.34977395,
8.57563095, 8.81586328, 9.08738649, 9.01542031, 8.8653815,
9.21763111, 9.04233017, 8.59533999, 8.47590075, 8.70857222,
8.78890756, 8.92697606, 9.35743773, 9.68280866, 10.15622021,
10.55908549, 10.6337894, 10.55197128, 10.65435176, 10.54611045,
10.19432562, 10.48320884, 10.36176768, 10.03186854, 10.23656092,
10.0062843, 10.13669686, 10.30758958, 9.87904176, 10.05126375],
columns=['value'])
self.test_data3 = pd.DataFrame([5.02851874, 5.20700348, 5.02410709, 5.49836387, 5.06834371,
5.10956737, 5.15314979, 5.02256472, 5.09746382, 5.23909247,
4.93410336, 4.96316186, 5.40026682, 5.7353255, 5.53438319,
5.79092139, 5.67528173, 5.89840855, 5.75379463, 6.10855386,
5.77322365, 5.84538021, 5.6103973, 5.7518655, 5.49729695,
5.13610628, 5.30524121, 5.68093462, 5.73251319, 6.04420783,
6.26929843, 6.59610234, 6.09872345, 6.25475121, 6.72927396,
6.91395783, 7.00693283, 7.36217783, 7.71516676, 7.67580263,
7.62477511, 7.73600568, 7.53457914, 7.46170277, 7.83658014,
8.11481319, 8.03705544, 7.64948845, 7.52043731, 7.67247943,
7.46511982, 7.43541798, 7.58856517, 7.9392717, 8.25406287,
7.77031632, 8.03223447, 7.86799055, 7.57630999, 7.33230519,
7.22378732, 6.85972264, 7.17548456, 7.5387846, 7.2392632,
6.8455644, 6.59557185, 6.6496796, 6.73685623, 7.18598015,
7.13619128, 6.88060157, 7.1399681, 7.30308077, 6.94942434,
7.0247815, 7.37567798, 7.50080197, 7.59719284, 7.14520561,
7.29913484, 7.79551341, 8.15497781, 8.40456095, 8.86516528,
8.53042688, 8.94268762, 8.52048006, 8.80036284, 8.91602364,
9.19953385, 8.70828953, 8.24613093, 8.18770453, 7.79548389,
7.68627967, 7.23205036, 6.98302636, 7.06515819, 6.95068113],
columns=['value'])
self.test_data4 = pd.DataFrame([4.97926539, 5.44016005, 5.45122915, 5.74485615, 5.45600553,
5.44858945, 5.2435413, 5.47315161, 5.58464303, 5.36179749,
5.38236326, 5.29614981, 5.76523508, 5.75102892, 6.15316618,
6.03852528, 6.01442228, 5.70510182, 5.22748133, 5.46762379,
5.78926267, 5.8221362, 5.61236849, 5.30615725, 5.24200611,
5.41042642, 5.59940342, 5.28306781, 4.99451932, 5.08799266,
5.38865647, 5.58229139, 5.33492845, 5.48206276, 5.09721379,
5.39190493, 5.29965087, 5.0374415, 5.50798022, 5.43107577,
5.22759507, 4.991809, 5.43153084, 5.39966868, 5.59916352,
5.66412137, 6.00611838, 5.63564902, 5.66723484, 5.29863863,
4.91115153, 5.3749929, 5.75082334, 6.08308148, 6.58091182,
6.77848803, 7.19588758, 7.64862286, 7.99818347, 7.91824794,
8.30341071, 8.45984973, 7.98700002, 8.18924931, 8.60755649,
8.66233396, 8.91018407, 9.0782739, 9.33515448, 8.95870245,
8.98426422, 8.50340317, 8.64916085, 8.93592407, 8.63145745,
8.65322862, 8.39543204, 8.37969997, 8.23394504, 8.04062872,
7.91259763, 7.57252171, 7.72670114, 7.74486117, 8.06908188,
7.99166889, 7.92155906, 8.39956136, 8.80181323, 8.47464091,
8.06557064, 7.87145573, 8.0237959, 8.39481998, 8.68525692,
8.81185461, 8.98632237, 9.0989835, 8.89787405, 8.86508591],
columns=['value'])
self.test_data5 = pd.DataFrame([4.50258923, 4.35142568, 4.07459514, 3.87791297, 3.73715985,
3.98455684, 4.07587908, 4.00042472, 4.28276612, 4.01362051,
4.13713565, 4.49312372, 4.48633159, 4.4641207, 4.13444605,
3.79107217, 4.22941629, 4.56548511, 4.92472163, 5.27723158,
5.67409193, 6.00176917, 5.88889928, 5.55256103, 5.39308314,
5.2610492, 5.30738908, 5.22222408, 4.90332238, 4.57499908,
4.96097146, 4.81531011, 4.39115442, 4.63200662, 5.04588813,
4.67866025, 5.01705123, 4.83562258, 4.60381702, 4.66187576,
4.41292828, 4.86604507, 4.42280124, 4.07517294, 4.16317319,
4.10316596, 4.42913598, 4.06609666, 3.96725913, 4.15965746,
4.12379564, 4.04054068, 3.84342851, 3.45902867, 3.17649855,
3.09773586, 3.5502119, 3.66396995, 3.66306483, 3.29131401,
2.79558533, 2.88319542, 3.03671098, 3.44645857, 3.88167161,
3.57961874, 3.60180276, 3.96702102, 4.05429995, 4.40056979,
4.05653231, 3.59600456, 3.60792477, 4.09989922, 3.73503663,
4.01892626, 3.94597242, 3.81466605, 3.71417992, 3.93767156,
4.42806557, 4.06988106, 4.03713636, 4.34408673, 4.79810156,
5.18115011, 4.89798406, 5.3960077, 5.72504875, 5.61894017,
5.1958197, 4.85275896, 5.17550207, 4.71548987, 4.62408567,
4.55488535, 4.36532649, 4.26031979, 4.25225607, 4.58627048],
columns=['value'])
self.test_data6 = pd.DataFrame([5.08639513, 5.05761083, 4.76160923, 4.62166504, 4.62923183,
4.25070173, 4.13447513, 3.90890013, 3.76687608, 3.43342482,
3.67648224, 3.6274775, 3.9385404, 4.39771627, 4.03199346,
3.93265288, 3.50059789, 3.3851961, 3.29743973, 3.2544872,
2.93692949, 2.70893003, 2.55461976, 2.20922332, 2.29054475,
2.2144714, 2.03726827, 2.39007617, 2.29866155, 2.40607111,
2.40440444, 2.79374649, 2.66541922, 2.27018079, 2.08505127,
2.55478864, 2.22415625, 2.58517923, 2.58802256, 2.94870959,
2.69301739, 2.19991535, 2.69473146, 2.64704637, 2.62753542,
2.14240825, 2.38565154, 1.94592117, 2.32243877, 2.69337246,
2.51283854, 2.62484451, 2.15559054, 2.35410875, 2.31219177,
1.96018265, 2.34711266, 2.58083322, 2.40290041, 2.20439791,
2.31472425, 2.16228248, 2.16439749, 2.20080737, 1.73293206,
1.9264407, 2.25089861, 2.69269101, 2.59296687, 2.1420998,
1.67819153, 1.98419023, 2.14479494, 1.89055376, 1.96720648,
1.9916694, 2.37227761, 2.14446036, 2.34573903, 1.86162546,
2.1410721, 2.39204939, 2.52529064, 2.47079939, 2.9299031,
3.09452923, 2.93276708, 3.21731309, 3.06248964, 2.90413406,
2.67844632, 2.45621213, 2.41463398, 2.7373913, 3.14917045,
3.4033949, 3.82283446, 4.02285451, 3.7619638, 4.10346795],
columns=['value'])
self.test_data7 = pd.DataFrame([4.75233583, 4.47668283, 4.55894263, 4.61765848, 4.622892,
4.58941116, 4.32535872, 3.88112797, 3.47237806, 3.50898953,
3.82530406, 3.6718017, 3.78918195, 4.1800752, 4.01818557,
4.40822582, 4.65474654, 4.89287256, 4.40879274, 4.65505126,
4.36876403, 4.58418934, 4.75687172, 4.3689799, 4.16126498,
4.0203982, 3.77148242, 3.38198096, 3.07261764, 2.9014741,
2.5049543, 2.756105, 2.28779058, 2.16986991, 1.8415962,
1.83319008, 2.20898291, 2.00128981, 1.75747025, 1.26676663,
1.40316876, 1.11126484, 1.60376367, 1.22523829, 1.58816681,
1.49705679, 1.80244138, 1.55128293, 1.35339409, 1.50985759,
1.0808451, 1.05892796, 1.43414812, 1.43039101, 1.73631655,
1.43940867, 1.82864425, 1.71088265, 2.12015154, 2.45417128,
2.84777618, 2.7925612, 2.90975121, 3.25920745, 3.13801182,
3.52733677, 3.65468491, 3.69395211, 3.49862035, 3.24786017,
3.64463138, 4.00331929, 3.62509565, 3.78013949, 3.4174012,
3.76312271, 3.62054004, 3.67206716, 3.60596058, 3.38636199,
3.42580676, 3.32921095, 3.02976759, 3.28258676, 3.45760838,
3.24917528, 2.94618304, 2.86980011, 2.63191259, 2.39566759,
2.53159917, 2.96273967, 3.25626185, 2.97425402, 3.16412191,
3.58280763, 3.23257727, 3.62353556, 3.12806399, 2.92532313],
columns=['value'])
# build a test series of 500 data points for testing the evaluation process when there are more than 250 data points
self.long_data = pd.DataFrame([ 9.879, 9.916, 10.109, 10.214, 10.361, 10.768, 10.594, 10.288,
10.082, 9.994, 10.125, 10.126, 10.384, 10.734, 10.4 , 10.87 ,
11.338, 11.061, 11.415, 11.724, 12.077, 12.196, 12.064, 12.423,
12.19 , 11.729, 11.677, 11.448, 11.485, 10.989, 11.242, 11.239,
11.113, 11.075, 11.471, 11.745, 11.754, 11.782, 12.079, 11.97 ,
12.178, 11.95 , 12.438, 12.612, 12.804, 12.952, 12.612, 12.867,
12.832, 12.832, 13.015, 13.315, 13.249, 12.904, 12.776, 12.64 ,
12.543, 12.287, 12.225, 11.844, 11.985, 11.945, 11.542, 11.871,
12.245, 12.228, 12.362, 11.899, 11.962, 12.374, 12.816, 12.649,
12.252, 12.579, 12.3 , 11.988, 12.177, 12.312, 12.744, 12.599,
12.524, 12.82 , 12.67 , 12.876, 12.986, 13.271, 13.606, 13.82 ,
14.161, 13.833, 13.831, 14.137, 13.705, 13.414, 13.037, 12.759,
12.642, 12.948, 13.297, 13.483, 13.836, 14.179, 13.709, 13.655,
13.198, 13.508, 13.953, 14.387, 14.043, 13.987, 13.561, 13.391,
12.923, 12.555, 12.503, 12.292, 11.877, 12.34 , 12.141, 11.687,
11.992, 12.458, 12.131, 11.75 , 11.739, 11.263, 11.762, 11.976,
11.578, 11.854, 12.136, 12.422, 12.311, 12.56 , 12.879, 12.861,
12.973, 13.235, 13.53 , 13.531, 13.137, 13.166, 13.31 , 13.103,
13.007, 12.643, 12.69 , 12.216, 12.385, 12.046, 12.321, 11.9 ,
11.772, 11.816, 11.871, 11.59 , 11.518, 11.94 , 11.803, 11.924,
12.183, 12.136, 12.361, 12.406, 11.932, 11.684, 11.292, 11.388,
11.874, 12.184, 12.002, 12.16 , 11.741, 11.26 , 11.123, 11.534,
11.777, 11.407, 11.275, 11.679, 11.62 , 11.218, 11.235, 11.352,
11.366, 11.061, 10.661, 10.582, 10.899, 11.352, 11.792, 11.475,
11.263, 11.538, 11.183, 10.936, 11.399, 11.171, 11.214, 10.89 ,
10.728, 11.191, 11.646, 11.62 , 11.195, 11.178, 11.18 , 10.956,
11.205, 10.87 , 11.098, 10.639, 10.487, 10.507, 10.92 , 10.558,
10.119, 9.882, 9.573, 9.515, 9.845, 9.852, 9.495, 9.726,
10.116, 10.452, 10.77 , 11.225, 10.92 , 10.824, 11.096, 11.542,
11.06 , 10.568, 10.585, 10.884, 10.401, 10.068, 9.964, 10.285,
10.239, 10.036, 10.417, 10.132, 9.839, 9.556, 9.084, 9.239,
9.304, 9.067, 8.587, 8.471, 8.007, 8.321, 8.55 , 9.008,
9.138, 9.088, 9.434, 9.156, 9.65 , 9.431, 9.654, 10.079,
10.411, 10.865, 10.51 , 10.205, 10.519, 10.367, 10.855, 10.642,
10.298, 10.622, 10.173, 9.792, 9.995, 9.904, 9.771, 9.597,
9.506, 9.212, 9.688, 10.032, 9.723, 9.839, 9.918, 10.332,
10.236, 9.989, 10.192, 10.685, 10.908, 11.275, 11.72 , 12.158,
12.045, 12.244, 12.333, 12.246, 12.552, 12.958, 13.11 , 13.53 ,
13.123, 13.138, 13.57 , 13.389, 13.511, 13.759, 13.698, 13.744,
13.467, 13.795, 13.665, 13.377, 13.423, 13.772, 13.295, 13.073,
12.718, 12.388, 12.399, 12.185, 11.941, 11.818, 11.465, 11.811,
12.163, 11.86 , 11.935, 11.809, 12.145, 12.624, 12.768, 12.321,
12.277, 11.889, 12.11 , 12.606, 12.943, 12.945, 13.112, 13.199,
13.664, 14.051, 14.189, 14.339, 14.611, 14.656, 15.112, 15.086,
15.263, 15.021, 15.346, 15.572, 15.607, 15.983, 16.151, 16.215,
16.096, 16.089, 16.32 , 16.59 , 16.657, 16.752, 16.583, 16.743,
16.373, 16.662, 16.243, 16.163, 16.491, 16.958, 16.977, 17.225,
17.637, 17.344, 17.684, 17.892, 18.036, 18.182, 17.803, 17.588,
17.101, 17.538, 17.124, 16.787, 17.167, 17.138, 16.955, 17.148,
17.135, 17.635, 17.718, 17.675, 17.622, 17.358, 17.754, 17.729,
17.576, 17.772, 18.239, 18.441, 18.729, 18.319, 18.608, 18.493,
18.069, 18.122, 18.314, 18.423, 18.709, 18.548, 18.384, 18.391,
17.988, 17.986, 17.653, 17.249, 17.298, 17.06 , 17.36 , 17.108,
17.348, 17.596, 17.46 , 17.635, 17.275, 17.291, 16.933, 17.337,
17.231, 17.146, 17.148, 16.751, 16.891, 17.038, 16.735, 16.64 ,
16.231, 15.957, 15.977, 16.077, 16.054, 15.797, 15.67 , 15.911,
16.077, 16.17 , 15.722, 15.258, 14.877, 15.138, 15. , 14.811,
14.698, 14.407, 14.583, 14.704, 15.153, 15.436, 15.634, 15.453,
15.877, 15.696, 15.563, 15.927, 16.255, 16.696, 16.266, 16.698,
16.365, 16.493, 16.973, 16.71 , 16.327, 16.605, 16.486, 16.846,
16.935, 17.21 , 17.389, 17.546, 17.773, 17.641, 17.485, 17.794,
17.354, 16.904, 16.675, 16.43 , 16.898, 16.819, 16.921, 17.201,
17.617, 17.368, 17.864, 17.484],
columns=['value'])
self.long_bench = pd.DataFrame([ 9.7 , 10.179, 10.321, 9.855, 9.936, 10.096, 10.331, 10.662,
10.59 , 11.031, 11.154, 10.945, 10.625, 10.233, 10.284, 10.252,
10.221, 10.352, 10.444, 10.773, 10.904, 11.104, 10.797, 10.55 ,
10.943, 11.352, 11.641, 11.983, 11.696, 12.138, 12.365, 12.379,
11.969, 12.454, 12.947, 13.119, 13.013, 12.763, 12.632, 13.034,
12.681, 12.561, 12.938, 12.867, 13.202, 13.132, 13.539, 13.91 ,
13.456, 13.692, 13.771, 13.904, 14.069, 13.728, 13.97 , 14.228,
13.84 , 14.041, 13.963, 13.689, 13.543, 13.858, 14.118, 13.987,
13.611, 14.028, 14.229, 14.41 , 14.74 , 15.03 , 14.915, 15.207,
15.354, 15.665, 15.877, 15.682, 15.625, 15.175, 15.105, 14.893,
14.86 , 15.097, 15.178, 15.293, 15.238, 15. , 15.283, 14.994,
14.907, 14.664, 14.888, 15.297, 15.313, 15.368, 14.956, 14.802,
14.506, 14.257, 14.619, 15.019, 15.049, 14.625, 14.894, 14.978,
15.434, 15.578, 16.038, 16.107, 16.277, 16.365, 16.204, 16.465,
16.401, 16.895, 17.057, 16.621, 16.225, 16.075, 15.863, 16.292,
16.551, 16.724, 16.817, 16.81 , 17.192, 16.86 , 16.745, 16.707,
16.552, 16.133, 16.301, 16.08 , 15.81 , 15.75 , 15.909, 16.127,
16.457, 16.204, 16.329, 16.748, 16.624, 17.011, 16.548, 16.831,
16.653, 16.791, 16.57 , 16.778, 16.928, 16.932, 17.22 , 16.876,
17.301, 17.422, 17.689, 17.316, 17.547, 17.534, 17.409, 17.669,
17.416, 17.859, 17.477, 17.307, 17.245, 17.352, 17.851, 17.412,
17.144, 17.138, 17.085, 16.926, 16.674, 16.854, 17.064, 16.95 ,
16.609, 16.957, 16.498, 16.552, 16.175, 15.858, 15.697, 15.781,
15.583, 15.36 , 15.558, 16.046, 15.968, 15.905, 16.358, 16.783,
17.048, 16.762, 17.224, 17.363, 17.246, 16.79 , 16.608, 16.423,
15.991, 15.527, 15.147, 14.759, 14.792, 15.206, 15.148, 15.046,
15.429, 14.999, 15.407, 15.124, 14.72 , 14.713, 15.022, 15.092,
14.982, 15.001, 14.734, 14.713, 14.841, 14.562, 15.005, 15.483,
15.472, 15.277, 15.503, 15.116, 15.12 , 15.442, 15.476, 15.789,
15.36 , 15.764, 16.218, 16.493, 16.642, 17.088, 16.816, 16.645,
16.336, 16.511, 16.2 , 15.994, 15.86 , 15.929, 16.316, 16.416,
16.746, 17.173, 17.531, 17.627, 17.407, 17.49 , 17.768, 17.509,
17.795, 18.147, 18.63 , 18.945, 19.021, 19.518, 19.6 , 19.744,
19.63 , 19.32 , 18.933, 19.297, 19.598, 19.446, 19.236, 19.198,
19.144, 19.159, 19.065, 19.032, 18.586, 18.272, 18.119, 18.3 ,
17.894, 17.744, 17.5 , 17.083, 17.092, 16.864, 16.453, 16.31 ,
16.681, 16.342, 16.447, 16.715, 17.068, 17.067, 16.822, 16.673,
16.675, 16.592, 16.686, 16.397, 15.902, 15.597, 15.357, 15.162,
15.348, 15.603, 15.283, 15.257, 15.082, 14.621, 14.366, 14.039,
13.957, 14.141, 13.854, 14.243, 14.414, 14.033, 13.93 , 14.104,
14.461, 14.249, 14.053, 14.165, 14.035, 14.408, 14.501, 14.019,
14.265, 14.67 , 14.797, 14.42 , 14.681, 15.16 , 14.715, 14.292,
14.411, 14.656, 15.094, 15.366, 15.055, 15.198, 14.762, 14.294,
13.854, 13.811, 13.549, 13.927, 13.897, 13.421, 13.037, 13.32 ,
13.721, 13.511, 13.999, 13.529, 13.418, 13.881, 14.326, 14.362,
13.987, 14.015, 13.599, 13.343, 13.307, 13.689, 13.851, 13.404,
13.577, 13.395, 13.619, 13.195, 12.904, 12.553, 12.294, 12.649,
12.425, 11.967, 12.062, 11.71 , 11.645, 12.058, 12.136, 11.749,
11.953, 12.401, 12.044, 11.901, 11.631, 11.396, 11.036, 11.244,
10.864, 11.207, 11.135, 11.39 , 11.723, 12.084, 11.8 , 11.471,
11.33 , 11.504, 11.295, 11.3 , 10.901, 10.494, 10.825, 11.054,
10.866, 10.713, 10.875, 10.846, 10.947, 11.422, 11.158, 10.94 ,
10.521, 10.36 , 10.411, 10.792, 10.472, 10.305, 10.525, 10.853,
10.556, 10.72 , 10.54 , 10.583, 10.299, 10.061, 10.004, 9.903,
9.796, 9.472, 9.246, 9.54 , 9.456, 9.177, 9.484, 9.557,
9.493, 9.968, 9.536, 9.39 , 8.922, 8.423, 8.518, 8.686,
8.771, 9.098, 9.281, 8.858, 9.027, 8.553, 8.784, 8.996,
9.379, 9.846, 9.855, 9.502, 9.608, 9.761, 9.409, 9.4 ,
9.332, 9.34 , 9.284, 8.844, 8.722, 8.376, 8.775, 8.293,
8.144, 8.63 , 8.831, 8.957, 9.18 , 9.601, 9.695, 10.018,
9.841, 9.743, 9.292, 8.85 , 9.316, 9.288, 9.519, 9.738,
9.289, 9.785, 9.804, 10.06 , 10.188, 10.095, 9.739, 9.881,
9.7 , 9.991, 10.391, 10.002],
columns=['value'])
def test_performance_stats(self):
"""test the function performance_statistics()
"""
pass
def test_fv(self):
print(f'test with test data and empty DataFrame')
self.assertAlmostEqual(eval_fv(self.test_data1), 6.39245474)
self.assertAlmostEqual(eval_fv(self.test_data2), 10.05126375)
self.assertAlmostEqual(eval_fv(self.test_data3), 6.95068113)
self.assertAlmostEqual(eval_fv(self.test_data4), 8.86508591)
self.assertAlmostEqual(eval_fv(self.test_data5), 4.58627048)
self.assertAlmostEqual(eval_fv(self.test_data6), 4.10346795)
self.assertAlmostEqual(eval_fv(self.test_data7), 2.92532313)
self.assertAlmostEqual(eval_fv(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
def test_max_drawdown(self):
print(f'test with test data and empty DataFrame')
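# eval_max_drawdown() is expected to return (max_drawdown, peak_index, valley_index, recovery_index); the recovery index is NaN when the value never recovers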
self.assertAlmostEqual(eval_max_drawdown(self.test_data1)[0], 0.264274308)
self.assertEqual(eval_max_drawdown(self.test_data1)[1], 53)
self.assertEqual(eval_max_drawdown(self.test_data1)[2], 86)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data1)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data2)[0], 0.334690849)
self.assertEqual(eval_max_drawdown(self.test_data2)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data2)[2], 10)
self.assertEqual(eval_max_drawdown(self.test_data2)[3], 19)
self.assertAlmostEqual(eval_max_drawdown(self.test_data3)[0], 0.244452899)
self.assertEqual(eval_max_drawdown(self.test_data3)[1], 90)
self.assertEqual(eval_max_drawdown(self.test_data3)[2], 99)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data3)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data4)[0], 0.201849684)
self.assertEqual(eval_max_drawdown(self.test_data4)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4)[2], 50)
self.assertEqual(eval_max_drawdown(self.test_data4)[3], 54)
self.assertAlmostEqual(eval_max_drawdown(self.test_data5)[0], 0.534206456)
self.assertEqual(eval_max_drawdown(self.test_data5)[1], 21)
self.assertEqual(eval_max_drawdown(self.test_data5)[2], 60)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data5)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data6)[0], 0.670062689)
self.assertEqual(eval_max_drawdown(self.test_data6)[1], 0)
self.assertEqual(eval_max_drawdown(self.test_data6)[2], 70)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data6)[3]))
self.assertAlmostEqual(eval_max_drawdown(self.test_data7)[0], 0.783577449)
self.assertEqual(eval_max_drawdown(self.test_data7)[1], 17)
self.assertEqual(eval_max_drawdown(self.test_data7)[2], 51)
self.assertTrue(np.isnan(eval_max_drawdown(self.test_data7)[3]))
self.assertEqual(eval_max_drawdown(pd.DataFrame()), -np.inf)
print(f'Error testing')
self.assertRaises(AssertionError, eval_fv, 15)
self.assertRaises(KeyError,
eval_fv,
pd.DataFrame([1, 2, 3], columns=['non_value']))
# test max drawdown == 0:
# TODO: investigate how the divide-by-zero case affects the result
self.assertAlmostEqual(eval_max_drawdown(self.test_data4 - 5)[0], 1.0770474121951792)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[1], 14)
self.assertEqual(eval_max_drawdown(self.test_data4 - 5)[2], 50)
def test_info_ratio(self):
reference = self.test_data1
self.assertAlmostEqual(eval_info_ratio(self.test_data2, reference, 'value'), 0.075553316)
self.assertAlmostEqual(eval_info_ratio(self.test_data3, reference, 'value'), 0.018949457)
self.assertAlmostEqual(eval_info_ratio(self.test_data4, reference, 'value'), 0.056328143)
self.assertAlmostEqual(eval_info_ratio(self.test_data5, reference, 'value'), -0.004270068)
self.assertAlmostEqual(eval_info_ratio(self.test_data6, reference, 'value'), 0.009198027)
self.assertAlmostEqual(eval_info_ratio(self.test_data7, reference, 'value'), -0.000890283)
def test_volatility(self):
self.assertAlmostEqual(eval_volatility(self.test_data1), 0.748646166)
self.assertAlmostEqual(eval_volatility(self.test_data2), 0.75527442)
self.assertAlmostEqual(eval_volatility(self.test_data3), 0.654188853)
self.assertAlmostEqual(eval_volatility(self.test_data4), 0.688375814)
self.assertAlmostEqual(eval_volatility(self.test_data5), 1.089989522)
self.assertAlmostEqual(eval_volatility(self.test_data6), 1.775419308)
self.assertAlmostEqual(eval_volatility(self.test_data7), 1.962758406)
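# the same series evaluated with logarithm=False (simple rather than log returns)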
self.assertAlmostEqual(eval_volatility(self.test_data1, logarithm=False), 0.750993311)
self.assertAlmostEqual(eval_volatility(self.test_data2, logarithm=False), 0.75571473)
self.assertAlmostEqual(eval_volatility(self.test_data3, logarithm=False), 0.655331424)
self.assertAlmostEqual(eval_volatility(self.test_data4, logarithm=False), 0.692683021)
self.assertAlmostEqual(eval_volatility(self.test_data5, logarithm=False), 1.09602969)
self.assertAlmostEqual(eval_volatility(self.test_data6, logarithm=False), 1.774789504)
self.assertAlmostEqual(eval_volatility(self.test_data7, logarithm=False), 2.003329156)
self.assertEqual(eval_volatility(pd.DataFrame()), -np.inf)
self.assertRaises(AssertionError, eval_volatility, [1, 2, 3])
# test volatility calculation on the long data series
expected_volatility = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
0.39955371, 0.39974258, 0.40309866, 0.40486593, 0.4055514 ,
0.40710639, 0.40708157, 0.40609006, 0.4073625 , 0.40835305,
0.41155304, 0.41218193, 0.41207489, 0.41300276, 0.41308415,
0.41292392, 0.41207645, 0.41238397, 0.41229291, 0.41164056,
0.41316317, 0.41348842, 0.41462249, 0.41474574, 0.41652625,
0.41649176, 0.41701556, 0.4166593 , 0.41684221, 0.41491689,
0.41435209, 0.41549087, 0.41849338, 0.41998049, 0.41959106,
0.41907311, 0.41916103, 0.42120773, 0.42052391, 0.42111225,
0.42124589, 0.42356445, 0.42214672, 0.42324022, 0.42476639,
0.42621689, 0.42549439, 0.42533678, 0.42539414, 0.42545038,
0.42593637, 0.42652095, 0.42665489, 0.42699563, 0.42798159,
0.42784512, 0.42898006, 0.42868781, 0.42874188, 0.42789631,
0.4277768 , 0.42776827, 0.42685216, 0.42660989, 0.42563155,
0.42618281, 0.42606281, 0.42505222, 0.42653242, 0.42555378,
0.42500842, 0.42561939, 0.42442059, 0.42395414, 0.42384356,
0.42319135, 0.42397497, 0.42488579, 0.42449729, 0.42508766,
0.42509878, 0.42456616, 0.42535577, 0.42681884, 0.42688552,
0.42779918, 0.42706058, 0.42792887, 0.42762114, 0.42894045,
0.42977398, 0.42919859, 0.42829041, 0.42780946, 0.42825318,
0.42858952, 0.42858315, 0.42805601, 0.42764751, 0.42744107,
0.42775518, 0.42707283, 0.4258592 , 0.42615335, 0.42526286,
0.4248906 , 0.42368986, 0.4232565 , 0.42265079, 0.42263954,
0.42153046, 0.42132051, 0.41995353, 0.41916605, 0.41914271,
0.41876945, 0.41740175, 0.41583884, 0.41614026, 0.41457908,
0.41472411, 0.41310876, 0.41261041, 0.41212369, 0.41211677,
0.4100645 , 0.40852504, 0.40860297, 0.40745338, 0.40698661,
0.40644546, 0.40591375, 0.40640744, 0.40620663, 0.40656649,
0.40727154, 0.40797605, 0.40807137, 0.40808913, 0.40809676,
0.40711767, 0.40724628, 0.40713077, 0.40772698, 0.40765157,
0.40658297, 0.4065991 , 0.405011 , 0.40537645, 0.40432626,
0.40390177, 0.40237701, 0.40291623, 0.40301797, 0.40324145,
0.40312864, 0.40328316, 0.40190955, 0.40246506, 0.40237663,
0.40198407, 0.401969 , 0.40185623, 0.40198313, 0.40005643,
0.39940743, 0.39850438, 0.39845398, 0.39695093, 0.39697295,
0.39663201, 0.39675444, 0.39538699, 0.39331959, 0.39326074,
0.39193287, 0.39157266, 0.39021327, 0.39062591, 0.38917591,
0.38976991, 0.38864187, 0.38872158, 0.38868096, 0.38868377,
0.38842057, 0.38654784, 0.38649517, 0.38600464, 0.38408115,
0.38323049, 0.38260215, 0.38207663, 0.38142669, 0.38003262,
0.37969367, 0.37768092, 0.37732108, 0.37741991, 0.37617779,
0.37698504, 0.37606784, 0.37499276, 0.37533731, 0.37350437,
0.37375172, 0.37385382, 0.37384003, 0.37338938, 0.37212288,
0.37273075, 0.370559 , 0.37038506, 0.37062153, 0.36964661,
0.36818564, 0.3656634 , 0.36539259, 0.36428672, 0.36502487,
0.3647148 , 0.36551435, 0.36409919, 0.36348181, 0.36254383,
0.36166601, 0.36142665, 0.35954942, 0.35846915, 0.35886759,
0.35813867, 0.35642888, 0.35375231, 0.35061783, 0.35078463,
0.34995508, 0.34688918, 0.34548257, 0.34633158, 0.34622833,
0.34652111, 0.34622774, 0.34540951, 0.34418809, 0.34276593,
0.34160916, 0.33811193, 0.33822709, 0.3391685 , 0.33883381])
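# eval_volatility() appears to also write a rolling 'volatility' column back into the input DataFrame, checked below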
test_volatility = eval_volatility(self.long_data)
test_volatility_roll = self.long_data['volatility'].values
self.assertAlmostEqual(test_volatility, np.nanmean(expected_volatility))
self.assertTrue(np.allclose(expected_volatility, test_volatility_roll, equal_nan=True))
def test_sharp(self):
self.assertAlmostEqual(eval_sharp(self.test_data1, 5, 0), 0.06135557)
self.assertAlmostEqual(eval_sharp(self.test_data2, 5, 0), 0.167858667)
self.assertAlmostEqual(eval_sharp(self.test_data3, 5, 0), 0.09950547)
self.assertAlmostEqual(eval_sharp(self.test_data4, 5, 0), 0.154928241)
self.assertAlmostEqual(eval_sharp(self.test_data5, 5, 0.002), 0.007868673)
self.assertAlmostEqual(eval_sharp(self.test_data6, 5, 0.002), 0.018306537)
self.assertAlmostEqual(eval_sharp(self.test_data7, 5, 0.002), 0.006259971)
# test sharp ratio calculation on the long data series
expected_sharp = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.02346815, -0.02618783, -0.03763912, -0.03296276, -0.03085698,
-0.02851101, -0.02375842, -0.02016746, -0.01107885, -0.01426613,
-0.00787204, -0.01135784, -0.01164232, -0.01003481, -0.00022512,
-0.00046792, -0.01209378, -0.01278892, -0.01298135, -0.01938214,
-0.01671044, -0.02120509, -0.0244281 , -0.02416067, -0.02763238,
-0.027579 , -0.02372774, -0.02215294, -0.02467094, -0.02091266,
-0.02590194, -0.03049876, -0.02077131, -0.01483653, -0.02488144,
-0.02671638, -0.02561547, -0.01957986, -0.02479803, -0.02703162,
-0.02658087, -0.01641755, -0.01946472, -0.01647757, -0.01280889,
-0.00893643, -0.00643275, -0.00698457, -0.00549962, -0.00654677,
-0.00494757, -0.0035633 , -0.00109037, 0.00750654, 0.00451208,
0.00625502, 0.01221367, 0.01326454, 0.01535037, 0.02269538,
0.02028715, 0.02127712, 0.02333264, 0.02273159, 0.01670643,
0.01376513, 0.01265342, 0.02211647, 0.01612449, 0.00856706,
-0.00077147, -0.00268848, 0.00210993, -0.00443934, -0.00411912,
-0.0018756 , -0.00867461, -0.00581601, -0.00660835, -0.00861137,
-0.00678614, -0.01188408, -0.00589617, -0.00244323, -0.00201891,
-0.01042846, -0.01471016, -0.02167034, -0.02258554, -0.01306809,
-0.00909086, -0.01233746, -0.00595166, -0.00184208, 0.00750497,
0.01481886, 0.01761972, 0.01562886, 0.01446414, 0.01285826,
0.01357719, 0.00967613, 0.01636272, 0.01458437, 0.02280183,
0.02151903, 0.01700276, 0.01597368, 0.02114336, 0.02233297,
0.02585631, 0.02768459, 0.03519235, 0.04204535, 0.04328161,
0.04672855, 0.05046191, 0.04619848, 0.04525853, 0.05381529,
0.04598861, 0.03947394, 0.04665006, 0.05586077, 0.05617728,
0.06495018, 0.06205172, 0.05665466, 0.06500615, 0.0632062 ,
0.06084328, 0.05851466, 0.05659229, 0.05159347, 0.0432977 ,
0.0474047 , 0.04231723, 0.03613176, 0.03618391, 0.03591012,
0.03885674, 0.0402686 , 0.03846423, 0.04534014, 0.04721458,
0.05130912, 0.05026281, 0.05394312, 0.05529349, 0.05949243,
0.05463304, 0.06195165, 0.06767606, 0.06880985, 0.07048996,
0.07078815, 0.07420767, 0.06773439, 0.0658441 , 0.06470875,
0.06302349, 0.06456876, 0.06411282, 0.06216669, 0.067094 ,
0.07055075, 0.07254976, 0.07119253, 0.06173308, 0.05393352,
0.05681246, 0.05250643, 0.06099845, 0.0655544 , 0.06977334,
0.06636514, 0.06177949, 0.06869908, 0.06719767, 0.06178738,
0.05915714, 0.06882277, 0.06756821, 0.06507994, 0.06489791,
0.06553941, 0.073123 , 0.07576757, 0.06805446, 0.06063571,
0.05033801, 0.05206971, 0.05540306, 0.05249118, 0.05755587,
0.0586174 , 0.05051288, 0.0564852 , 0.05757284, 0.06358355,
0.06130082, 0.04925482, 0.03834472, 0.04163981, 0.04648316,
0.04457858, 0.04324626, 0.04328791, 0.04156207, 0.04818652,
0.04972634, 0.06024123, 0.06489556, 0.06255485, 0.06069815,
0.06466389, 0.07081163, 0.07895358, 0.0881782 , 0.09374151,
0.08336506, 0.08764795, 0.09080174, 0.08808926, 0.08641158,
0.07811943, 0.06885318, 0.06479503, 0.06851185, 0.07382819,
0.07047903, 0.06658251, 0.07638379, 0.08667974, 0.08867918,
0.08245323, 0.08961866, 0.09905298, 0.0961908 , 0.08562706,
0.0839014 , 0.0849072 , 0.08338395, 0.08783487, 0.09463609,
0.10332336, 0.11806497, 0.11220297, 0.11589097, 0.11678405])
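# eval_sharp() likewise appears to write a rolling 'sharp' column into the input DataFrame, checked below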
test_sharp = eval_sharp(self.long_data, 5, 0.00035)
self.assertAlmostEqual(np.nanmean(expected_sharp), test_sharp)
self.assertTrue(np.allclose(self.long_data['sharp'].values, expected_sharp, equal_nan=True))
def test_beta(self):
reference = self.test_data1
self.assertAlmostEqual(eval_beta(self.test_data2, reference, 'value'), -0.017148939)
self.assertAlmostEqual(eval_beta(self.test_data3, reference, 'value'), -0.042204233)
self.assertAlmostEqual(eval_beta(self.test_data4, reference, 'value'), -0.15652986)
self.assertAlmostEqual(eval_beta(self.test_data5, reference, 'value'), -0.049195532)
self.assertAlmostEqual(eval_beta(self.test_data6, reference, 'value'), -0.026995082)
self.assertAlmostEqual(eval_beta(self.test_data7, reference, 'value'), -0.01147809)
self.assertRaises(TypeError, eval_beta, [1, 2, 3], reference, 'value')
self.assertRaises(TypeError, eval_beta, self.test_data3, [1, 2, 3], 'value')
self.assertRaises(KeyError, eval_beta, self.test_data3, reference, 'not_found_value')
# test beta calculation on the long data series
expected_beta = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.04988841, -0.05127618, -0.04692104, -0.04272652, -0.04080598,
-0.0493347 , -0.0460858 , -0.0416761 , -0.03691527, -0.03724924,
-0.03678865, -0.03987324, -0.03488321, -0.02567672, -0.02690303,
-0.03010128, -0.02437967, -0.02571932, -0.02455681, -0.02839811,
-0.03358653, -0.03396697, -0.03466321, -0.03050966, -0.0247583 ,
-0.01629325, -0.01880895, -0.01480403, -0.01348783, -0.00544294,
-0.00648176, -0.00467036, -0.01135331, -0.0156841 , -0.02340763,
-0.02615705, -0.02730771, -0.02906174, -0.02860664, -0.02412914,
-0.02066416, -0.01744816, -0.02185133, -0.02145285, -0.02681765,
-0.02827694, -0.02394581, -0.02744096, -0.02778825, -0.02703065,
-0.03160023, -0.03615371, -0.03681072, -0.04265126, -0.04344738,
-0.04232421, -0.04705272, -0.04533344, -0.04605934, -0.05272737,
-0.05156463, -0.05134196, -0.04730733, -0.04425352, -0.03869831,
-0.04159571, -0.04223998, -0.04346747, -0.04229844, -0.04740093,
-0.04992507, -0.04621232, -0.04477644, -0.0486915 , -0.04598224,
-0.04943463, -0.05006391, -0.05362256, -0.04994067, -0.05464769,
-0.05443275, -0.05513493, -0.05173594, -0.04500994, -0.04662891,
-0.03903505, -0.0419592 , -0.04307773, -0.03925718, -0.03711574,
-0.03992631, -0.0433058 , -0.04533641, -0.0461183 , -0.05600344,
-0.05758377, -0.05959874, -0.05605942, -0.06002859, -0.06253002,
-0.06747014, -0.06427915, -0.05931947, -0.05769974, -0.04791515,
-0.05175088, -0.05748039, -0.05385232, -0.05072975, -0.05052637,
-0.05125567, -0.05005785, -0.05325104, -0.04977727, -0.04947867,
-0.05148544, -0.05739156, -0.05742069, -0.06047279, -0.0558414 ,
-0.06086126, -0.06265151, -0.06411129, -0.06828052, -0.06781762,
-0.07083409, -0.07211207, -0.06799162, -0.06913295, -0.06775162,
-0.0696265 , -0.06678248, -0.06867502, -0.06581961, -0.07055823,
-0.06448184, -0.06097973, -0.05795587, -0.0618383 , -0.06130145,
-0.06050652, -0.05936661, -0.05749424, -0.0499 , -0.05050495,
-0.04962687, -0.05033439, -0.05070116, -0.05422009, -0.05369759,
-0.05548943, -0.05907353, -0.05933035, -0.05927918, -0.06227663,
-0.06011455, -0.05650432, -0.05828134, -0.05620949, -0.05715323,
-0.05482478, -0.05387113, -0.05095559, -0.05377999, -0.05334267,
-0.05220438, -0.04001521, -0.03892434, -0.03660782, -0.04282708,
-0.04324623, -0.04127048, -0.04227559, -0.04275226, -0.04347049,
-0.04125853, -0.03806295, -0.0330632 , -0.03155531, -0.03277152,
-0.03304518, -0.03878731, -0.03830672, -0.03727434, -0.0370571 ,
-0.04509224, -0.04207632, -0.04116198, -0.04545179, -0.04584584,
-0.05287341, -0.05417433, -0.05175836, -0.05005509, -0.04268674,
-0.03442321, -0.03457309, -0.03613426, -0.03524391, -0.03629479,
-0.04361312, -0.02626705, -0.02406115, -0.03046384, -0.03181044,
-0.03375164, -0.03661673, -0.04520779, -0.04926951, -0.05726738,
-0.0584486 , -0.06220608, -0.06800563, -0.06797431, -0.07562211,
-0.07481996, -0.07731229, -0.08413381, -0.09031826, -0.09691925,
-0.11018071, -0.11952675, -0.10826026, -0.11173895, -0.10756359,
-0.10775916, -0.11664559, -0.10505051, -0.10606547, -0.09855355,
-0.10004159, -0.10857084, -0.12209301, -0.11605758, -0.11105113,
-0.1155195 , -0.11569505, -0.10513348, -0.09611072, -0.10719791,
-0.10843965, -0.11025856, -0.10247839, -0.10554044, -0.10927647,
-0.10645088, -0.09982498, -0.10542734, -0.09631372, -0.08229695])
test_beta_mean = eval_beta(self.long_data, self.long_bench, 'value')
test_beta_roll = self.long_data['beta'].values
self.assertAlmostEqual(test_beta_mean, np.nanmean(expected_beta))
self.assertTrue(np.allclose(test_beta_roll, expected_beta, equal_nan=True))
def test_alpha(self):
reference = self.test_data1
self.assertAlmostEqual(eval_alpha(self.test_data2, 5, reference, 'value', 0.5), 11.63072977)
self.assertAlmostEqual(eval_alpha(self.test_data3, 5, reference, 'value', 0.5), 1.886590071)
self.assertAlmostEqual(eval_alpha(self.test_data4, 5, reference, 'value', 0.5), 6.827021872)
self.assertAlmostEqual(eval_alpha(self.test_data5, 5, reference, 'value', 0.92), -1.192265168)
self.assertAlmostEqual(eval_alpha(self.test_data6, 5, reference, 'value', 0.92), -1.437142359)
self.assertAlmostEqual(eval_alpha(self.test_data7, 5, reference, 'value', 0.92), -1.781311545)
# test alpha calculation on the long data series
expected_alpha = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan,
-0.09418119, -0.11188463, -0.17938358, -0.15588172, -0.1462678 ,
-0.13089586, -0.10780125, -0.09102891, -0.03987585, -0.06075686,
-0.02459503, -0.04104284, -0.0444565 , -0.04074585, 0.02191275,
0.02255955, -0.05583375, -0.05875539, -0.06055551, -0.09648245,
-0.07913737, -0.10627829, -0.12320965, -0.12368335, -0.1506743 ,
-0.15768033, -0.13638829, -0.13065298, -0.14537834, -0.127428 ,
-0.15504529, -0.18184636, -0.12652146, -0.09190138, -0.14847221,
-0.15840648, -0.1525789 , -0.11859418, -0.14700954, -0.16295761,
-0.16051645, -0.10364859, -0.11961134, -0.10258267, -0.08090148,
-0.05727746, -0.0429945 , -0.04672356, -0.03581408, -0.0439215 ,
-0.03429495, -0.0260362 , -0.01075022, 0.04931808, 0.02779388,
0.03984083, 0.08311951, 0.08995566, 0.10522428, 0.16159058,
0.14238174, 0.14759783, 0.16257712, 0.158908 , 0.11302115,
0.0909566 , 0.08272888, 0.15261884, 0.10546376, 0.04990313,
-0.01284111, -0.02720704, 0.00454725, -0.03965491, -0.03818265,
-0.02186992, -0.06574751, -0.04846454, -0.05204211, -0.06316498,
-0.05095099, -0.08502656, -0.04681162, -0.02362027, -0.02205091,
-0.07706374, -0.10371841, -0.14434688, -0.14797935, -0.09055402,
-0.06739549, -0.08824959, -0.04855888, -0.02291244, 0.04027138,
0.09370505, 0.11472939, 0.10243593, 0.0921445 , 0.07662648,
0.07946651, 0.05450718, 0.10497677, 0.09068334, 0.15462924,
0.14231034, 0.10544952, 0.09980256, 0.14035223, 0.14942974,
0.17624102, 0.19035477, 0.2500807 , 0.30724652, 0.31768915,
0.35007521, 0.38412975, 0.34356521, 0.33614463, 0.41206165,
0.33999177, 0.28045963, 0.34076789, 0.42220356, 0.42314636,
0.50790423, 0.47713348, 0.42520169, 0.50488411, 0.48705211,
0.46252601, 0.44325578, 0.42640573, 0.37986783, 0.30652822,
0.34503393, 0.2999069 , 0.24928617, 0.24730218, 0.24326897,
0.26657905, 0.27861168, 0.26392824, 0.32552649, 0.34177792,
0.37837011, 0.37025267, 0.4030612 , 0.41339361, 0.45076809,
0.40383354, 0.47093422, 0.52505036, 0.53614256, 0.5500943 ,
0.55319293, 0.59021451, 0.52358459, 0.50605947, 0.49359168,
0.47895956, 0.49320243, 0.4908336 , 0.47310767, 0.51821564,
0.55105932, 0.57291504, 0.5599809 , 0.46868842, 0.39620087,
0.42086934, 0.38317217, 0.45934108, 0.50048866, 0.53941991,
0.50676751, 0.46500915, 0.52993663, 0.51668366, 0.46405428,
0.44100603, 0.52726147, 0.51565458, 0.49186248, 0.49001081,
0.49367648, 0.56422294, 0.58882785, 0.51334664, 0.44386256,
0.35056709, 0.36490029, 0.39205071, 0.3677061 , 0.41134736,
0.42315067, 0.35356394, 0.40324562, 0.41340007, 0.46503322,
0.44355762, 0.34854314, 0.26412842, 0.28633753, 0.32335224,
0.30761141, 0.29709569, 0.29570487, 0.28000063, 0.32802547,
0.33967726, 0.42511212, 0.46252357, 0.44244974, 0.42152907,
0.45436727, 0.50482359, 0.57339198, 0.6573356 , 0.70912003,
0.60328917, 0.6395092 , 0.67015805, 0.64241557, 0.62779142,
0.55028063, 0.46448736, 0.43709245, 0.46777983, 0.51789439,
0.48594916, 0.4456216 , 0.52008189, 0.60548684, 0.62792473,
0.56645031, 0.62766439, 0.71829315, 0.69481356, 0.59550329,
0.58133754, 0.59014148, 0.58026655, 0.61719273, 0.67373203,
0.75573056, 0.89501633, 0.8347253 , 0.87964685, 0.89015835])
test_alpha_mean = eval_alpha(self.long_data, 100, self.long_bench, 'value')
test_alpha_roll = self.long_data['alpha'].values
self.assertAlmostEqual(test_alpha_mean, np.nanmean(expected_alpha))
self.assertTrue(np.allclose(test_alpha_roll, expected_alpha, equal_nan=True))
def test_calmar(self):
"""test evaluate function eval_calmar()"""
pass
def test_benchmark(self):
reference = self.test_data1
tr, yr = eval_benchmark(self.test_data2, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data3, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data4, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data5, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data6, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
tr, yr = eval_benchmark(self.test_data7, reference, 'value')
self.assertAlmostEqual(tr, 0.19509091)
self.assertAlmostEqual(yr, 0.929154957)
def test_evaluate(self):
pass
class TestLoop(unittest.TestCase):
"""通过一个假设但精心设计的例子来测试loop_step以及loop方法的正确性"""
def setUp(self):
self.shares = ['share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7']
self.dates = ['2016/07/01', '2016/07/04', '2016/07/05', '2016/07/06', '2016/07/07',
'2016/07/08', '2016/07/11', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/15', '2016/07/18', '2016/07/19', '2016/07/20', '2016/07/21',
'2016/07/22', '2016/07/25', '2016/07/26', '2016/07/27', '2016/07/28',
'2016/07/29', '2016/08/01', '2016/08/02', '2016/08/03', '2016/08/04',
'2016/08/05', '2016/08/08', '2016/08/09', '2016/08/10', '2016/08/11',
'2016/08/12', '2016/08/15', '2016/08/16', '2016/08/17', '2016/08/18',
'2016/08/19', '2016/08/22', '2016/08/23', '2016/08/24', '2016/08/25',
'2016/08/26', '2016/08/29', '2016/08/30', '2016/08/31', '2016/09/01',
'2016/09/02', '2016/09/05', '2016/09/06', '2016/09/07', '2016/09/08',
'2016/09/09', '2016/09/12', '2016/09/13', '2016/09/14', '2016/09/15',
'2016/09/16', '2016/09/19', '2016/09/20', '2016/09/21', '2016/09/22',
'2016/09/23', '2016/09/26', '2016/09/27', '2016/09/28', '2016/09/29',
'2016/09/30', '2016/10/10', '2016/10/11', '2016/10/12', '2016/10/13',
'2016/10/14', '2016/10/17', '2016/10/18', '2016/10/19', '2016/10/20',
'2016/10/21', '2016/10/23', '2016/10/24', '2016/10/25', '2016/10/26',
'2016/10/27', '2016/10/29', '2016/10/30', '2016/10/31', '2016/11/01',
'2016/11/02', '2016/11/05', '2016/11/06', '2016/11/07', '2016/11/08',
'2016/11/09', '2016/11/12', '2016/11/13', '2016/11/14', '2016/11/15',
'2016/11/16', '2016/11/19', '2016/11/20', '2016/11/21', '2016/11/22']
self.dates = [pd.Timestamp(date_text) for date_text in self.dates]
self.prices = np.array([[5.35, 5.09, 5.03, 4.98, 4.50, 5.09, 4.75],
[5.66, 4.84, 5.21, 5.44, 4.35, 5.06, 4.48],
[5.79, 4.60, 5.02, 5.45, 4.07, 4.76, 4.56],
[5.56, 4.63, 5.50, 5.74, 3.88, 4.62, 4.62],
[5.88, 4.64, 5.07, 5.46, 3.74, 4.63, 4.62],
[6.25, 4.51, 5.11, 5.45, 3.98, 4.25, 4.59],
[5.93, 4.96, 5.15, 5.24, 4.08, 4.13, 4.33],
[6.39, 4.65, 5.02, 5.47, 4.00, 3.91, 3.88],
[6.31, 4.26, 5.10, 5.58, 4.28, 3.77, 3.47],
[5.86, 3.77, 5.24, 5.36, 4.01, 3.43, 3.51],
[5.61, 3.39, 4.93, 5.38, 4.14, 3.68, 3.83],
[5.31, 3.76, 4.96, 5.30, 4.49, 3.63, 3.67],
[5.40, 4.06, 5.40, 5.77, 4.49, 3.94, 3.79],
[5.03, 3.87, 5.74, 5.75, 4.46, 4.40, 4.18],
[5.38, 3.91, 5.53, 6.15, 4.13, 4.03, 4.02],
[5.79, 4.13, 5.79, 6.04, 3.79, 3.93, 4.41],
[6.27, 4.27, 5.68, 6.01, 4.23, 3.50, 4.65],
[6.59, 4.57, 5.90, 5.71, 4.57, 3.39, 4.89],
[6.91, 5.04, 5.75, 5.23, 4.92, 3.30, 4.41],
[6.71, 5.31, 6.11, 5.47, 5.28, 3.25, 4.66],
[6.33, 5.40, 5.77, 5.79, 5.67, 2.94, 4.37],
[6.07, 5.21, 5.85, 5.82, 6.00, 2.71, 4.58],
[5.98, 5.06, 5.61, 5.61, 5.89, 2.55, 4.76],
[6.46, 4.69, 5.75, 5.31, 5.55, 2.21, 4.37],
[6.95, 5.12, 5.50, 5.24, 5.39, 2.29, 4.16],
[6.77, 5.27, 5.14, 5.41, 5.26, 2.21, 4.02],
[6.70, 5.72, 5.31, 5.60, 5.31, 2.04, 3.77],
[6.28, 6.10, 5.68, 5.28, 5.22, 2.39, 3.38],
[6.61, 6.27, 5.73, 4.99, 4.90, 2.30, 3.07],
[6.25, 6.49, 6.04, 5.09, 4.57, 2.41, 2.90],
[6.47, 6.16, 6.27, 5.39, 4.96, 2.40, 2.50],
[6.45, 6.26, 6.60, 5.58, 4.82, 2.79, 2.76],
[6.88, 6.39, 6.10, 5.33, 4.39, 2.67, 2.29],
[7.00, 6.58, 6.25, 5.48, 4.63, 2.27, 2.17],
[6.59, 6.20, 6.73, 5.10, 5.05, 2.09, 1.84],
[6.59, 5.70, 6.91, 5.39, 4.68, 2.55, 1.83],
[6.64, 5.20, 7.01, 5.30, 5.02, 2.22, 2.21],
[6.38, 5.37, 7.36, 5.04, 4.84, 2.59, 2.00],
[6.10, 5.40, 7.72, 5.51, 4.60, 2.59, 1.76],
[6.35, 5.22, 7.68, 5.43, 4.66, 2.95, 1.27],
[6.52, 5.38, 7.62, 5.23, 4.41, 2.69, 1.40],
[6.87, 5.53, 7.74, 4.99, 4.87, 2.20, 1.11],
[6.84, 6.03, 7.53, 5.43, 4.42, 2.69, 1.60],
[7.09, 5.77, 7.46, 5.40, 4.08, 2.65, 1.23],
[6.88, 5.66, 7.84, 5.60, 4.16, 2.63, 1.59],
[6.84, 6.08, 8.11, 5.66, 4.10, 2.14, 1.50],
[6.98, 5.62, 8.04, 6.01, 4.43, 2.39, 1.80],
[7.02, 5.63, 7.65, 5.64, 4.07, 1.95, 1.55],
[7.13, 6.11, 7.52, 5.67, 3.97, 2.32, 1.35],
[7.59, 6.03, 7.67, 5.30, 4.16, 2.69, 1.51],
[7.61, 6.27, 7.47, 4.91, 4.12, 2.51, 1.08],
[7.21, 6.28, 7.44, 5.37, 4.04, 2.62, 1.06],
[7.48, 6.52, 7.59, 5.75, 3.84, 2.16, 1.43],
[7.66, 7.00, 7.94, 6.08, 3.46, 2.35, 1.43],
[7.51, 7.34, 8.25, 6.58, 3.18, 2.31, 1.74],
[7.12, 7.34, 7.77, 6.78, 3.10, 1.96, 1.44],
[6.97, 7.68, 8.03, 7.20, 3.55, 2.35, 1.83],
[6.67, 8.09, 7.87, 7.65, 3.66, 2.58, 1.71],
[6.20, 7.68, 7.58, 8.00, 3.66, 2.40, 2.12],
[6.34, 7.58, 7.33, 7.92, 3.29, 2.20, 2.45],
[6.22, 7.46, 7.22, 8.30, 2.80, 2.31, 2.85],
[5.98, 7.59, 6.86, 8.46, 2.88, 2.16, 2.79],
[6.37, 7.19, 7.18, 7.99, 3.04, 2.16, 2.91],
[6.56, 7.40, 7.54, 8.19, 3.45, 2.20, 3.26],
[6.26, 7.48, 7.24, 8.61, 3.88, 1.73, 3.14],
[6.69, 7.93, 6.85, 8.66, 3.58, 1.93, 3.53],
[7.13, 8.23, 6.60, 8.91, 3.60, 2.25, 3.65],
[6.83, 8.35, 6.65, 9.08, 3.97, 2.69, 3.69],
[7.31, 8.44, 6.74, 9.34, 4.05, 2.59, 3.50],
[7.43, 8.35, 7.19, 8.96, 4.40, 2.14, 3.25],
[7.54, 8.58, 7.14, 8.98, 4.06, 1.68, 3.64],
[7.18, 8.82, 6.88, 8.50, 3.60, 1.98, 4.00],
[7.21, 9.09, 7.14, 8.65, 3.61, 2.14, 3.63],
[7.45, 9.02, 7.30, 8.94, 4.10, 1.89, 3.78],
[7.37, 8.87, 6.95, 8.63, 3.74, 1.97, 3.42],
[6.88, 9.22, 7.02, 8.65, 4.02, 1.99, 3.76],
[7.08, 9.04, 7.38, 8.40, 3.95, 2.37, 3.62],
[6.75, 8.60, 7.50, 8.38, 3.81, 2.14, 3.67],
[6.60, 8.48, 7.60, 8.23, 3.71, 2.35, 3.61],
[6.21, 8.71, 7.15, 8.04, 3.94, 1.86, 3.39],
[6.36, 8.79, 7.30, 7.91, 4.43, 2.14, 3.43],
[6.82, 8.93, 7.80, 7.57, 4.07, 2.39, 3.33],
[6.45, 9.36, 8.15, 7.73, 4.04, 2.53, 3.03],
[6.85, 9.68, 8.40, 7.74, 4.34, 2.47, 3.28],
[6.48, 10.16, 8.87, 8.07, 4.80, 2.93, 3.46],
[6.10, 10.56, 8.53, 7.99, 5.18, 3.09, 3.25],
[5.64, 10.63, 8.94, 7.92, 4.90, 2.93, 2.95],
[6.01, 10.55, 8.52, 8.40, 5.40, 3.22, 2.87],
[6.21, 10.65, 8.80, 8.80, 5.73, 3.06, 2.63],
[6.61, 10.55, 8.92, 8.47, 5.62, 2.90, 2.40],
[7.02, 10.19, 9.20, 8.07, 5.20, 2.68, 2.53],
[7.04, 10.48, 8.71, 7.87, 4.85, 2.46, 2.96],
[6.77, 10.36, 8.25, 8.02, 5.18, 2.41, 3.26],
[7.09, 10.03, 8.19, 8.39, 4.72, 2.74, 2.97],
[6.65, 10.24, 7.80, 8.69, 4.62, 3.15, 3.16],
[7.07, 10.01, 7.69, 8.81, 4.55, 3.40, 3.58],
[6.80, 10.14, 7.23, 8.99, 4.37, 3.82, 3.23],
[6.79, 10.31, 6.98, 9.10, 4.26, 4.02, 3.62],
[6.48, 9.88, 7.07, 8.90, 4.25, 3.76, 3.13],
[6.39, 10.05, 6.95, 8.87, 4.59, 4.10, 2.93]])
self.op_signals = np.array([[0, 0, 0, 0, 0.25, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0.1, 0.15],
[0.2, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0.1, 0, 0, 0, 0],
[0, 0, 0, 0, -0.75, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.333, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0.2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[-0.5, 0, 0, 0.15, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0.2, 0, -1, 0.2, 0],
[0.5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -0.5, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0.15, 0, 0],
[-1, 0, 0.25, 0.25, 0, 0.25, 0],
[0, 0, 0, 0, 0, 0, 0],
[0.25, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0.2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0.2],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, -1, 0, 0, 0, 0, 0],
[-1, 0, 0.15, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
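# Signal convention, inferred from the expected results further down: a value in (0, 1] buys with that
# fraction of the available cash (e.g. the 0.25 on day 1 spends 2500 of the initial 10000 on share5),
# while a value in [-1, 0) sells that fraction of the current holding (-0.333 sells one third, -1
# liquidates the position completely).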
self.cash = qt.CashPlan(['2016/07/01', '2016/08/12', '2016/09/23'], [10000, 10000, 10000])
self.rate = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=0,
sell_min=0,
slipage=0)
self.rate2 = qt.Cost(buy_fix=0,
sell_fix=0,
buy_rate=0,
sell_rate=0,
buy_min=10,
sell_min=5,
slipage=0)
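# self.rate applies no transaction costs at all, so the loop results can be checked with simple
# arithmetic; self.rate2 adds fixed minimum fees (10 per buy, 5 per sell) and is used for the
# moq (minimum order quantity) variant of the loop test below.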
self.op_signal_df = pd.DataFrame(self.op_signals, index=self.dates, columns=self.shares)
self.history_list = pd.DataFrame(self.prices, index=self.dates, columns=self.shares)
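# Expected day-by-day output of apply_loop below: the first seven columns are the holdings of
# share1..share7, followed by what appear to be cash, transaction fee and total portfolio value
# (cf. the individual checks in test_loop_step: day 1 gives cash 7500, 555.56 shares of share5
# and a total value of 10000, matching the first row).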
self.res = np.array([[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 10000.000],
[0.000, 0.000, 0.000, 0.000, 555.556, 0.000, 0.000, 7500.000, 0.000, 9916.667],
[0.000, 0.000, 0.000, 0.000, 555.556, 205.065, 321.089, 5059.722, 0.000, 9761.111],
[346.982, 416.679, 0.000, 0.000, 555.556, 205.065, 321.089, 1201.278, 0.000, 9646.112],
[346.982, 416.679, 191.037, 0.000, 555.556, 205.065, 321.089, 232.719, 0.000, 9685.586],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9813.218],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9803.129],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9608.020],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9311.573],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8883.625],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8751.390],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 8794.181],
[346.982, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 1891.052, 0.000, 9136.570],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9209.359],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9093.829],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9387.554],
[231.437, 416.679, 191.037, 0.000, 138.889, 205.065, 321.089, 2472.244, 0.000, 9585.959],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 9928.777],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10060.381],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10281.002],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 321.089, 3035.804, 0.000, 10095.561],
[231.437, 416.679, 95.519, 0.000, 138.889, 205.065, 0.000, 4506.393, 0.000, 10029.957],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9875.613],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9614.946],
[231.437, 416.679, 95.519, 0.000, 474.224, 205.065, 0.000, 2531.270, 0.000, 9824.172],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9732.574],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9968.339],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 10056.158],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9921.492],
[115.719, 416.679, 95.519, 269.850, 474.224, 205.065, 0.000, 1854.799, 0.000, 9894.162],
[115.719, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 6179.774, 0.000, 20067.937],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21133.508],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20988.848],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20596.743],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 19910.773],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20776.707],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20051.797],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20725.388],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20828.880],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21647.181],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21310.169],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 20852.099],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21912.395],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21937.828],
[1073.823, 416.679, 735.644, 269.850, 0.000, 1877.393, 0.000, 0.000, 0.000, 21962.458],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21389.402],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22027.453],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 20939.999],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21250.064],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22282.781],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21407.066],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21160.237],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 21826.768],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22744.940],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23466.118],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22017.882],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23191.466],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 23099.082],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22684.767],
[1073.823, 416.679, 735.644, 269.850, 0.000, 938.697, 1339.207, 0.000, 0.000, 22842.135],
[1073.823, 416.679, 735.644, 269.850, 1785.205, 938.697, 1339.207, 5001.425, 0, 33323.836],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 32820.290],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 33174.614],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35179.466],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34465.195],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 34712.354],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35755.550],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37895.223],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37854.284],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37198.374],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35916.711],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35806.937],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36317.592],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37103.973],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35457.883],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36717.685],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37641.463],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 36794.298],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37073.817],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 35244.299],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37062.382],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 37420.067],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 38089.058],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 39260.542],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42609.684],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 43109.309],
[0.000, 416.679, 1290.692, 719.924, 1785.205, 2701.488, 1339.207, 0.000, 0.000, 42283.408],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43622.444],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42830.254],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41266.463],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41164.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 41797.937],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42440.861],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 42113.839],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 43853.588],
[0.000, 416.679, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 915.621, 0.000, 46216.760],
[0.000, 0.000, 1290.692, 719.924, 0.000, 2701.488, 4379.099, 5140.743, 0.000, 45408.737],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 47413.401],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44603.718],
[0.000, 0.000, 2027.188, 719.924, 0.000, 2701.488, 4379.099, 0.000, 0.000, 44381.544]])
def test_loop_step(self):
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.zeros(7, dtype='float'),
op=self.op_signals[0],
prices=self.prices[0],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 1 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 7500)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 0, 0, 555.5555556, 0, 0])))
self.assertAlmostEqual(value, 10000.00)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=5059.722222,
pre_amounts=np.array([0, 0, 0, 0, 555.5555556,
205.0653595, 321.0891813]),
op=self.op_signals[3],
prices=self.prices[3],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 4 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 1201.2775195, 5)
self.assertTrue(np.allclose(amounts, np.array([346.9824373, 416.6786936, 0, 0,
555.5555556, 205.0653595, 321.0891813])))
self.assertAlmostEqual(value, 9646.111756, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=6179.77423,
pre_amounts=np.array([115.7186428, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0]),
op=self.op_signals[31],
prices=self.prices[31],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 32 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 1877.393446, 0])))
self.assertAlmostEqual(value, 21133.50798, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=10000,
pre_amounts=np.array([1073.823175, 416.6786936, 735.6441811,
269.8495646, 0, 938.6967231, 1339.207325]),
op=self.op_signals[60],
prices=self.prices[60],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 61 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5001.424618, 5)
self.assertTrue(np.allclose(amounts, np.array([1073.823175, 416.6786936, 735.6441811, 269.8495646,
1785.205494, 938.6967231, 1339.207325])))
self.assertAlmostEqual(value, 33323.83588, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[61],
prices=self.prices[61],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 62 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 416.6786936, 1290.69215, 719.9239224,
1785.205494, 2701.487958, 1339.207325])))
self.assertAlmostEqual(value, 32820.29007, 5)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=915.6208259,
pre_amounts=np.array([0, 416.6786936, 1290.69215, 719.9239224,
0, 2701.487958, 4379.098907]),
op=self.op_signals[96],
prices=self.prices[96],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 97 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 5140.742779, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 1290.69215, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 45408.73655, 4)
cash, amounts, fee, value = qt.core._loop_step(pre_cash=cash,
pre_amounts=amounts,
op=self.op_signals[97],
prices=self.prices[97],
rate=self.rate,
moq_buy=0,
moq_sell=0,
print_log=True)
print(f'day 98 result in complete looping: \n'
f'cash: {cash}\n'
f'amounts: {np.round(amounts, 2)}\n'
f'value: {value}')
self.assertAlmostEqual(cash, 0, 5)
self.assertTrue(np.allclose(amounts, np.array([0, 0, 2027.18825, 719.9239224, 0, 2701.487958, 4379.098907])))
self.assertAlmostEqual(value, 47413.40131, 4)
def test_loop(self):
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate,
moq_buy=0,
moq_sell=0,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
self.assertTrue(np.allclose(res.values, self.res, 5))
print(f'test assertion errors in apply_loop: detect moqs that are not compatible')
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
0, 1,
0,
False)
self.assertRaises(AssertionError,
apply_loop,
self.op_signal_df,
self.history_list,
self.cash,
self.rate,
1, 5,
0,
False)
print(f'test loop results with moq_buy=100 and moq_sell=1')
res = apply_loop(op_list=self.op_signal_df,
history_list=self.history_list,
cash_plan=self.cash,
cost_rate=self.rate2,
moq_buy=100,
moq_sell=1,
inflation_rate=0,
print_log=True)
self.assertIsInstance(res, pd.DataFrame)
print(f'in test_loop:\nresult of loop test is \n{res}')
class TestOperatorSubFuncs(unittest.TestCase):
def setUp(self):
mask_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.5, 0.0, 0.3, 1.0],
[0.5, 0.0, 0.3, 0.5],
[0.5, 0.5, 0.3, 0.5],
[0.5, 0.5, 0.3, 1.0],
[0.3, 0.5, 0.0, 1.0],
[0.3, 1.0, 0.0, 1.0]]
signal_list = [[0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.3, 0.0],
[0.0, 0.0, 0.0, -0.5],
[0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.5],
[-0.4, 0.0, -1.0, 0.0],
[0.0, 0.5, 0.0, 0.0]]
mask_multi = [[[0, 0, 1, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[0, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1]],
[[0, 0, 1, 0, 1],
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 1, 0, 1, 0]],
[[0, 0, 0., 0, 1],
[0, 0, 1., 0, 1],
[0, 0, 1., 0, 1],
[1, 0, 1., 0, 1],
[1, 1, .5, 1, 1],
[1, 0, .5, 1, 0],
[1, 1, .5, 1, 0],
[0, 1, 0., 0, 0],
[1, 0, 0., 0, 0],
[0, 1, 0., 0, 0]]]
signal_multi = [[[0., 0., 1., 1., 0.],
[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., -1., 0., 0.],
[-1., 0., 0., -1., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., -1., 0., 0., 0.]],
[[0., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., -1., 0., 0.],
[0., 0., 1., -1., -1.],
[0., 0., -1., 0., 0.],
[0., -1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[-1., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 1.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 1., -0.5, 1., 0.],
[0., -1., 0., 0., -1.],
[0., 1., 0., 0., 0.],
[-1., 0., -1., -1., 0.],
[1., -1., 0., 0., 0.],
[-1., 1., 0., 0., 0.]]]
self.mask = np.array(mask_list)
self.multi_mask = np.array(mask_multi)
self.correct_signal = np.array(signal_list)
self.correct_multi_signal = np.array(signal_multi)
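# Relationship between the masks and the expected signals above (inferred from the data): an increase
# of the target position becomes a buy signal equal to the position change, while a decrease becomes a
# sell signal expressed as the fraction of the existing position to unwind (e.g. 0.5 -> 0.3 yields -0.4,
# and a drop to zero yields -1).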
self.op = qt.Operator()
def test_ls_blend(self):
"""测试多空蒙板的混合器,三种混合方式均需要测试"""
ls_mask1 = [[0.0, 0.0, 0.0, -0.0],
[1.0, 0.0, 0.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 0.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[1.0, 1.0, 1.0, -1.0],
[0.0, 1.0, 0.0, -1.0],
[0.0, 1.0, 0.0, -1.0]]
ls_mask2 = [[0.0, 0.0, 0.5, -0.5],
[0.0, 0.0, 0.5, -0.3],
[0.0, 0.5, 0.5, -0.0],
[0.5, 0.5, 0.3, -0.0],
[0.5, 0.5, 0.3, -0.3],
[0.5, 0.5, 0.0, -0.5],
[0.3, 0.5, 0.0, -1.0],
[0.3, 1.0, 0.0, -1.0]]
ls_mask3 = [[0.5, 0.0, 1.0, -0.4],
[0.4, 0.0, 1.0, -0.3],
[0.3, 0.0, 0.8, -0.2],
[0.2, 0.0, 0.6, -0.1],
[0.1, 0.2, 0.4, -0.2],
[0.1, 0.3, 0.2, -0.5],
[0.1, 0.4, 0.0, -0.5],
[0.1, 0.5, 0.0, -1.0]]
# result with blender 'avg'
ls_blnd_avg = [[0.16666667, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.16666667, 0.76666667, -0.4],
[0.56666667, 0.16666667, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'str-1.5'
ls_blnd_str_15 = [[0, 0, 1, 0],
[0, 0, 1, -1],
[0, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'pos-2' == 'pos-2-0'
ls_blnd_pos_2 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 0, -1],
[1, 1, 0, -1]]
# result with blender 'pos-2-0.25'
ls_blnd_pos_2_25 = [[0, 0, 1, -1],
[1, 0, 1, -1],
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 1, 1, -1],
[1, 1, 0, -1],
[0, 1, 0, -1],
[0, 1, 0, -1]]
# result with blender 'avg_pos-2' == 'avg_pos-2-0'
ls_blnd_avg_pos_2 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, -0.4],
[0.56666667, 0.00000000, 0.63333333, -0.36666667],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.40000000, -0.66666667],
[0.13333333, 0.63333333, 0.00000000, -0.83333333],
[0.13333333, 0.83333333, 0.00000000, -1.]]
# result with blender 'avg_pos-2-0.25'
ls_blnd_avg_pos_2_25 = [[0.00000000, 0.00000000, 0.50000000, -0.3],
[0.46666667, 0.00000000, 0.50000000, -0.53333333],
[0.43333333, 0.00000000, 0.76666667, 0.00000000],
[0.56666667, 0.00000000, 0.63333333, 0.00000000],
[0.53333333, 0.56666667, 0.56666667, -0.5],
[0.53333333, 0.60000000, 0.00000000, -0.66666667],
[0.00000000, 0.63333333, 0.00000000, -0.83333333],
[0.00000000, 0.83333333, 0.00000000, -1.]]
# result with blender 'combo'
ls_blnd_combo = [[0.5, 0., 1.5, -0.9],
[1.4, 0., 1.5, -1.6],
[1.3, 0.5, 2.3, -1.2],
[1.7, 0.5, 1.9, -1.1],
[1.6, 1.7, 1.7, -1.5],
[1.6, 1.8, 1.2, -2.],
[0.4, 1.9, 0., -2.5],
[0.4, 2.5, 0., -3.]]
ls_masks = np.array([np.array(ls_mask1), np.array(ls_mask2), np.array(ls_mask3)])
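# Blender semantics as implied by the expected arrays above: 'avg' is the element-wise mean of the
# three masks, 'combo' their element-wise sum, 'str-T' takes the sign of the sum wherever its
# absolute value reaches the threshold T, 'pos-N(-T)' goes long/short wherever at least N of the
# masks have an absolute value above T (T defaults to 0), and 'avg_pos-N(-T)' applies the same
# N/T filter but keeps the averaged value instead of a bare +/-1.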
# test A: the ls_blender 'str-T'
self.op.set_blender('ls', 'str-1.5')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'test A: result of ls_blender: str-1.5: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_str_15))
# test B: the ls_blender 'pos-N-T'
self.op.set_blender('ls', 'pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-1: result of ls_blender: pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2))
self.op.set_blender('ls', 'pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test B-2: result of ls_blender: pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_pos_2_25))
# test C: the ls_blender 'avg_pos-N-T'
self.op.set_blender('ls', 'avg_pos-2')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-1: result of ls_blender: avg_pos-2: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg_pos_2, 5))
self.op.set_blender('ls', 'avg_pos-2-0.25')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test C-2: result of ls_blender: avg_pos-2-0.25: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg_pos_2_25, 5))
# test D: the ls_blender 'avg'
self.op.set_blender('ls', 'avg')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test D: result of ls_blender: avg: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_avg))
# test E: the ls_blender 'combo'
self.op.set_blender('ls', 'combo')
res = qt.Operator._ls_blend(self.op, ls_masks)
print(f'Test E: result of ls_blender: combo: \n{res}')
self.assertTrue(np.allclose(res, ls_blnd_combo))
def test_sel_blend(self):
"""测试选股蒙板的混合器,包括所有的混合模式"""
# step2, test blending of sel masks
pass
def test_bs_blend(self):
"""测试买卖信号混合模式"""
# step3, test blending of op signals
pass
def test_unify(self):
print('Testing Unify functions\n')
l1 = np.array([[3, 2, 5], [5, 3, 2]])
res = qt.unify(l1)
target = np.array([[0.3, 0.2, 0.5], [0.5, 0.3, 0.2]])
self.assertTrue(np.allclose(res, target), 'each row of the result should sum to 1')
l1 = np.array([[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
res = qt.unify(l1)
target = np.array([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])
self.assertTrue(np.allclose(res, target), 'each row of the result should sum to 1')
def test_mask_to_signal(self):
signal = qt.mask_to_signal(self.mask)
print(f'Test A: single mask to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_signal))
signal = qt.mask_to_signal(self.multi_mask)
print(f'Test B: multiple masks to signal, result: \n{signal}')
self.assertTrue(np.allclose(signal, self.correct_multi_signal))
class TestLSStrategy(qt.RollingTiming):
"""用于test测试的简单多空蒙板生成策略。基于RollingTiming滚动择时方法生成
该策略有两个参数,N与Price
N用于计算OHLC价格平均值的N日简单移动平均,判断,当移动平均值大于等于Price时,状态为看多,否则为看空
"""
def __init__(self):
super().__init__(stg_name='test_LS',
stg_text='test long/short strategy',
par_count=2,
par_types='discr, conti',
par_bounds_or_enums=([1, 5], [2, 10]),
data_types='close, open, high, low',
data_freq='d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
n, price = params
h = hist_data.T
avg = (h[0] + h[1] + h[2] + h[3]) / 4
ma = sma(avg, n)
if ma[-1] < price:
return 0
else:
return 1
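# A worked example of the rule above, with hypothetical numbers: for params (n, price) = (2, 9.96)
# and OHLC mean prices ending in [..., 9.95, 10.00], the 2-day SMA is 9.975 >= 9.96, so _realize
# returns 1 (long); an SMA below 9.96 would return 0 (short/flat).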
class TestSelStrategy(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='high, low, close',
data_freq='d',
sample_freq='10d',
window_length=5)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = np.nanmean(hist_data, axis=(1, 2))
dif = (hist_data[:, :, 2] - np.roll(hist_data[:, :, 2], 1, 1))
dif_no_nan = np.array([arr[~np.isnan(arr)][-1] for arr in dif])
difper = dif_no_nan / avg
large2 = difper.argsort()[1:]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
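# Note on the selection above: difper.argsort()[1:] keeps the indices of the two largest change
# ratios among the three shares (argsort is ascending), and each selected share receives a 0.5 weight.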
class TestSelStrategyDiffTime(qt.SimpleSelecting):
"""用于Test测试的简单选股策略,基于Selecting策略生成
策略没有参数,选股周期为5D
在每个选股周期内,从股票池的三只股票中选出今日变化率 = (今收-昨收)/平均股价(OHLC平均股价)最高的两支,放入中选池,否则落选。
选股比例为平均分配
"""
def __init__(self):
super().__init__(stg_name='test_SEL',
stg_text='test portfolio selection strategy',
par_count=0,
par_types='',
par_bounds_or_enums=(),
data_types='close, low, open',
data_freq='d',
sample_freq='w',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
avg = hist_data.mean(axis=1).squeeze()
difper = (hist_data[:, :, 0] - np.roll(hist_data[:, :, 0], 1))[:, -1] / avg
large2 = difper.argsort()[0:2]
chosen = np.zeros_like(avg)
chosen[large2] = 0.5
return chosen
class TestSigStrategy(qt.SimpleTiming):
"""用于Test测试的简单信号生成策略,基于SimpleTiming策略生成
策略有三个参数,第一个参数为ratio,另外两个参数为price1以及price2
ratio是k线形状比例的阈值,定义为abs((C-O)/(H-L))。当这个比值小于ratio阈值时,判断该K线为十字交叉(其实还有丁字等多种情形,但这里做了
简化处理。
信号生成的规则如下:
1,当某个K线出现十字交叉,且昨收与今收之差大于price1时,买入信号
2,当某个K线出现十字交叉,且昨收与今收之差小于price2时,卖出信号
"""
def __init__(self):
super().__init__(stg_name='test_SIG',
stg_text='test signal creation strategy',
par_count=3,
par_types='conti, conti, conti',
par_bounds_or_enums=([2, 10], [0, 3], [0, 3]),
data_types='close, open, high, low',
window_length=2)
pass
def _realize(self, hist_data: np.ndarray, params: tuple):
r, price1, price2 = params
h = hist_data.T
ratio = np.abs((h[0] - h[1]) / (h[3] - h[2]))
diff = h[0] - np.roll(h[0], 1)
sig = np.where((ratio < r) & (diff > price1),
1,
np.where((ratio < r) & (diff < price2), -1, 0))
return sig
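# Example with the parameters used later in test_prepare_data, (r, price1, price2) = (0.2, 0.02, -0.02):
# a bar whose body is less than 20% of its high-low range counts as a doji cross; it emits +1 if the
# close rose by more than 0.02 versus the previous bar, -1 if it fell by more than 0.02, and 0 otherwise.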
class TestOperator(unittest.TestCase):
"""全面测试Operator对象的所有功能。包括:
1, Strategy 参数的设置
2, 历史数据的获取与分配提取
3, 策略优化参数的批量设置和优化空间的获取
4, 策略输出值的正确性验证
5, 策略结果的混合结果确认
"""
def setUp(self):
"""prepare data for Operator test"""
print('start testing HistoryPanel object\n')
# build up test data: a 4-type, 3-share, 50-day matrix of prices that contains nan values in some days
# for some share_pool
# for share1:
data_rows = 50
share1_close = [10.04, 10, 10, 9.99, 9.97, 9.99, 10.03, 10.03, 10.06, 10.06, 10.11,
10.09, 10.07, 10.06, 10.09, 10.03, 10.03, 10.06, 10.08, 10, 9.99,
10.03, 10.03, 10.06, 10.03, 9.97, 9.94, 9.83, 9.77, 9.84, 9.91, 9.93,
9.96, 9.91, 9.91, 9.88, 9.91, 9.64, 9.56, 9.57, 9.55, 9.57, 9.61, 9.61,
9.55, 9.57, 9.63, 9.64, 9.65, 9.62]
share1_open = [10.02, 10, 9.98, 9.97, 9.99, 10.01, 10.04, 10.06, 10.06, 10.11,
10.11, 10.07, 10.06, 10.09, 10.03, 10.02, 10.06, 10.08, 9.99, 10,
10.03, 10.02, 10.06, 10.03, 9.97, 9.94, 9.83, 9.78, 9.77, 9.91, 9.92,
9.97, 9.91, 9.9, 9.88, 9.91, 9.63, 9.64, 9.57, 9.55, 9.58, 9.61, 9.62,
9.55, 9.57, 9.61, 9.63, 9.64, 9.61, 9.56]
share1_high = [10.07, 10, 10, 10, 10.03, 10.03, 10.04, 10.09, 10.1, 10.14, 10.11, 10.1,
10.09, 10.09, 10.1, 10.05, 10.07, 10.09, 10.1, 10, 10.04, 10.04, 10.06,
10.09, 10.05, 9.97, 9.96, 9.86, 9.77, 9.92, 9.94, 9.97, 9.97, 9.92, 9.92,
9.92, 9.93, 9.64, 9.58, 9.6, 9.58, 9.62, 9.62, 9.64, 9.59, 9.62, 9.63,
9.7, 9.66, 9.64]
share1_low = [9.99, 10, 9.97, 9.97, 9.97, 9.98, 9.99, 10.03, 10.03, 10.04, 10.11, 10.07,
10.05, 10.03, 10.03, 10.01, 9.99, 10.03, 9.95, 10, 9.95, 10, 10.01, 9.99,
9.96, 9.89, 9.83, 9.77, 9.77, 9.8, 9.9, 9.91, 9.89, 9.89, 9.87, 9.85, 9.6,
9.64, 9.53, 9.55, 9.54, 9.55, 9.58, 9.54, 9.53, 9.53, 9.63, 9.64, 9.59, 9.56]
# for share2:
share2_close = [9.68, 9.87, 9.86, 9.87, 9.79, 9.82, 9.8, 9.66, 9.62, 9.58, 9.69, 9.78, 9.75,
9.96, 9.9, 10.04, 10.06, 10.08, 10.24, 10.24, 10.24, 9.86, 10.13, 10.12,
10.1, 10.25, 10.24, 10.22, 10.75, 10.64, 10.56, 10.6, 10.42, 10.25, 10.24,
10.49, 10.57, 10.63, 10.48, 10.37, 10.96, 11.02, np.nan, np.nan, 10.88, 10.87, 11.01,
11.01, 11.58, 11.8]
share2_open = [9.88, 9.88, 9.89, 9.75, 9.74, 9.8, 9.62, 9.65, 9.58, 9.67, 9.81, 9.8, 10,
9.95, 10.1, 10.06, 10.14, 9.9, 10.2, 10.29, 9.86, 9.48, 10.01, 10.24, 10.26,
10.24, 10.12, 10.65, 10.64, 10.56, 10.42, 10.43, 10.29, 10.3, 10.44, 10.6,
10.67, 10.46, 10.39, 10.9, 11.01, 11.01, np.nan, np.nan, 10.82, 11.02, 10.96,
11.55, 11.74, 11.8]
share2_high = [9.91, 10.04, 9.93, 10.04, 9.84, 9.88, 9.99, 9.7, 9.67, 9.71, 9.85, 9.9, 10,
10.2, 10.11, 10.18, 10.21, 10.26, 10.38, 10.47, 10.42, 10.07, 10.24, 10.27,
10.38, 10.43, 10.39, 10.65, 10.84, 10.65, 10.73, 10.63, 10.51, 10.35, 10.46,
10.63, 10.74, 10.76, 10.54, 11.02, 11.12, 11.17, np.nan, np.nan, 10.92, 11.15,
11.11, 11.55, 11.95, 11.93]
share2_low = [9.63, 9.84, 9.81, 9.74, 9.67, 9.72, 9.57, 9.54, 9.51, 9.47, 9.68, 9.63, 9.75,
9.65, 9.9, 9.93, 10.03, 9.8, 10.14, 10.09, 9.78, 9.21, 9.11, 9.68, 10.05,
10.12, 9.89, 9.89, 10.59, 10.43, 10.34, 10.32, 10.21, 10.2, 10.18, 10.36,
10.51, 10.41, 10.32, 10.37, 10.87, 10.95, np.nan, np.nan, 10.65, 10.71, 10.75,
10.91, 11.31, 11.58]
# for share3:
share3_close = [6.64, 7.26, 7.03, 6.87, np.nan, 6.64, 6.85, 6.7, 6.39, 6.22, 5.92, 5.91, 6.11,
5.91, 6.23, 6.28, 6.28, 6.27, np.nan, 5.56, 5.67, 5.16, 5.69, 6.32, 6.14, 6.25,
5.79, 5.26, 5.05, 5.45, 6.06, 6.21, 5.69, 5.46, 6.02, 6.69, 7.43, 7.72, 8.16,
7.83, 8.7, 8.71, 8.88, 8.54, 8.87, 8.87, 8.18, 7.8, 7.97, 8.25]
share3_open = [7.26, 7, 6.88, 6.91, np.nan, 6.81, 6.63, 6.45, 6.16, 6.24, 5.96, 5.97, 5.96,
6.2, 6.35, 6.11, 6.37, 5.58, np.nan, 5.65, 5.19, 5.42, 6.3, 6.15, 6.05, 5.89,
5.22, 5.2, 5.07, 6.04, 6.12, 5.85, 5.67, 6.02, 6.04, 7.07, 7.64, 7.99, 7.59,
8.73, 8.72, 8.97, 8.58, 8.71, 8.77, 8.4, 7.95, 7.76, 8.25, 7.51]
share3_high = [7.41, 7.31, 7.14, 7, np.nan, 6.82, 6.96, 6.85, 6.5, 6.34, 6.04, 6.02, 6.12, 6.38,
6.43, 6.46, 6.43, 6.27, np.nan, 6.01, 5.67, 5.67, 6.35, 6.32, 6.43, 6.36, 5.79,
5.47, 5.65, 6.04, 6.14, 6.23, 5.83, 6.25, 6.27, 7.12, 7.82, 8.14, 8.27, 8.92,
8.76, 9.15, 8.9, 9.01, 9.16, 9, 8.27, 7.99, 8.33, 8.25]
share3_low = [6.53, 6.87, 6.83, 6.7, np.nan, 6.63, 6.57, 6.41, 6.15, 6.07, 5.89, 5.82, 5.73, 5.81,
6.1, 6.06, 6.16, 5.57, np.nan, 5.51, 5.19, 5.12, 5.69, 6.01, 5.97, 5.86, 5.18, 5.19,
4.96, 5.45, 5.84, 5.85, 5.28, 5.42, 6.02, 6.69, 7.28, 7.64, 7.25, 7.83, 8.41, 8.66,
8.53, 8.54, 8.73, 8.27, 7.95, 7.67, 7.8, 7.51]
# for sel_finance test
shares_eps = np.array([[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, 0.2, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[0.1, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.3, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, 0, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.15, np.nan, np.nan],
[np.nan, 0.1, np.nan],
[np.nan, np.nan, np.nan],
[0.1, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[0.2, np.nan, np.nan],
[np.nan, 0.5, np.nan],
[0.4, np.nan, 0.3],
[np.nan, np.nan, np.nan],
[np.nan, 0.3, np.nan],
[0.9, np.nan, np.nan],
[np.nan, np.nan, 0.1]])
self.date_indices = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14', '2016-07-15', '2016-07-18',
'2016-07-19', '2016-07-20', '2016-07-21', '2016-07-22',
'2016-07-25', '2016-07-26', '2016-07-27', '2016-07-28',
'2016-07-29', '2016-08-01', '2016-08-02', '2016-08-03',
'2016-08-04', '2016-08-05', '2016-08-08', '2016-08-09',
'2016-08-10', '2016-08-11', '2016-08-12', '2016-08-15',
'2016-08-16', '2016-08-17', '2016-08-18', '2016-08-19',
'2016-08-22', '2016-08-23', '2016-08-24', '2016-08-25',
'2016-08-26', '2016-08-29', '2016-08-30', '2016-08-31',
'2016-09-01', '2016-09-02', '2016-09-05', '2016-09-06',
'2016-09-07', '2016-09-08']
self.shares = ['000010', '000030', '000039']
self.types = ['close', 'open', 'high', 'low']
self.sel_finance_tyeps = ['eps']
self.test_data_3D = np.zeros((3, data_rows, 4))
self.test_data_2D = np.zeros((data_rows, 3))
self.test_data_2D2 = np.zeros((data_rows, 4))
self.test_data_sel_finance = np.empty((3, data_rows, 1))
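# Layout of the 3D test data: axis 0 = share, axis 1 = trading day, axis 2 = price type
# (close, open, high, low), matching the levels/rows/columns arguments passed to qt.HistoryPanel below.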
# Build up 3D data
self.test_data_3D[0, :, 0] = share1_close
self.test_data_3D[0, :, 1] = share1_open
self.test_data_3D[0, :, 2] = share1_high
self.test_data_3D[0, :, 3] = share1_low
self.test_data_3D[1, :, 0] = share2_close
self.test_data_3D[1, :, 1] = share2_open
self.test_data_3D[1, :, 2] = share2_high
self.test_data_3D[1, :, 3] = share2_low
self.test_data_3D[2, :, 0] = share3_close
self.test_data_3D[2, :, 1] = share3_open
self.test_data_3D[2, :, 2] = share3_high
self.test_data_3D[2, :, 3] = share3_low
self.test_data_sel_finance[:, :, 0] = shares_eps.T
self.hp1 = qt.HistoryPanel(values=self.test_data_3D,
levels=self.shares,
columns=self.types,
rows=self.date_indices)
print(f'in test Operator, history panel is created for timing test')
self.hp1.info()
self.hp2 = qt.HistoryPanel(values=self.test_data_sel_finance,
levels=self.shares,
columns=self.sel_finance_tyeps,
rows=self.date_indices)
print(f'in test_Operator, history panel is created for selection finance test:')
self.hp2.info()
self.op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
def test_info(self):
"""Test information output of Operator"""
print(f'test printing information of operator object')
# self.op.info()
def test_operator_ready(self):
"""test the method ready of Operator"""
pass
# print(f'operator is ready? "{self.op.ready}"')
def test_operator_add_strategy(self):
"""test adding strategies to Operator"""
pass
# self.assertIsInstance(self.op, qt.Operator)
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
# self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 3)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 1)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# print(f'test adding strategies into existing op')
# print('test adding strategy by string')
# self.op.add_strategy('macd', 'timing')
# self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
# self.assertIsInstance(self.op.timing[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 1)
# self.assertEqual(self.op.strategy_count, 4)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.ls_blender, 'pos-1')
# self.op.add_strategy('random', 'selecting')
# self.assertIsInstance(self.op.selecting[0], qt.TimingDMA)
# self.assertIsInstance(self.op.selecting[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 5)
# self.assertEqual(self.op.ricon_count, 1)
# self.assertEqual(self.op.timing_count, 2)
# self.assertEqual(self.op.selecting_blender, '0 or 1')
# self.op.add_strategy('none', 'ricon')
# self.assertIsInstance(self.op.ricon[0], qt.TimingDMA)
# self.assertIsInstance(self.op.ricon[1], qt.TimingMACD)
# self.assertEqual(self.op.selecting_count, 2)
# self.assertEqual(self.op.strategy_count, 6)
# self.assertEqual(self.op.ricon_count, 2)
# self.assertEqual(self.op.timing_count, 2)
# print('test adding strategy by list')
# self.op.add_strategy(['dma', 'macd'], 'timing')
# print('test adding strategy by object')
# test_ls = TestLSStrategy()
# self.op.add_strategy(test_ls, 'timing')
def test_operator_remove_strategy(self):
"""test removing strategies from Operator"""
pass
# self.op.remove_strategy(stg='macd')
def test_property_get(self):
self.assertIsInstance(self.op, qt.Operator)
self.assertIsInstance(self.op.timing[0], qt.TimingDMA)
self.assertIsInstance(self.op.selecting[0], qt.SelectingAll)
self.assertIsInstance(self.op.ricon[0], qt.RiconUrgent)
self.assertEqual(self.op.selecting_count, 1)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.ricon_count, 1)
self.assertEqual(self.op.timing_count, 1)
print(self.op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy: \n{self.op.strategies[0].info()}')
self.assertEqual(len(self.op.strategies), 3)
self.assertIsInstance(self.op.strategies[0], qt.TimingDMA)
self.assertIsInstance(self.op.strategies[1], qt.SelectingAll)
self.assertIsInstance(self.op.strategies[2], qt.RiconUrgent)
self.assertEqual(self.op.strategy_count, 3)
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close'])
self.assertEqual(self.op.opt_space_par, ([], []))
self.assertEqual(self.op.max_window_length, 270)
self.assertEqual(self.op.ls_blender, 'pos-1')
self.assertEqual(self.op.selecting_blender, '0')
self.assertEqual(self.op.ricon_blender, 'add')
self.assertEqual(self.op.opt_types, [0, 0, 0])
def test_prepare_data(self):
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
too_early_cash = qt.CashPlan(dates='2016-01-01', amounts=10000)
early_cash = qt.CashPlan(dates='2016-07-01', amounts=10000)
on_spot_cash = qt.CashPlan(dates='2016-07-08', amounts=10000)
no_trade_cash = qt.CashPlan(dates='2016-07-08, 2016-07-30, 2016-08-11, 2016-09-03',
amounts=[10000, 10000, 10000, 10000])
late_cash = qt.CashPlan(dates='2016-12-31', amounts=10000)
multi_cash = qt.CashPlan(dates='2016-07-08, 2016-08-08', amounts=[10000, 10000])
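# the cash plans above cover scenarios relative to the test history range: investment
# dates that fall before, inside and after the available trade dates, as well as dates
# that land on non-trade days; they are used below to check prepare_data() error handling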
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=on_spot_cash)
self.assertIsInstance(self.op._selecting_history_data, list)
self.assertIsInstance(self.op._timing_history_data, list)
self.assertIsInstance(self.op._ricon_history_data, list)
self.assertEqual(len(self.op._selecting_history_data), 1)
self.assertEqual(len(self.op._timing_history_data), 1)
self.assertEqual(len(self.op._ricon_history_data), 1)
sel_hist_data = self.op._selecting_history_data[0]
tim_hist_data = self.op._timing_history_data[0]
ric_hist_data = self.op._ricon_history_data[0]
print(f'in test_prepare_data in TestOperator:')
print('selecting history data:\n', sel_hist_data)
print('originally passed data in correct sequence:\n', self.test_data_3D[:, 3:, [2, 3, 0]])
print('difference is \n', sel_hist_data - self.test_data_3D[:, :, [2, 3, 0]])
self.assertTrue(np.allclose(sel_hist_data, self.test_data_3D[:, :, [2, 3, 0]], equal_nan=True))
self.assertTrue(np.allclose(tim_hist_data, self.test_data_3D, equal_nan=True))
self.assertTrue(np.allclose(ric_hist_data, self.test_data_3D[:, 3:, :], equal_nan=True))
# raises Value Error if empty history panel is given
empty_hp = qt.HistoryPanel()
correct_hp = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 4)),
columns=self.types,
levels=self.shares,
rows=self.date_indices)
too_many_shares = qt.HistoryPanel(values=np.random.randint(10, size=(5, 50, 4)))
too_many_types = qt.HistoryPanel(values=np.random.randint(10, size=(3, 50, 5)))
# raises Error when history panel is empty
self.assertRaises(ValueError,
self.op.prepare_data,
empty_hp,
on_spot_cash)
# raises Error when first investment date is too early
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
early_cash)
# raises Error when last investment date is too late
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
late_cash)
# raises Error when some of the investment dates are on no-trade-days
self.assertRaises(ValueError,
self.op.prepare_data,
correct_hp,
no_trade_cash)
# raises Error when number of shares in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_shares,
on_spot_cash)
# raises Error when too early cash investment date
self.assertRaises(AssertionError,
self.op.prepare_data,
correct_hp,
too_early_cash)
# raises Error when number of d_types in history data does not fit
self.assertRaises(AssertionError,
self.op.prepare_data,
too_many_types,
on_spot_cash)
# TODO: test the effect of data type sequence in strategy definition
def test_operator_generate(self):
"""
:return:
"""
test_ls = TestLSStrategy()
test_sel = TestSelStrategy()
test_sig = TestSigStrategy()
self.op = qt.Operator(timing_types=[test_ls],
selecting_types=[test_sel],
ricon_types=[test_sig])
self.assertIsInstance(self.op, qt.Operator, 'Operator Creation Error')
self.op.set_parameter(stg_id='t-0',
pars={'000300': (5, 10.),
'000400': (5, 10.),
'000500': (5, 6.)})
self.op.set_parameter(stg_id='s-0',
pars=())
# calling prepare_data before all strategy parameters are set raises an AssertionError
self.assertRaises(AssertionError,
self.op.prepare_data,
hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.set_parameter(stg_id='r-0',
pars=(0.2, 0.02, -0.02))
self.op.prepare_data(hist_data=self.hp1,
cash_plan=qt.CashPlan(dates='2016-07-08', amounts=10000))
self.op.info()
op_list = self.op.create_signal(hist_data=self.hp1)
print(f'operation list is created: as following:\n {op_list}')
self.assertTrue(isinstance(op_list, pd.DataFrame))
self.assertEqual(op_list.shape, (26, 3))
# after removing the code that de-duplicates signals, the signal count grows from 23 to 26,
# including three duplicate signals; de-duplication could remove signals that should be kept,
# see the comment around line 836 of create_signal() in operator.py
target_op_dates = ['2016/07/08', '2016/07/12', '2016/07/13', '2016/07/14',
'2016/07/18', '2016/07/20', '2016/07/22', '2016/07/26',
'2016/07/27', '2016/07/28', '2016/08/02', '2016/08/03',
'2016/08/04', '2016/08/05', '2016/08/08', '2016/08/10',
'2016/08/16', '2016/08/18', '2016/08/24', '2016/08/26',
'2016/08/29', '2016/08/30', '2016/08/31', '2016/09/05',
'2016/09/06', '2016/09/08']
target_op_values = np.array([[0.0, 1.0, 0.0],
[0.5, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.5],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0]])
target_op = pd.DataFrame(data=target_op_values, index=target_op_dates, columns=['000010', '000030', '000039'])
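# renaming the index with pd.Timestamp converts the date strings above into Timestamps,
# so the target frame can be compared date-by-date with the generated op_list below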
target_op = target_op.rename(index=pd.Timestamp)
print(f'target operation list is as following:\n {target_op}')
dates_pairs = [[date1, date2, date1 == date2]
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]
signal_pairs = [[list(sig1), list(sig2), all(sig1 == sig2)]
for sig1, sig2
in zip(list(target_op.values), list(op_list.values))]
print(f'dates side by side:\n '
f'{dates_pairs}')
print(f'signals side by side:\n'
f'{signal_pairs}')
print([item[2] for item in dates_pairs])
print([item[2] for item in signal_pairs])
self.assertTrue(np.allclose(target_op.values, op_list.values, equal_nan=True))
self.assertTrue(all([date1 == date2
for date1, date2
in zip(target_op.index.strftime('%m-%d'), op_list.index.strftime('%m-%d'))]))
def test_operator_parameter_setting(self):
"""
:return:
"""
new_op = qt.Operator(selecting_types=['all'], timing_types='dma', ricon_types='urgent')
print(new_op.strategies, '\n', [qt.TimingDMA, qt.SelectingAll, qt.RiconUrgent])
print(f'info of Timing strategy in new op: \n{new_op.strategies[0].info()}')
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=None,
opt_tag=1,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=None,
opt_tag=0,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.timing[0].par_boes, ((5, 10), (5, 15), (10, 15)))
self.assertEqual(self.op.op_data_freq, 'd')
self.assertEqual(self.op.op_data_types, ['close', 'high', 'open'])
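# opt_space_par is expected to pool the parameter boundaries and types of all strategies
# whose opt_tag is non-zero (here the timing and selecting strategies set above)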
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.max_window_length, 20)
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t-1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id='t1', pars=(1, 2))
self.assertRaises(AssertionError, self.op.set_parameter, stg_id=32, pars=(1, 2))
self.op.set_blender('selecting', '0 and 1 or 2')
self.op.set_blender('ls', 'str-1.2')
self.assertEqual(self.op.ls_blender, 'str-1.2')
self.assertEqual(self.op.selecting_blender, '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.assertEqual(self.op.ricon_blender, 'add')
self.assertRaises(ValueError, self.op.set_blender, 'select', '0and1')
self.assertRaises(TypeError, self.op.set_blender, 35, '0 and 1')
self.assertEqual(self.op.opt_space_par,
([(5, 10), (5, 15), (10, 15), (0, 1)], ['discr', 'discr', 'discr', 'conti']))
self.assertEqual(self.op.opt_types, [1, 1, 0])
def test_exp_to_blender(self):
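# the assertions below show that set_blender parses the infix selecting-blender string
# into a prefix (operator-first) token list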
self.op.set_blender('selecting', '0 and 1 or 2')
self.assertEqual(self.op.selecting_blender_expr, ['or', 'and', '0', '1', '2'])
self.op.set_blender('selecting', '0 and ( 1 or 2 )')
self.assertEqual(self.op.selecting_blender_expr, ['and', '0', 'or', '1', '2'])
self.assertRaises(ValueError, self.op.set_blender, 'selecting', '0 and (1 or 2)')
def test_set_opt_par(self):
self.op.set_parameter('t-0',
pars=(5, 10, 5),
opt_tag=1,
par_boes=((5, 10), (5, 15), (10, 15)),
window_length=10,
data_types=['close', 'open', 'high'])
self.op.set_parameter(stg_id='s-0',
pars=(0.5,),
opt_tag=0,
sample_freq='10d',
window_length=10,
data_types='close, open')
self.op.set_parameter(stg_id='r-0',
pars=(9, -0.23),
opt_tag=1,
sample_freq='d',
window_length=20,
data_types='close, open')
self.assertEqual(self.op.timing[0].pars, (5, 10, 5))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (9, -0.23))
self.assertEqual(self.op.opt_types, [1, 0, 1])
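# set_opt_par should distribute the flat parameter tuple over the strategies with a
# non-zero opt_tag only: the first three values go to the timing strategy, the last two
# to the ricon strategy, while the selecting strategy keeps its original parameters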
self.op.set_opt_par((5, 12, 9, 8, -0.1))
self.assertEqual(self.op.timing[0].pars, (5, 12, 9))
self.assertEqual(self.op.selecting[0].pars, (0.5,))
self.assertEqual(self.op.ricon[0].pars, (8, -0.1))
# TODO: test set_opt_par when opt_tag is set to 2 (enumerated type of parameters)
# a parameter tuple with the wrong number of elements should raise a ValueError
self.assertRaises(ValueError, self.op.set_opt_par, (5, 12, 9, 8))
def test_stg_attribute_get_and_set(self):
self.stg = qt.TimingCrossline()
self.stg_type = 'TIMING'
self.stg_name = "CROSSLINE STRATEGY"
self.stg_text = 'Moving average crossline strategy, determine long/short position according to the cross ' \
'point' \
' of long and short term moving average prices '
self.pars = (35, 120, 10, 'buy')
self.par_boes = [(10, 250), (10, 250), (1, 100), ('buy', 'sell', 'none')]
self.par_count = 4
self.par_types = ['discr', 'discr', 'conti', 'enum']
self.opt_tag = 0
self.data_types = ['close']
self.data_freq = 'd'
self.sample_freq = 'd'
self.window_length = 270
self.assertEqual(self.stg.stg_type, self.stg_type)
self.assertEqual(self.stg.stg_name, self.stg_name)
self.assertEqual(self.stg.stg_text, self.stg_text)
self.assertEqual(self.stg.pars, self.pars)
self.assertEqual(self.stg.par_types, self.par_types)
self.assertEqual(self.stg.par_boes, self.par_boes)
self.assertEqual(self.stg.par_count, self.par_count)
self.assertEqual(self.stg.opt_tag, self.opt_tag)
self.assertEqual(self.stg.data_freq, self.data_freq)
self.assertEqual(self.stg.sample_freq, self.sample_freq)
self.assertEqual(self.stg.data_types, self.data_types)
self.assertEqual(self.stg.window_length, self.window_length)
self.stg.stg_name = 'NEW NAME'
self.stg.stg_text = 'NEW TEXT'
self.assertEqual(self.stg.stg_name, 'NEW NAME')
self.assertEqual(self.stg.stg_text, 'NEW TEXT')
self.stg.pars = (1, 2, 3, 4)
self.assertEqual(self.stg.pars, (1, 2, 3, 4))
self.stg.par_count = 3
self.assertEqual(self.stg.par_count, 3)
self.stg.par_boes = [(1, 10), (1, 10), (1, 10), (1, 10)]
self.assertEqual(self.stg.par_boes, [(1, 10), (1, 10), (1, 10), (1, 10)])
self.stg.par_types = ['conti', 'conti', 'discr', 'enum']
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'enum'])
self.stg.par_types = 'conti, conti, discr, conti'
self.assertEqual(self.stg.par_types, ['conti', 'conti', 'discr', 'conti'])
self.stg.data_types = 'close, open'
self.assertEqual(self.stg.data_types, ['close', 'open'])
self.stg.data_types = ['close', 'high', 'low']
self.assertEqual(self.stg.data_types, ['close', 'high', 'low'])
self.stg.data_freq = 'w'
self.assertEqual(self.stg.data_freq, 'w')
self.stg.window_length = 300
self.assertEqual(self.stg.window_length, 300)
def test_rolling_timing(self):
stg = TestLSStrategy()
stg_pars = {'000100': (5, 10),
'000200': (5, 10),
'000300': (5, 6)}
stg.set_pars(stg_pars)
history_data = self.hp1.values
output = stg.generate(hist_data=history_data)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
lsmask = np.array([[0., 0., 1.],
[0., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 0., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.],
[1., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.],
[0., 1., 1.]])
# TODO: Issue to be solved: the np.nan values are converted to 0 in the lsmask,
# TODO: which may have unintended consequences; the handling of nan values needs to be resolved
self.assertEqual(output.shape, lsmask.shape)
self.assertTrue(np.allclose(output, lsmask, equal_nan=True))
def test_sel_timing(self):
stg = TestSelStrategy()
stg_pars = ()
stg.set_pars(stg_pars)
history_data = self.hp1['high, low, close', :, :]
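# _seg_periods is expected to split the history dates into sampling segments according to
# the strategy's sample_freq; the positions, lengths and count below are the expected
# segmentation of the test date range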
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
def test_simple_timing(self):
stg = TestSigStrategy()
stg_pars = (0.2, 0.02, -0.02)
stg.set_pars(stg_pars)
history_data = self.hp1['close, open, high, low', :, 3:50]
output = stg.generate(hist_data=history_data, shares=self.shares, dates=self.date_indices)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
sigmatrix = np.array([[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]])
side_by_side_array = np.array([[i, out_line, sig_line]
for
i, out_line, sig_line
in zip(range(len(output)), output, sigmatrix)])
print(f'output and signal matrix lined up side by side is \n'
f'{side_by_side_array}')
self.assertEqual(sigmatrix.shape, output.shape)
self.assertTrue(np.allclose(output, sigmatrix))
def test_sel_finance(self):
"""Test selecting_finance strategy, test all built-in strategy parameters"""
stg = SelectingFinanceIndicator()
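# judging from the attribute assignments below, the parameter tuple is ordered as
# (sort_ascending, weighting, condition, lbound, ubound, proportion_or_quantity)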
stg_pars = (False, 'even', 'greater', 0, 0, 0.67)
stg.set_pars(stg_pars)
stg.window_length = 5
stg.data_freq = 'd'
stg.sample_freq = '10d'
stg.sort_ascending = False
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg._poq = 0.67
history_data = self.hp2.values
print(f'Start to test financial selection parameter {stg_pars}')
seg_pos, seg_length, seg_count = stg._seg_periods(dates=self.hp1.hdates, freq=stg.sample_freq)
self.assertEqual(list(seg_pos), [0, 5, 11, 19, 26, 33, 41, 47, 49])
self.assertEqual(list(seg_length), [5, 6, 8, 7, 7, 8, 6, 2])
self.assertEqual(seg_count, 8)
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
self.assertIsInstance(output, np.ndarray)
self.assertEqual(output.shape, (45, 3))
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get minimum factor
stg_pars = (True, 'even', 'less', 1, 1, 0.67)
stg.sort_ascending = True
stg.condition = 'less'
stg.lbound = 1
stg.ubound = 1
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.0, 0.5]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in linear weight
stg_pars = (False, 'linear', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'linear'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.66667, 0.33333],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.00000, 0.33333, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.00000, 0.66667],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000],
[0.33333, 0.66667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask))
# test single factor, get max factor in proportion weight
stg_pars = (False, 'proportion', 'greater', 0, 0, 0.67)
stg.sort_ascending = False
stg.weighting = 'proportion'
stg.condition = 'greater'
stg.lbound = 0
stg.ubound = 0
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.08333, 0.91667],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.91667, 0.08333],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.00000, 0.50000, 0.50000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.00000, 0.00000, 1.00000],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.00000, 0.91667],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000],
[0.08333, 0.91667, 0.00000]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
# test single factor, get max factor in even weight, with threshold 0.2
stg_pars = (False, 'even', 'greater', 0.2, 0.2, 0.67)
stg.sort_ascending = False
stg.weighting = 'even'
stg.condition = 'greater'
stg.lbound = 0.2
stg.ubound = 0.2
stg.set_pars(stg_pars)
print(f'Start to test financial selection parameter {stg_pars}')
output = stg.generate(hist_data=history_data, shares=self.hp1.shares, dates=self.hp1.hdates)
selmask = np.array([[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0],
[0.5, 0.5, 0.0]])
self.assertEqual(output.shape, selmask.shape)
self.assertTrue(np.allclose(output, selmask, 0.001))
class TestLog(unittest.TestCase):
def test_init(self):
pass
class TestConfig(unittest.TestCase):
"""测试Config对象以及QT_CONFIG变量的设置和获取值"""
def test_init(self):
pass
def test_invest(self):
pass
def test_pars_string_to_type(self):
_parse_string_kwargs('000300', 'asset_pool', _valid_qt_kwargs())
class TestHistoryPanel(unittest.TestCase):
def setUp(self):
print('start testing HistoryPanel object\n')
self.data = np.random.randint(10, size=(5, 10, 4))
self.index = pd.date_range(start='20200101', freq='d', periods=10)
self.index2 = ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06',
'2016-07-07', '2016-07-08', '2016-07-11', '2016-07-12',
'2016-07-13', '2016-07-14']
self.index3 = '2016-07-01, 2016-07-04, 2016-07-05, 2016-07-06, 2016-07-07, ' \
'2016-07-08, 2016-07-11, 2016-07-12, 2016-07-13, 2016-07-14'
self.shares = '000100,000101,000102,000103,000104'
self.htypes = 'close,open,high,low'
self.data2 = np.random.randint(10, size=(10, 5))
self.data3 = np.random.randint(10, size=(10, 4))
self.data4 = np.random.randint(10, size=(10))
self.hp = qt.HistoryPanel(values=self.data, levels=self.shares, columns=self.htypes, rows=self.index)
self.hp2 = qt.HistoryPanel(values=self.data2, levels=self.shares, columns='close', rows=self.index)
self.hp3 = qt.HistoryPanel(values=self.data3, levels='000100', columns=self.htypes, rows=self.index2)
self.hp4 = qt.HistoryPanel(values=self.data4, levels='000100', columns='close', rows=self.index3)
self.hp5 = qt.HistoryPanel(values=self.data)
self.hp6 = qt.HistoryPanel(values=self.data, levels=self.shares, rows=self.index3)
def test_properties(self):
""" test all properties of HistoryPanel
"""
self.assertFalse(self.hp.is_empty)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.shape, (5, 10, 4))
self.assertSequenceEqual(self.hp.htypes, ['close', 'open', 'high', 'low'])
self.assertSequenceEqual(self.hp.shares, ['000100', '000101', '000102', '000103', '000104'])
self.assertSequenceEqual(list(self.hp.hdates), list(self.index))
self.assertDictEqual(self.hp.columns, {'close': 0, 'open': 1, 'high': 2, 'low': 3})
self.assertDictEqual(self.hp.levels, {'000100': 0, '000101': 1, '000102': 2, '000103': 3, '000104': 4})
row_dict = {pd.Timestamp('2020-01-01 00:00:00', freq='D'): 0,
pd.Timestamp('2020-01-02 00:00:00', freq='D'): 1,
pd.Timestamp('2020-01-03 00:00:00', freq='D'): 2,
pd.Timestamp('2020-01-04 00:00:00', freq='D'): 3,
pd.Timestamp('2020-01-05 00:00:00', freq='D'): 4,
pd.Timestamp('2020-01-06 00:00:00', freq='D'): 5,
pd.Timestamp('2020-01-07 00:00:00', freq='D'): 6,
pd.Timestamp('2020-01-08 00:00:00', freq='D'): 7,
pd.Timestamp('2020-01-09 00:00:00', freq='D'): 8,
pd.Timestamp('2020-01-10 00:00:00', freq='D'): 9}
self.assertDictEqual(self.hp.rows, row_dict)
def test_len(self):
""" test the function len(HistoryPanel)
:return:
"""
self.assertEqual(len(self.hp), 10)
def test_empty_history_panel(self):
"""测试空HP或者特殊HP如维度标签为纯数字的HP"""
test_hp = qt.HistoryPanel(self.data)
self.assertFalse(test_hp.is_empty)
self.assertIsInstance(test_hp, qt.HistoryPanel)
self.assertEqual(test_hp.shape[0], 5)
self.assertEqual(test_hp.shape[1], 10)
self.assertEqual(test_hp.shape[2], 4)
self.assertEqual(test_hp.level_count, 5)
self.assertEqual(test_hp.row_count, 10)
self.assertEqual(test_hp.column_count, 4)
self.assertEqual(test_hp.shares, list(range(5)))
self.assertEqual(test_hp.hdates, list(pd.date_range(start='20200730', periods=10, freq='d')))
self.assertEqual(test_hp.htypes, list(range(4)))
self.assertTrue(np.allclose(test_hp.values, self.data))
print(f'shares: {test_hp.shares}\nhtypes: {test_hp.htypes}')
print(test_hp)
empty_hp = qt.HistoryPanel()
self.assertTrue(empty_hp.is_empty)
self.assertIsInstance(empty_hp, qt.HistoryPanel)
self.assertEqual(empty_hp.shape[0], 0)
self.assertEqual(empty_hp.shape[1], 0)
self.assertEqual(empty_hp.shape[2], 0)
self.assertEqual(empty_hp.level_count, 0)
self.assertEqual(empty_hp.row_count, 0)
self.assertEqual(empty_hp.column_count, 0)
def test_create_history_panel(self):
""" test the creation of a HistoryPanel object by passing all data explicitly
"""
self.assertIsInstance(self.hp, qt.HistoryPanel)
self.assertEqual(self.hp.shape[0], 5)
self.assertEqual(self.hp.shape[1], 10)
self.assertEqual(self.hp.shape[2], 4)
self.assertEqual(self.hp.level_count, 5)
self.assertEqual(self.hp.row_count, 10)
self.assertEqual(self.hp.column_count, 4)
self.assertEqual(list(self.hp.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp2, qt.HistoryPanel)
self.assertEqual(self.hp2.shape[0], 5)
self.assertEqual(self.hp2.shape[1], 10)
self.assertEqual(self.hp2.shape[2], 1)
self.assertEqual(self.hp2.level_count, 5)
self.assertEqual(self.hp2.row_count, 10)
self.assertEqual(self.hp2.column_count, 1)
self.assertEqual(list(self.hp2.levels.keys()), self.shares.split(','))
self.assertEqual(list(self.hp2.columns.keys()), ['close'])
self.assertEqual(list(self.hp2.rows.keys())[0], pd.Timestamp('20200101'))
self.assertIsInstance(self.hp3, qt.HistoryPanel)
self.assertEqual(self.hp3.shape[0], 1)
self.assertEqual(self.hp3.shape[1], 10)
self.assertEqual(self.hp3.shape[2], 4)
self.assertEqual(self.hp3.level_count, 1)
self.assertEqual(self.hp3.row_count, 10)
self.assertEqual(self.hp3.column_count, 4)
self.assertEqual(list(self.hp3.levels.keys()), ['000100'])
self.assertEqual(list(self.hp3.columns.keys()), self.htypes.split(','))
self.assertEqual(list(self.hp3.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.assertIsInstance(self.hp4, qt.HistoryPanel)
self.assertEqual(self.hp4.shape[0], 1)
self.assertEqual(self.hp4.shape[1], 10)
self.assertEqual(self.hp4.shape[2], 1)
self.assertEqual(self.hp4.level_count, 1)
self.assertEqual(self.hp4.row_count, 10)
self.assertEqual(self.hp4.column_count, 1)
self.assertEqual(list(self.hp4.levels.keys()), ['000100'])
self.assertEqual(list(self.hp4.columns.keys()), ['close'])
self.assertEqual(list(self.hp4.rows.keys())[0], pd.Timestamp('2016-07-01'))
self.hp5.info()
self.assertIsInstance(self.hp5, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp5.values, self.data))
self.assertEqual(self.hp5.shape[0], 5)
self.assertEqual(self.hp5.shape[1], 10)
self.assertEqual(self.hp5.shape[2], 4)
self.assertEqual(self.hp5.level_count, 5)
self.assertEqual(self.hp5.row_count, 10)
self.assertEqual(self.hp5.column_count, 4)
self.assertEqual(list(self.hp5.levels.keys()), [0, 1, 2, 3, 4])
self.assertEqual(list(self.hp5.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp5.rows.keys())[0], pd.Timestamp('2020-07-30'))
self.hp6.info()
self.assertIsInstance(self.hp6, qt.HistoryPanel)
self.assertTrue(np.allclose(self.hp6.values, self.data))
self.assertEqual(self.hp6.shape[0], 5)
self.assertEqual(self.hp6.shape[1], 10)
self.assertEqual(self.hp6.shape[2], 4)
self.assertEqual(self.hp6.level_count, 5)
self.assertEqual(self.hp6.row_count, 10)
self.assertEqual(self.hp6.column_count, 4)
self.assertEqual(list(self.hp6.levels.keys()), ['000100', '000101', '000102', '000103', '000104'])
self.assertEqual(list(self.hp6.columns.keys()), [0, 1, 2, 3])
self.assertEqual(list(self.hp6.rows.keys())[0], pd.Timestamp('2016-07-01'))
# Error testing during HistoryPanel creation
# shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data,
levels=self.shares, columns='close', rows=self.index)
# values is not an np.ndarray
self.assertRaises(AssertionError,
qt.HistoryPanel,
list(self.data))
# dimension/shape does not match
self.assertRaises(AssertionError,
qt.HistoryPanel,
self.data2,
levels='000100', columns=self.htypes, rows=self.index)
# value dimension over 3
self.assertRaises(AssertionError,
qt.HistoryPanel,
np.random.randint(10, size=(5, 10, 4, 2)))
# label values are not valid
self.assertRaises(ValueError,
qt.HistoryPanel,
self.data2,
levels=self.shares, columns='close',
rows='a,b,c,d,e,f,g,h,i,j')
def test_history_panel_slicing(self):
"""测试HistoryPanel的各种切片方法
包括通过标签名称切片,通过数字切片,通过逗号分隔的标签名称切片,通过冒号分隔的标签名称切片等切片方式"""
self.assertTrue(np.allclose(self.hp['close'], self.data[:, :, 0:1]))
self.assertTrue(np.allclose(self.hp['close,open'], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp[['close', 'open']], self.data[:, :, 0:2]))
self.assertTrue(np.allclose(self.hp['close:high'], self.data[:, :, 0:3]))
self.assertTrue(np.allclose(self.hp['close,high'], self.data[:, :, [0, 2]]))
self.assertTrue(np.allclose(self.hp[:, '000100'], self.data[0:1, :, ]))
self.assertTrue(np.allclose(self.hp[:, '000100,000101'], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, ['000100', '000101']], self.data[0:2, :]))
self.assertTrue(np.allclose(self.hp[:, '000100:000102'], self.data[0:3, :]))
self.assertTrue(np.allclose(self.hp[:, '000100,000102'], self.data[[0, 2], :]))
self.assertTrue(np.allclose(self.hp['close,open', '000100,000102'], self.data[[0, 2], :, 0:2]))
print('start testing HistoryPanel')
data = np.random.randint(10, size=(10, 5))
# index = pd.date_range(start='20200101', freq='d', periods=10)
shares = '000100,000101,000102,000103,000104'
dtypes = 'close'
df = pd.DataFrame(data)
print('=========================\nTesting HistoryPanel creation from DataFrame')
hp = qt.dataframe_to_hp(df=df, shares=shares, htypes=dtypes)
hp.info()
hp = qt.dataframe_to_hp(df=df, shares='000100', htypes='close, open, high, low, middle', column_type='htypes')
hp.info()
print('=========================\nTesting HistoryPanel creation from initialization')
data = np.random.randint(10, size=(5, 10, 4)).astype('float')
index = pd.date_range(start='20200101', freq='d', periods=10)
dtypes = 'close, open, high,low'
data[0, [5, 6, 9], [0, 1, 3]] = np.nan
data[1:4, [4, 7, 6, 2], [1, 1, 3, 0]] = np.nan
data[4:5, [2, 9, 1, 2], [0, 3, 2, 1]] = np.nan
hp = qt.HistoryPanel(data, levels=shares, columns=dtypes, rows=index)
hp.info()
print('==========================\noutput all history data of the close type\n')
self.assertTrue(np.allclose(hp['close', :, :], data[:, :, 0:1], equal_nan=True))
print(f'==========================\noutput all history data of the close and open types\n')
self.assertTrue(np.allclose(hp[[0, 1], :, :], data[:, :, 0:2], equal_nan=True))
print(f'==========================\noutput all data types of the first share\n')
self.assertTrue(np.allclose(hp[:, [0], :], data[0:1, :, :], equal_nan=True))
print('==========================\noutput the full history data of all shares for htypes 0, 1 and 2\n')
self.assertTrue(np.allclose(hp[[0, 1, 2]], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput all history data of the close and high types\n')
self.assertTrue(np.allclose(hp[['close', 'high']], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of htypes 0 and 1\n')
self.assertTrue(np.allclose(hp[[0, 1]], data[:, :, 0:2], equal_nan=True))
print('==========================\noutput all history data of the close and high types\n')
self.assertTrue(np.allclose(hp['close,high'], data[:, :, [0, 2]], equal_nan=True))
print('==========================\noutput all history data of the three types from close to high\n')
self.assertTrue(np.allclose(hp['close:high'], data[:, :, 0:3], equal_nan=True))
print('==========================\noutput the full history data of shares 0, 1 and 3\n')
self.assertTrue(np.allclose(hp[:, [0, 1, 3]], data[[0, 1, 3], :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, ['000100', '000102']], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput the history data of shares 0, 1 and 2\n', hp[:, 0: 3])
self.assertTrue(np.allclose(hp[:, 0: 3], data[0:3, :, :], equal_nan=True))
print('==========================\noutput all history data of shares 000100 and 000102\n')
self.assertTrue(np.allclose(hp[:, '000100, 000102'], data[[0, 2], :, :], equal_nan=True))
print('==========================\noutput the history data of days 0-7 for all shares\n')
self.assertTrue(np.allclose(hp[:, :, 0:8], data[:, 0:8, :], equal_nan=True))
print('==========================\noutput the history data of days 0-7 for share 000100\n')
self.assertTrue(np.allclose(hp[:, '000100', 0:8], data[0, 0:8, :], equal_nan=True))
print('==========================\nstart testing multi-axis slicing of HistoryPanel object')
print('==========================\noutput close and open data of shares 000100 and 000102\n',
hp['close,open', ['000100', '000102']])
print('==========================\noutput close and open data of shares 000100 and 000102 (comma-separated labels)\n',
hp['close,open', '000100, 000102'])
print(f'historyPanel: hp:\n{hp}')
print(f'data is:\n{data}')
hp.htypes = 'open,high,low,close'
hp.info()
hp.shares = ['000300', '600227', '600222', '000123', '000129']
hp.info()
def test_relabel(self):
new_shares_list = ['000001', '000002', '000003', '000004', '000005']
new_shares_str = '000001, 000002, 000003, 000004, 000005'
new_htypes_list = ['close', 'volume', 'value', 'exchange']
new_htypes_str = 'close, volume, value, exchange'
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_list)
print(temp_hp.info())
print(temp_hp.htypes)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(shares=new_shares_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.htypes, temp_hp.htypes)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.shares, new_shares_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_list)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
temp_hp = self.hp.copy()
temp_hp.re_label(htypes=new_htypes_str)
self.assertTrue(np.allclose(self.hp.values, temp_hp.values))
self.assertEqual(self.hp.shares, temp_hp.shares)
self.assertEqual(self.hp.hdates, temp_hp.hdates)
self.assertEqual(temp_hp.htypes, new_htypes_list)
print(f'test errors raising')
temp_hp = self.hp.copy()
self.assertRaises(AssertionError, temp_hp.re_label, htypes=new_shares_str)
self.assertRaises(TypeError, temp_hp.re_label, htypes=123)
self.assertRaises(AssertionError, temp_hp.re_label, htypes='wrong input!')
def test_csv_to_hp(self):
pass
def test_hdf_to_hp(self):
pass
def test_hp_join(self):
# TODO: this needs strengthening: use concrete examples to confirm that hp_join produces
# TODO: correct results, especially with different shares, htypes and hdates, and to check
# TODO: whether they combine correctly when given in different orders
print(f'join two simple HistoryPanels with same shares')
temp_hp = self.hp.join(self.hp2, same_shares=True)
self.assertIsInstance(temp_hp, qt.HistoryPanel)
def test_df_to_hp(self):
print(f'test converting DataFrame to HistoryPanel')
data = np.random.randint(10, size=(10, 5))
df1 = pd.DataFrame(data)
df2 = pd.DataFrame(data, columns=qt.str_to_list(self.shares))
df3 = pd.DataFrame(data[:, 0:4])
df4 = pd.DataFrame(data[:, 0:4], columns=qt.str_to_list(self.htypes))
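# dataframe_to_hp interprets the DataFrame columns either as shares or as htypes,
# depending on the column_type argument, as the conversions below demonstrate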
hp = qt.dataframe_to_hp(df1, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, [0, 1, 2, 3, 4])
self.assertEqual(hp.htypes, ['close'])
self.assertEqual(hp.hdates, [pd.Timestamp('1970-01-01 00:00:00'),
pd.Timestamp('1970-01-01 00:00:00.000000001'),
pd.Timestamp('1970-01-01 00:00:00.000000002'),
pd.Timestamp('1970-01-01 00:00:00.000000003'),
pd.Timestamp('1970-01-01 00:00:00.000000004'),
pd.Timestamp('1970-01-01 00:00:00.000000005'),
pd.Timestamp('1970-01-01 00:00:00.000000006'),
pd.Timestamp('1970-01-01 00:00:00.000000007'),
pd.Timestamp('1970-01-01 00:00:00.000000008'),
pd.Timestamp('1970-01-01 00:00:00.000000009')])
hp = qt.dataframe_to_hp(df2, shares=self.shares, htypes='close')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, qt.str_to_list(self.shares))
self.assertEqual(hp.htypes, ['close'])
hp = qt.dataframe_to_hp(df3, shares='000100', column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, [0, 1, 2, 3])
hp = qt.dataframe_to_hp(df4, shares='000100', htypes=self.htypes, column_type='htypes')
self.assertIsInstance(hp, qt.HistoryPanel)
self.assertEqual(hp.shares, ['000100'])
self.assertEqual(hp.htypes, qt.str_to_list(self.htypes))
hp.info()
self.assertRaises(KeyError, qt.dataframe_to_hp, df1)
def test_to_dataframe(self):
""" 测试HistoryPanel对象的to_dataframe方法
"""
print(f'START TEST == test_to_dataframe')
print(f'test converting test hp to dataframe with share == "000102":')
df_test = self.hp.to_dataframe(share='000102')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000102'], values))
print(f'test DataFrame conversion with share == "000100"')
df_test = self.hp.to_dataframe(share='000100')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.htypes), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp[:, '000100'], values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, share=3.0)
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, share='000300')
print(f'test DataFrame conversion with htype == "close"')
df_test = self.hp.to_dataframe(htype='close')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['close'].T, values))
print(f'test DataFrame conversion with htype == "high"')
df_test = self.hp.to_dataframe(htype='high')
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
self.assertTrue(np.allclose(self.hp['high'].T, values))
print(f'test DataFrame conversion with htype == "high" and dropna')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[4:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values))].reshape(9, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion with htype == "high", dropna and treat infs as na')
v = self.hp.values.astype('float')
v[:, 3, :] = np.nan
v[:, 4, :] = np.inf
test_hp = qt.HistoryPanel(v, levels=self.shares, columns=self.htypes, rows=self.index)
df_test = test_hp.to_dataframe(htype='high', dropna=True, inf_as_na=True)
self.assertIsInstance(df_test, pd.DataFrame)
self.assertEqual(list(self.hp.hdates[:3]) + list(self.hp.hdates[5:]), list(df_test.index))
self.assertEqual(list(self.hp.shares), list(df_test.columns))
values = df_test.values
target_values = test_hp['high'].T
target_values = target_values[np.where(~np.isnan(target_values) & ~np.isinf(target_values))].reshape(8, 5)
self.assertTrue(np.allclose(target_values, values))
print(f'test DataFrame conversion error: type incorrect')
self.assertRaises(AssertionError, self.hp.to_dataframe, htype=pd.DataFrame())
print(f'test DataFrame error raising with share not found error')
self.assertRaises(KeyError, self.hp.to_dataframe, htype='non_type')
print(f'test KeyError raising when both or neither of share and htype is given')
self.assertRaises(KeyError, self.hp.to_dataframe)
self.assertRaises(KeyError, self.hp.to_dataframe, share='000100', htype='close')
def test_to_df_dict(self):
"""测试HistoryPanel公有方法to_df_dict"""
print('test convert history panel slice by share')
df_dict = self.hp.to_df_dict('share')
self.assertEqual(self.hp.shares, list(df_dict.keys()))
df_dict = self.hp.to_df_dict()
self.assertEqual(self.hp.shares, list(df_dict.keys()))
print('test convert historypanel slice by htype ')
df_dict = self.hp.to_df_dict('htype')
self.assertEqual(self.hp.htypes, list(df_dict.keys()))
print('test raise assertion error')
self.assertRaises(AssertionError, self.hp.to_df_dict, by='random text')
self.assertRaises(AssertionError, self.hp.to_df_dict, by=3)
print('test empty hp')
df_dict = qt.HistoryPanel().to_df_dict('share')
self.assertEqual(df_dict, {})
def test_stack_dataframes(self):
print('test stack dataframes in a list')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
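# stacking aligns the frames on the union of their date indexes and column labels and
# fills positions missing from a frame with np.nan, as the expected value arrays below show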
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares=['000100', '000200', '000300'])
hp2 = stack_dataframes([df1, df2, df3], stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000100', '000200', '000300'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes=['close', 'high', 'low'])
hp4 = stack_dataframes([df1, df2, df3], stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
print('test stack dataframes in a dict')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [2, 3, 4, 5], 'c': [3, 4, 5, 6]})
df1.index = ['20200101', '20200102', '20200103', '20200104']
df2 = pd.DataFrame({'b': [4, 3, 2, 1], 'd': [1, 1, 1, 1], 'c': [6, 5, 4, 3]})
df2.index = ['20200101', '20200102', '20200104', '20200105']
df3 = pd.DataFrame({'a': [6, 6, 6, 6], 'd': [4, 4, 4, 4], 'b': [2, 4, 6, 8]})
df3.index = ['20200101', '20200102', '20200103', '20200106']
values1 = np.array([[[1., 2., 3., np.nan],
[2., 3., 4., np.nan],
[3., 4., 5., np.nan],
[4., 5., 6., np.nan],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan]],
[[np.nan, 4., 6., 1.],
[np.nan, 3., 5., 1.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 2., 4., 1.],
[np.nan, 1., 3., 1.],
[np.nan, np.nan, np.nan, np.nan]],
[[6., 2., np.nan, 4.],
[6., 4., np.nan, 4.],
[6., 6., np.nan, 4.],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan],
[6., 8., np.nan, 4.]]])
values2 = np.array([[[1., np.nan, 6.],
[2., np.nan, 6.],
[3., np.nan, 6.],
[4., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 6.]],
[[2., 4., 2.],
[3., 3., 4.],
[4., np.nan, 6.],
[5., 2., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 8.]],
[[3., 6., np.nan],
[4., 5., np.nan],
[5., np.nan, np.nan],
[6., 4., np.nan],
[np.nan, 3., np.nan],
[np.nan, np.nan, np.nan]],
[[np.nan, 1., 4.],
[np.nan, 1., 4.],
[np.nan, np.nan, 4.],
[np.nan, 1., np.nan],
[np.nan, 1., np.nan],
[np.nan, np.nan, 4.]]])
print(df1.rename(index=pd.to_datetime))
print(df2.rename(index=pd.to_datetime))
print(df3.rename(index=pd.to_datetime))
hp1 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares')
hp2 = stack_dataframes(dfs={'000001.SZ': df1, '000002.SZ': df2, '000003.SZ': df3},
stack_along='shares',
shares='000100, 000300, 000200')
print('hp1 is:\n', hp1)
print('hp2 is:\n', hp2)
self.assertEqual(hp1.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp1.shares, ['000001.SZ', '000002.SZ', '000003.SZ'])
self.assertTrue(np.allclose(hp1.values, values1, equal_nan=True))
self.assertEqual(hp2.htypes, ['a', 'b', 'c', 'd'])
self.assertEqual(hp2.shares, ['000100', '000300', '000200'])
self.assertTrue(np.allclose(hp2.values, values1, equal_nan=True))
hp3 = stack_dataframes(dfs={'close': df1, 'high': df2, 'low': df3},
stack_along='htypes')
hp4 = stack_dataframes(dfs={'close': df1, 'low': df2, 'high': df3},
stack_along='htypes',
htypes='open, close, high')
print('hp3 is:\n', hp3.values)
print('hp4 is:\n', hp4.values)
self.assertEqual(hp3.htypes, ['close', 'high', 'low'])
self.assertEqual(hp3.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp3.values, values2, equal_nan=True))
self.assertEqual(hp4.htypes, ['open', 'close', 'high'])
self.assertEqual(hp4.shares, ['a', 'b', 'c', 'd'])
self.assertTrue(np.allclose(hp4.values, values2, equal_nan=True))
def test_to_csv(self):
pass
def test_to_hdf(self):
pass
def test_fill_na(self):
print(self.hp)
new_values = self.hp.values.astype(float)
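# plant np.nan at a few scattered positions, rebuild the panel, then verify that
# fillna() replaces exactly those positions with the given value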
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = np.nan
print(new_values)
temp_hp = qt.HistoryPanel(values=new_values, levels=self.hp.levels, rows=self.hp.rows, columns=self.hp.columns)
self.assertTrue(np.allclose(temp_hp.values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]], np.nan, equal_nan=True))
temp_hp.fillna(2.3)
new_values[[0, 1, 3, 2], [1, 3, 0, 2], [1, 3, 2, 2]] = 2.3
self.assertTrue(np.allclose(temp_hp.values,
new_values, equal_nan=True))
def test_get_history_panel(self):
# TODO: implement this test case
# test get only one line of data
pass
def test_get_price_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20200101'
end = '20200131'
htypes = 'open, high, low, close'
target_price_000039 = [[9.45, 9.49, 9.12, 9.17],
[9.46, 9.56, 9.4, 9.5],
[9.7, 9.76, 9.5, 9.51],
[9.7, 9.75, 9.7, 9.72],
[9.73, 9.77, 9.7, 9.73],
[9.83, 9.85, 9.71, 9.72],
[9.85, 9.85, 9.75, 9.79],
[9.96, 9.96, 9.83, 9.86],
[9.87, 9.94, 9.77, 9.93],
[9.82, 9.9, 9.76, 9.87],
[9.8, 9.85, 9.77, 9.82],
[9.84, 9.86, 9.71, 9.72],
[9.83, 9.93, 9.81, 9.86],
[9.7, 9.87, 9.7, 9.82],
[9.83, 9.86, 9.69, 9.79],
[9.8, 9.94, 9.8, 9.86]]
target_price_600748 = [[5.68, 5.68, 5.32, 5.37],
[5.62, 5.68, 5.46, 5.65],
[5.72, 5.72, 5.61, 5.62],
[5.76, 5.77, 5.6, 5.73],
[5.78, 5.84, 5.73, 5.75],
[5.89, 5.91, 5.76, 5.77],
[6.03, 6.04, 5.87, 5.89],
[5.94, 6.07, 5.94, 6.02],
[5.96, 5.98, 5.88, 5.97],
[6.04, 6.06, 5.95, 5.96],
[5.98, 6.04, 5.96, 6.03],
[6.1, 6.11, 5.89, 5.94],
[6.02, 6.12, 6., 6.1],
[5.96, 6.05, 5.88, 6.01],
[6.03, 6.03, 5.95, 5.99],
[6.02, 6.12, 5.99, 5.99]]
target_price_000040 = [[3.63, 3.83, 3.63, 3.65],
[3.99, 4.07, 3.97, 4.03],
[4.1, 4.11, 3.93, 3.95],
[4.12, 4.13, 4.06, 4.11],
[4.13, 4.19, 4.07, 4.13],
[4.27, 4.28, 4.11, 4.12],
[4.37, 4.38, 4.25, 4.29],
[4.34, 4.5, 4.32, 4.41],
[4.28, 4.35, 4.2, 4.34],
[4.41, 4.43, 4.29, 4.31],
[4.42, 4.45, 4.36, 4.41],
[4.51, 4.56, 4.33, 4.35],
[4.35, 4.55, 4.31, 4.55],
[4.3, 4.41, 4.22, 4.36],
[4.27, 4.44, 4.23, 4.34],
[4.23, 4.27, 4.18, 4.25]]
print(f'test get price type raw data with single thread')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d')
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
print(f'test get price type raw data with multiple threads')
df_list = get_price_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, freq='d', parallel=10)
self.assertIsInstance(df_list, dict)
self.assertEqual(len(df_list), 3)
self.assertTrue(np.allclose(df_list['000039.SZ'].values, np.array(target_price_000039)))
self.assertTrue(np.allclose(df_list['600748.SH'].values, np.array(target_price_600748)))
self.assertTrue(np.allclose(df_list['000040.SZ'].values, np.array(target_price_000040)))
print(f'in get price type raw data, got DataFrames: \n"000039.SZ":\n'
f'{df_list["000039.SZ"]}\n"600748.SH":\n'
f'{df_list["600748.SH"]}\n"000040.SZ":\n{df_list["000040.SZ"]}')
def test_get_financial_report_type_raw_data(self):
shares = '000039.SZ, 600748.SH, 000040.SZ'
start = '20160101'
end = '20201231'
htypes = 'eps,basic_eps,diluted_eps,total_revenue,revenue,total_share,' \
'cap_rese,undistr_porfit,surplus_rese,net_profit'
target_eps_000039 = [[1.41],
[0.1398],
[-0.0841],
[-0.1929],
[0.37],
[0.1357],
[0.1618],
[0.1191],
[1.11],
[0.759],
[0.3061],
[0.1409],
[0.81],
[0.4187],
[0.2554],
[0.1624],
[0.14],
[-0.0898],
[-0.1444],
[0.1291]]
target_eps_600748 = [[0.41],
[0.22],
[0.22],
[0.09],
[0.42],
[0.23],
[0.22],
[0.09],
[0.36],
[0.16],
[0.15],
[0.07],
[0.47],
[0.19],
[0.12],
[0.07],
[0.32],
[0.22],
[0.14],
[0.07]]
target_eps_000040 = [[-0.6866],
[-0.134],
[-0.189],
[-0.036],
[-0.6435],
[0.05],
[0.062],
[0.0125],
[0.8282],
[1.05],
[0.985],
[0.811],
[0.41],
[0.242],
[0.113],
[0.027],
[0.19],
[0.17],
[0.17],
[0.064]]
target_basic_eps_000039 = [[1.3980000e-01, 1.3980000e-01, 6.3591954e+10, 6.3591954e+10],
[-8.4100000e-02, -8.4100000e-02, 3.9431807e+10, 3.9431807e+10],
[-1.9290000e-01, -1.9290000e-01, 1.5852177e+10, 1.5852177e+10],
[3.7000000e-01, 3.7000000e-01, 8.5815341e+10, 8.5815341e+10],
[1.3570000e-01, 1.3430000e-01, 6.1660271e+10, 6.1660271e+10],
[1.6180000e-01, 1.6040000e-01, 4.2717729e+10, 4.2717729e+10],
[1.1910000e-01, 1.1900000e-01, 1.9099547e+10, 1.9099547e+10],
[1.1100000e+00, 1.1000000e+00, 9.3497622e+10, 9.3497622e+10],
[7.5900000e-01, 7.5610000e-01, 6.6906147e+10, 6.6906147e+10],
[3.0610000e-01, 3.0380000e-01, 4.3560398e+10, 4.3560398e+10],
[1.4090000e-01, 1.4050000e-01, 1.9253639e+10, 1.9253639e+10],
[8.1000000e-01, 8.1000000e-01, 7.6299930e+10, 7.6299930e+10],
[4.1870000e-01, 4.1710000e-01, 5.3962706e+10, 5.3962706e+10],
[2.5540000e-01, 2.5440000e-01, 3.3387152e+10, 3.3387152e+10],
[1.6240000e-01, 1.6200000e-01, 1.4675987e+10, 1.4675987e+10],
[1.4000000e-01, 1.4000000e-01, 5.1111652e+10, 5.1111652e+10],
[-8.9800000e-02, -8.9800000e-02, 3.4982614e+10, 3.4982614e+10],
[-1.4440000e-01, -1.4440000e-01, 2.3542843e+10, 2.3542843e+10],
[1.2910000e-01, 1.2860000e-01, 1.0412416e+10, 1.0412416e+10],
[7.2000000e-01, 7.1000000e-01, 5.8685804e+10, 5.8685804e+10]]
target_basic_eps_600748 = [[2.20000000e-01, 2.20000000e-01, 5.29423397e+09, 5.29423397e+09],
[2.20000000e-01, 2.20000000e-01, 4.49275653e+09, 4.49275653e+09],
[9.00000000e-02, 9.00000000e-02, 1.59067065e+09, 1.59067065e+09],
[4.20000000e-01, 4.20000000e-01, 8.86555586e+09, 8.86555586e+09],
[2.30000000e-01, 2.30000000e-01, 5.44850143e+09, 5.44850143e+09],
[2.20000000e-01, 2.20000000e-01, 4.34978927e+09, 4.34978927e+09],
[9.00000000e-02, 9.00000000e-02, 1.73793793e+09, 1.73793793e+09],
[3.60000000e-01, 3.60000000e-01, 8.66375241e+09, 8.66375241e+09],
[1.60000000e-01, 1.60000000e-01, 4.72875116e+09, 4.72875116e+09],
[1.50000000e-01, 1.50000000e-01, 3.76879016e+09, 3.76879016e+09],
[7.00000000e-02, 7.00000000e-02, 1.31785454e+09, 1.31785454e+09],
[4.70000000e-01, 4.70000000e-01, 7.23391685e+09, 7.23391685e+09],
[1.90000000e-01, 1.90000000e-01, 3.76072215e+09, 3.76072215e+09],
[1.20000000e-01, 1.20000000e-01, 2.35845364e+09, 2.35845364e+09],
[7.00000000e-02, 7.00000000e-02, 1.03831865e+09, 1.03831865e+09],
[3.20000000e-01, 3.20000000e-01, 6.48880919e+09, 6.48880919e+09],
[2.20000000e-01, 2.20000000e-01, 3.72209142e+09, 3.72209142e+09],
[1.40000000e-01, 1.40000000e-01, 2.22563924e+09, 2.22563924e+09],
[7.00000000e-02, 7.00000000e-02, 8.96647052e+08, 8.96647052e+08],
[4.80000000e-01, 4.80000000e-01, 6.61917508e+09, 6.61917508e+09]]
target_basic_eps_000040 = [[-1.34000000e-01, -1.34000000e-01, 2.50438755e+09, 2.50438755e+09],
[-1.89000000e-01, -1.89000000e-01, 1.32692347e+09, 1.32692347e+09],
[-3.60000000e-02, -3.60000000e-02, 5.59073338e+08, 5.59073338e+08],
[-6.43700000e-01, -6.43700000e-01, 6.80576162e+09, 6.80576162e+09],
[5.00000000e-02, 5.00000000e-02, 6.38891620e+09, 6.38891620e+09],
[6.20000000e-02, 6.20000000e-02, 5.23267082e+09, 5.23267082e+09],
[1.25000000e-02, 1.25000000e-02, 2.22420874e+09, 2.22420874e+09],
[8.30000000e-01, 8.30000000e-01, 8.67628947e+09, 8.67628947e+09],
[1.05000000e+00, 1.05000000e+00, 5.29431716e+09, 5.29431716e+09],
[9.85000000e-01, 9.85000000e-01, 3.56822382e+09, 3.56822382e+09],
[8.11000000e-01, 8.11000000e-01, 1.06613439e+09, 1.06613439e+09],
[4.10000000e-01, 4.10000000e-01, 8.13102532e+09, 8.13102532e+09],
[2.42000000e-01, 2.42000000e-01, 5.17971521e+09, 5.17971521e+09],
[1.13000000e-01, 1.13000000e-01, 3.21704120e+09, 3.21704120e+09],
[2.70000000e-02, 2.70000000e-02, 8.41966738e+08, 8.24272235e+08],
[1.90000000e-01, 1.90000000e-01, 3.77350171e+09, 3.77350171e+09],
[1.70000000e-01, 1.70000000e-01, 2.38643892e+09, 2.38643892e+09],
[1.70000000e-01, 1.70000000e-01, 1.29127117e+09, 1.29127117e+09],
[6.40000000e-02, 6.40000000e-02, 6.03256858e+08, 6.03256858e+08],
[1.30000000e-01, 1.30000000e-01, 1.66572918e+09, 1.66572918e+09]]
target_total_share_000039 = [[3.5950140e+09, 4.8005360e+09, 2.1573660e+10, 3.5823430e+09],
[3.5860750e+09, 4.8402300e+09, 2.0750827e+10, 3.5823430e+09],
[3.5860750e+09, 4.9053550e+09, 2.0791307e+10, 3.5823430e+09],
[3.5845040e+09, 4.8813110e+09, 2.1482857e+10, 3.5823430e+09],
[3.5831490e+09, 4.9764250e+09, 2.0926816e+10, 3.2825850e+09],
[3.5825310e+09, 4.8501270e+09, 2.1020418e+10, 3.2825850e+09],
[2.9851110e+09, 5.4241420e+09, 2.2438350e+10, 3.2825850e+09],
[2.9849890e+09, 4.1284000e+09, 2.2082769e+10, 3.2825850e+09],
[2.9849610e+09, 4.0838010e+09, 2.1045994e+10, 3.2815350e+09],
[2.9849560e+09, 4.2491510e+09, 1.9694345e+10, 3.2815350e+09],
[2.9846970e+09, 4.2351600e+09, 2.0016361e+10, 3.2815350e+09],
[2.9828890e+09, 4.2096630e+09, 1.9734494e+10, 3.2815350e+09],
[2.9813960e+09, 3.4564240e+09, 1.8562738e+10, 3.2793790e+09],
[2.9803530e+09, 3.0759650e+09, 1.8076208e+10, 3.2793790e+09],
[2.9792680e+09, 3.1376690e+09, 1.7994776e+10, 3.2793790e+09],
[2.9785770e+09, 3.1265850e+09, 1.7495053e+10, 3.2793790e+09],
[2.9783640e+09, 3.1343850e+09, 1.6740840e+10, 3.2035780e+09],
[2.9783590e+09, 3.1273880e+09, 1.6578389e+10, 3.2035780e+09],
[2.9782780e+09, 3.1169280e+09, 1.8047639e+10, 3.2035780e+09],
[2.9778200e+09, 3.1818630e+09, 1.7663145e+10, 3.2035780e+09]]
target_total_share_600748 = [[1.84456289e+09, 2.60058426e+09, 5.72443733e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.72096899e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.65738237e+09, 4.58026529e+08],
[1.84456289e+09, 2.60058426e+09, 5.50257806e+09, 4.58026529e+08],
[1.84456289e+09, 2.59868164e+09, 5.16741523e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 5.14677280e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.94955591e+09, 4.44998882e+08],
[1.84456289e+09, 2.59684471e+09, 4.79001451e+09, 4.44998882e+08],
[1.84456289e+09, 3.11401684e+09, 4.46326988e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.45419136e+09, 4.01064256e+08],
[1.84456289e+09, 3.11596723e+09, 4.39652948e+09, 4.01064256e+08],
[1.84456289e+09, 3.18007783e+09, 4.26608403e+09, 4.01064256e+08],
[1.84456289e+09, 3.10935622e+09, 3.78417688e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.65806574e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.62063090e+09, 3.65651701e+08],
[1.84456289e+09, 3.10935622e+09, 3.50063915e+09, 3.65651701e+08],
[1.41889453e+09, 3.55940850e+09, 3.22272993e+09, 3.62124939e+08],
[1.41889453e+09, 3.56129650e+09, 3.11477476e+09, 3.62124939e+08],
[1.41889453e+09, 3.59632888e+09, 3.06836903e+09, 3.62124939e+08],
[1.08337087e+09, 3.37400726e+07, 3.00918704e+09, 3.62124939e+08]]
target_total_share_000040 = [[1.48687387e+09, 1.06757900e+10, 8.31900755e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757900e+10, 7.50177302e+08, 2.16091994e+08],
[1.48687387e+09, 1.06757899e+10, 9.90255974e+08, 2.16123282e+08],
[1.48687387e+09, 1.06757899e+10, 1.03109866e+09, 2.16091994e+08],
[1.48687387e+09, 1.06757910e+10, 2.07704745e+09, 2.16123282e+08],
[1.48687387e+09, 1.06757910e+10, 2.09608665e+09, 2.16123282e+08],
[1.48687387e+09, 1.06803833e+10, 2.13354083e+09, 2.16123282e+08],
[1.48687387e+09, 1.06804090e+10, 2.11489364e+09, 2.16123282e+08],
[1.33717327e+09, 8.87361727e+09, 2.42939924e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.34220254e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 2.16390368e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 1.07961915e+09, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 8.58866066e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 6.87024393e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.71554565e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361727e+09, 5.54241222e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 5.10059576e+08, 1.88489589e+08],
[1.33717327e+09, 8.87361726e+09, 4.59351639e+08, 1.88489589e+08],
[4.69593364e+08, 2.78355875e+08, 4.13430814e+08, 1.88489589e+08],
[4.69593364e+08, 2.74235459e+08, 3.83557678e+08, 1.88489589e+08]]
target_net_profit_000039 = [[np.nan],
[2.422180e+08],
[np.nan],
[2.510113e+09],
[np.nan],
[1.102220e+09],
[np.nan],
[4.068455e+09],
[np.nan],
[1.315957e+09],
[np.nan],
[3.158415e+09],
[np.nan],
[1.066509e+09],
[np.nan],
[7.349830e+08],
[np.nan],
[-5.411600e+08],
[np.nan],
[2.271961e+09]]
target_net_profit_600748 = [[np.nan],
[4.54341757e+08],
[np.nan],
[9.14476670e+08],
[np.nan],
[5.25360283e+08],
[np.nan],
[9.24502415e+08],
[np.nan],
[4.66560302e+08],
[np.nan],
[9.15265285e+08],
[np.nan],
[2.14639674e+08],
[np.nan],
[7.45093049e+08],
[np.nan],
[2.10967312e+08],
[np.nan],
[6.04572711e+08]]
target_net_profit_000040 = [[np.nan],
[-2.82458846e+08],
[np.nan],
[-9.57130872e+08],
[np.nan],
[9.22114527e+07],
[np.nan],
[1.12643819e+09],
[np.nan],
[1.31715269e+09],
[np.nan],
[5.39940093e+08],
[np.nan],
[1.51440838e+08],
[np.nan],
[1.75339071e+08],
[np.nan],
[8.04740415e+07],
[np.nan],
[6.20445815e+07]]
print('test get financial data, in multi thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=4)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# check that all returned items are of the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any of the returned data is empty
print(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and consistently ordered; if the fetched data is empty, skip the check
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
print('test get financial data, in single thread mode')
df_list = get_financial_report_type_raw_data(start=start, end=end, shares=shares, htypes=htypes, parallel=0)
self.assertIsInstance(df_list, tuple)
self.assertEqual(len(df_list), 4)
self.assertEqual(len(df_list[0]), 3)
self.assertEqual(len(df_list[1]), 3)
self.assertEqual(len(df_list[2]), 3)
self.assertEqual(len(df_list[3]), 3)
# check that all returned items are of the correct type
self.assertTrue(all(isinstance(item, pd.DataFrame) for subdict in df_list for item in subdict.values()))
# check whether any data is empty; network issues can cause empty results
self.assertFalse(all(item.empty for subdict in df_list for item in subdict.values()))
# check that each group of data is correct and consistently ordered; if the fetched data is empty, skip the check
if df_list[0]['000039.SZ'].empty:
print(f'income data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000039.SZ'].values, target_basic_eps_000039))
if df_list[0]['600748.SH'].empty:
print(f'income data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['600748.SH'].values, target_basic_eps_600748))
if df_list[0]['000040.SZ'].empty:
print(f'income data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[0]['000040.SZ'].values, target_basic_eps_000040))
if df_list[1]['000039.SZ'].empty:
print(f'indicator data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000039.SZ'].values, target_eps_000039))
if df_list[1]['600748.SH'].empty:
print(f'indicator data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['600748.SH'].values, target_eps_600748))
if df_list[1]['000040.SZ'].empty:
print(f'indicator data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[1]['000040.SZ'].values, target_eps_000040))
if df_list[2]['000039.SZ'].empty:
print(f'balance data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000039.SZ'].values, target_total_share_000039))
if df_list[2]['600748.SH'].empty:
print(f'balance data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['600748.SH'].values, target_total_share_600748))
if df_list[2]['000040.SZ'].empty:
print(f'balance data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[2]['000040.SZ'].values, target_total_share_000040))
if df_list[3]['000039.SZ'].empty:
print(f'cash flow data for "000039.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000039.SZ'].values, target_net_profit_000039, equal_nan=True))
if df_list[3]['600748.SH'].empty:
print(f'cash flow data for "600748.SH" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['600748.SH'].values, target_net_profit_600748, equal_nan=True))
if df_list[3]['000040.SZ'].empty:
print(f'cash flow data for "000040.SZ" is empty')
else:
self.assertTrue(np.allclose(df_list[3]['000040.SZ'].values, target_net_profit_000040, equal_nan=True))
def test_get_composite_type_raw_data(self):
pass
class TestUtilityFuncs(unittest.TestCase):
def setUp(self):
pass
def test_str_to_list(self):
self.assertEqual(str_to_list('a,b,c,d,e'), ['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str_to_list('a, b, c '), ['a', 'b', 'c'])
self.assertEqual(str_to_list('a, b: c', sep_char=':'), ['a,b', 'c'])
def test_list_or_slice(self):
str_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
self.assertEqual(qt.list_or_slice(slice(1, 2, 1), str_dict), slice(1, 2, 1))
self.assertEqual(qt.list_or_slice('open', str_dict), [1])
self.assertEqual(list(qt.list_or_slice('close, high, low', str_dict)), [0, 2, 3])
self.assertEqual(list(qt.list_or_slice('close:high', str_dict)), [0, 1, 2])
self.assertEqual(list(qt.list_or_slice(['open'], str_dict)), [1])
self.assertEqual(list(qt.list_or_slice(['open', 'high'], str_dict)), [1, 2])
self.assertEqual(list(qt.list_or_slice(0, str_dict)), [0])
self.assertEqual(list(qt.list_or_slice([0, 2], str_dict)), [0, 2])
self.assertEqual(list(qt.list_or_slice([True, False, True, False], str_dict)), [0, 2])
def test_label_to_dict(self):
target_list = [0, 1, 10, 100]
target_dict = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
target_dict2 = {'close': 0, 'open': 2, 'high': 1, 'low': 3}
self.assertEqual(qt.labels_to_dict('close, open, high, low', target_list), target_dict)
self.assertEqual(qt.labels_to_dict(['close', 'open', 'high', 'low'], target_list), target_dict)
self.assertEqual(qt.labels_to_dict('close, high, open, low', target_list), target_dict2)
self.assertEqual(qt.labels_to_dict(['close', 'high', 'open', 'low'], target_list), target_dict2)
def test_regulate_date_format(self):
self.assertEqual(regulate_date_format('2019/11/06'), '20191106')
self.assertEqual(regulate_date_format('2019-11-06'), '20191106')
self.assertEqual(regulate_date_format('20191106'), '20191106')
self.assertEqual(regulate_date_format('191106'), '20061119')
self.assertEqual(regulate_date_format('830522'), '19830522')
self.assertEqual(regulate_date_format(datetime.datetime(2010, 3, 15)), '20100315')
self.assertEqual(regulate_date_format(pd.Timestamp('2010.03.15')), '20100315')
self.assertRaises(ValueError, regulate_date_format, 'abc')
self.assertRaises(ValueError, regulate_date_format, '2019/13/43')
def test_list_to_str_format(self):
self.assertEqual(list_to_str_format(['close', 'open', 'high', 'low']),
'close,open,high,low')
self.assertEqual(list_to_str_format(['letters', ' ', '123 4', 123, ' kk l']),
'letters,,1234,kkl')
self.assertEqual(list_to_str_format('a string input'),
'a,string,input')
self.assertEqual(list_to_str_format('already,a,good,string'),
'already,a,good,string')
self.assertRaises(AssertionError, list_to_str_format, 123)
def test_is_trade_day(self):
"""test if the funcion maybe_trade_day() and is_market_trade_day() works properly
"""
date_trade = '20210401'
date_holiday = '20210102'
date_weekend = '20210424'
date_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
self.assertTrue(maybe_trade_day(date_seems_trade_day))
self.assertTrue(maybe_trade_day(date_too_early))
self.assertTrue(maybe_trade_day(date_too_late))
self.assertTrue(maybe_trade_day(date_christmas))
self.assertTrue(is_market_trade_day(date_trade))
self.assertFalse(is_market_trade_day(date_holiday))
self.assertFalse(is_market_trade_day(date_weekend))
self.assertFalse(is_market_trade_day(date_seems_trade_day))
self.assertFalse(is_market_trade_day(date_too_early))
self.assertFalse(is_market_trade_day(date_too_late))
self.assertTrue(is_market_trade_day(date_christmas))
self.assertFalse(is_market_trade_day(date_christmas, exchange='XHKG'))
date_trade = pd.to_datetime('20210401')
date_holiday = pd.to_datetime('20210102')
date_weekend = pd.to_datetime('20210424')
self.assertTrue(maybe_trade_day(date_trade))
self.assertFalse(maybe_trade_day(date_holiday))
self.assertFalse(maybe_trade_day(date_weekend))
def test_prev_trade_day(self):
"""test the function prev_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
prev_holiday = pd.to_datetime(date_holiday) - pd.Timedelta(2, 'd')
date_weekend = '20210424'
prev_weekend = pd.to_datetime(date_weekend) - pd.Timedelta(1, 'd')
date_seems_trade_day = '20210217'
prev_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(prev_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(prev_trade_day(date_holiday)),
pd.to_datetime(prev_holiday))
self.assertEqual(pd.to_datetime(prev_trade_day(date_weekend)),
pd.to_datetime(prev_weekend))
self.assertEqual(pd.to_datetime(prev_trade_day(date_seems_trade_day)),
pd.to_datetime(prev_seems_trade_day))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(prev_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(prev_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
def test_next_trade_day(self):
""" test the function next_trade_day()
"""
date_trade = '20210401'
date_holiday = '20210102'
next_holiday = pd.to_datetime(date_holiday) + pd.Timedelta(2, 'd')
date_weekend = '20210424'
next_weekend = pd.to_datetime(date_weekend) + pd.Timedelta(2, 'd')
date_seems_trade_day = '20210217'
next_seems_trade_day = '20210217'
date_too_early = '19890601'
date_too_late = '20230105'
date_christmas = '20201225'
self.assertEqual(pd.to_datetime(next_trade_day(date_trade)),
pd.to_datetime(date_trade))
self.assertEqual(pd.to_datetime(next_trade_day(date_holiday)),
pd.to_datetime(next_holiday))
self.assertEqual(pd.to_datetime(next_trade_day(date_weekend)),
pd.to_datetime(next_weekend))
self.assertEqual(pd.to_datetime(next_trade_day(date_seems_trade_day)),
pd.to_datetime(next_seems_trade_day))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_early)),
pd.to_datetime(date_too_early))
self.assertEqual(pd.to_datetime(next_trade_day(date_too_late)),
pd.to_datetime(date_too_late))
self.assertEqual(pd.to_datetime(next_trade_day(date_christmas)),
pd.to_datetime(date_christmas))
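# --------------------------------------------------------------------------
# Illustrative sketch (not the library's implementation): the tests above
# suggest maybe_trade_day() only rules out weekends and fixed holiday
# windows, while is_market_trade_day() consults a real exchange calendar.
# A minimal weekday/holiday-window check could look like the function below;
# the holiday ranges are assumptions for demonstration only.
import pandas as _pd

_HOLIDAY_WINDOWS = [("0101", "0103"), ("0501", "0505"), ("1001", "1007")]

def _maybe_trade_day_sketch(date) -> bool:
    """Rough check: a weekday that falls outside the fixed holiday windows."""
    d = _pd.to_datetime(date)
    if d.weekday() >= 5:  # Saturday / Sunday
        return False
    mmdd = d.strftime("%m%d")
    return not any(lo <= mmdd <= hi for lo, hi in _HOLIDAY_WINDOWS)

if __name__ == "__main__":
    assert _maybe_trade_day_sketch("20210401") is True   # regular weekday
    assert _maybe_trade_day_sketch("20210102") is False  # New Year window / weekend
    assert _maybe_trade_day_sketch("20210424") is False  # Saturday
# --------------------------------------------------------------------------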
import os
import pandas as pd
import numpy as np
import datetime
import gc
class Dataset(object):
def __init__(self, train_path = 'train.csv', test_path = 'test.csv', hist_trans_path = 'historical_transactions.csv', new_trans_path='new_merchant_transactions.csv',
new_merc_path='merchants.csv', base_dir='../data'):
self.train_path = os.path.join(base_dir,train_path)
self.test_path = os.path.join(base_dir,test_path)
self.hist_trans_path = os.path.join(base_dir, hist_trans_path)
self.new_trans_path = os.path.join(base_dir, new_trans_path)
self.new_merc_path = os.path.join(base_dir, new_merc_path)
self.base_dir = base_dir
def load_train(self):
print('load train data ...')
if not os.path.isfile(self.train_path):
print('{} - train path not found ! '.format(self.train_path))
return
return pd.read_csv(self.train_path, parse_dates=['first_active_month'])
def set_outlier_col(self, df_train):
# simply set
print('set train outlier ...')
df_train['outlier'] = 0
df_train.loc[df_train['target'] <= -30, 'outlier'] = 1
print('set outlier successfully')
def load_test(self):
print('load test data ... ')
if not os.path.isfile(self.test_path):
print('{} - test path not found ! '.format(self.test_path))
return
return pd.read_csv(self.test_path, parse_dates=['first_active_month'])
def get_new_columns(self, name, aggs):
return [name + '_' + k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
def fill_hist_missing(self,df_hist_trans,df_new_merchant_trans):
print('filling the missing value in hist ...')
for df in [df_hist_trans, df_new_merchant_trans]:
df['category_2'].fillna(1.0, inplace=True)
df['category_3'].fillna('A', inplace=True)
df['merchant_id'].fillna('M_ID_00a6ca8a8a', inplace=True)
def load_hist_new_merchant(self):
print('load history data ...')
if not os.path.isfile(self.hist_trans_path):
print('hist trans path not found ! ')
return
if not os.path.isfile(self.new_merc_path):
print('new merchant path not found !')
return
if not os.path.isfile(self.new_trans_path):
print('new hist trans path not found !')
return
df_hist_trans = pd.read_csv(self.hist_trans_path)
df_new_merchant_trans = pd.read_csv(self.new_trans_path)
self.fill_hist_missing(df_hist_trans, df_new_merchant_trans)
for df in [df_hist_trans, df_new_merchant_trans]:
df['purchase_date'] = pd.to_datetime(df['purchase_date'])
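# --------------------------------------------------------------------------
# Usage sketch for the get_new_columns() helper above (the column and
# aggregation names here are illustrative, not the actual transaction
# feature set). The helper simply flattens an aggregation spec into
# "<prefix>_<column>_<agg>" names, which matches the flat column order
# produced by DataFrame.groupby(...).agg(aggs).
import pandas as pd

if __name__ == "__main__":
    aggs = {"purchase_amount": ["sum", "mean"], "month_lag": ["max"]}
    new_cols = ["hist_" + k + "_" + agg for k in aggs.keys() for agg in aggs[k]]
    # -> ['hist_purchase_amount_sum', 'hist_purchase_amount_mean', 'hist_month_lag_max']
    df = pd.DataFrame({"card_id": ["a", "a", "b"],
                       "purchase_amount": [1.0, 2.0, 3.0],
                       "month_lag": [-1, 0, -2]})
    agg_df = df.groupby("card_id").agg(aggs)
    agg_df.columns = new_cols  # same ordering as the comprehension above
    print(agg_df)
# --------------------------------------------------------------------------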
import numpy as np
import csv
import pandas as pd
import matplotlib.pyplot as plt
import math
import tensorflow as tf
import seaborn as sns
import itertools
import operator
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.naive_bayes import GaussianNB, CategoricalNB
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, roc_auc_score, \
recall_score, precision_score, mean_squared_error
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import metrics, preprocessing, tree
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_classification
from sklearn.inspection import permutation_importance
# for data and modeling
# import keras
# from keras.callbacks import EarlyStopping
# from keras.wrappers.scikit_learn import KerasClassifier
# from keras.utils import np_utils
# from keras.models import Sequential
# from keras.layers import Dense, Dropout
# from tensorflow.keras import datasets, layers, models
from six import StringIO
from IPython.display import Image
import pydotplus
from ast import literal_eval
from collections import Counter
def heatmap_confmat(ytest, ypred, name):
labels = [0, 1]
conf_mat = confusion_matrix(ytest, ypred, labels=labels)
print(conf_mat)
# heatm = sns.heatmap(conf_mat, annot=True)
# print(heatm)
group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
group_counts = ["{0:0.0f}".format(value) for value in conf_mat.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in conf_mat.flatten() / np.sum(conf_mat)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names, group_counts, group_percentages)]
labels = np.asarray(labels).reshape(2, 2)
heat = sns.heatmap(conf_mat, annot=labels, fmt='', cmap='Blues')
heat.figure.savefig(name)
def plot_feature_importances(importance):
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.xlabel("Permutation importance")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("nb_heatmap_40.png")
def NaiveBayes(xtrain, ytrain, xtest, ytest, binary=False):
if binary:
nb = GaussianNB()
model = "GaussianNB"
else:
nb = CategoricalNB()
model = "CategoricalNB"
nb.fit(xtrain, ytrain)
nb.predict(xtest)
y_pred_nb = nb.predict(xtest)
y_prob_pred_nb = nb.predict_proba(xtest)
# how did our model perform?
count_misclassified = (ytest != y_pred_nb).sum()
print(model)
print("=" * 30)
print('Misclassified samples: {}'.format(count_misclassified))
accuracy = accuracy_score(ytest, y_pred_nb)
print('Accuracy: {:.5f}'.format(accuracy))
heatmap_confmat(ytest, y_pred_nb, "naive_bayes.png")
feature_importance_NB(nb, xtest, ytest)
print("Naive Bayes done")
def feature_importance_NB(model, xval, yval):
r = permutation_importance(model, xval, yval, n_repeats=30, random_state=0)
print(len(r))
imp = r.importances_mean
importance = np.add(imp[40:], imp[:40])
# importance = imp
# for i in r.importances_mean.argsort()[::-1]:
# if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
# print(f"{feature_names[i]: <8}" f"{r.importances_mean[i]: .3f}" f" +/- {r.importances_std[i]: .3f}")
plot_feature_importances(importance)
# importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
# plt.figure(figsize=(12, 10))
# plt.title("Feature importances")
# plt.xlabel("Permutation importance")
# plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
# plt.savefig("nb_heatmap_40.png")
def LogRegression(x, y, xtest, ytest):
# define the model
model = LogisticRegression()
# fit the model
model.fit(x, y)
# predict y
ypred = model.predict(xtest)
print(ypred[:10])
ypred = [1 if i > 0.6 else 0 for i in ypred]
accuracy = accuracy_score(ytest, ypred)
print(accuracy)
# heatmap_confmat(ytest, ypred, "logregression_heatmap.png")
imp = np.std(x, 0) * model.coef_[0]
# imp = model.coef_[0]
# importance = imp
importance = np.add(imp[40:], imp[:40])
feature_importance = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
print(feature_importance.sort_values('feature_importance', ascending=False).head(10))
# plt.barh([x for x in range(len(importance))], importance)
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("logreg_barplot_40.png")
# xpos = [x for x in range(len(importance))]
# plt.bar(xpos, importance)
# plt.xticks(xpos, feature_names_trimmed)
# plt.savefig("linreg.png")
# w = model.coef_[0]
# feature_importance = pd.DataFrame(feature_names, columns = ["feature"])
# feature_importance["importance"] = pow(math.e, w)
# feature_importance = feature_importance.sort_values(by = ["importance"], ascending=False)
# ax = feature_importance.plot.barh(x='feature', y='importance')
# plt.savefig("linreg.png")
print("Logistic Regression done")
def RandomForest(xtrain, ytrain, xtest, ytest):
# Create a Gaussian Classifier
model = RandomForestClassifier(n_estimators=100)
# Train the model using the training sets y_pred=clf.predict(X_test)
model.fit(xtrain, ytrain)
ypred = model.predict(xtest)
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# heatmap_confmat(ytest, ypred, "randomforest_heatmap.png")
scores = model.feature_importances_
scores = np.add(scores[40:], scores[:40])
print(sorted(zip(map(lambda x: round(x, 4), scores), feature_names),
reverse=True))
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': scores})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("rf_barplot_80_depth100.png")
# support = model.get_support()
# print(support)
# selected_feat = X_train.columns[(sel.get_support())]
# print(selected_feat)
# # PermutationImportance(model, xtest, ytest)
print("random forest done")
def DecisionTree(xtrain, ytrain, xtest, ytest, selection=False):
if selection:
feature_names = selection
model = DecisionTreeClassifier(max_depth=10)
# Train Decision Tree Classifer
model = model.fit(xtrain, ytrain)
# Predict the response for test dataset
ypred = model.predict(xtest)
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# VisualizationTree(model)
# heatmap_confmat(ytest, ypred, "decisiontree_heatmap_80.png")
# print("heatmap saved")
imp = model.feature_importances_
# Change for 40 or 80 features:
importance = np.add(imp[40:], imp[:40])
# importance = imp
feature_numbers = [36, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39]
feature_names = [feature_names_original[i] for i in feature_numbers]
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': importance})
print(importances.sort_values('feature_importance', ascending=False).head(10))
# std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
# PermutationImportance(model, xtest, ytest)
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("decisiontree.png")
print("decision tree done")
def VisualizationTree(clf):
feature_cols = [i for i in range(80)]
target_names = ['0', '1']
tree.plot_tree(clf,
feature_names=feature_cols,
class_names=target_names,
filled=True,
rounded=True)
plt.figure(figsize=(12, 12))
plt.savefig('tree_visualization.png', bbox_inches='tight', dpi=100, fontsize=18)
def NeuralNetwork(xtrain, ytrain, xtest, ytest, feed_forward=False):
print('X_train:', np.shape(xtrain))
print('y_train:', np.shape(ytrain))
print('X_test:', np.shape(xtest))
print('y_test:', np.shape(ytest))
model = Sequential()
# if feed_forward:
model.add(Dense(256, input_shape=(80,), activation="sigmoid"))
model.add(Dense(128, activation="sigmoid"))
model.add(Dense(10, activation="softmax"))
model.add(Dense(1, activation='hard_sigmoid'))
model.summary()
sgd = keras.optimizers.SGD(lr=0.5, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
# early stopping callback
# This callback will stop the training when there is no improvement in
# the validation loss for 10 consecutive epochs.
es = keras.callbacks.EarlyStopping(monitor='val_loss',
mode='min',
patience=10,
restore_best_weights=True) # important - otherwise you just return the last weigths...
# train_data = tf.data.Dataset.from_tensor_slices(xtrain, ytrain)
model.fit(xtrain, ytrain, epochs=30)
ypred = model.predict(xtest)
ypred = [1 if i > 0.5 else 0 for i in ypred]
loss_and_metrics = model.evaluate(xtest, ytest)
print('Loss = ', loss_and_metrics[0])
print('Accuracy = ', loss_and_metrics[1])
heatmap_confmat(ytest, ypred, "neuralnet.png")
print("neural network done")
def SVM(xtrain, ytrain, xtest, ytest):
model = make_pipeline(StandardScaler(), LinearSVC(random_state=0, tol=1e-5, multi_class="crammer_singer"))
model.fit(xtrain, ytrain)
imp = model.named_steps['linearsvc'].coef_
ypred = model.predict(xtest)
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# heatmap_confmat(ytest, ypred, "svm.png")
# Uncommend for 80 features
scores = np.add(imp[0][40:], imp[0][:40])
# Uncommend for 40 features
# scores = imp[0]
# scores = [float(i) / sum(scores) for i in scores]
sorted_index = sorted(range(len(scores)), key=lambda k: scores[k])
for i in sorted_index:
print(str(feature_names[i]) + ": " + str(scores[i]))
print("SVM done")
# features_names = ['input1', 'input2']
# f_importances(scores, features_names)
# imp = coef
# imp, names = zip(*sorted(zip(imp, names)))
# plt.barh(range(len(names)), imp, align='center')
# plt.yticks(range(len(names)), names)
# plt.savefig("barplot_svm_40.png")
importances = pd.DataFrame({'feature': feature_names, 'feature_importance': scores})
plt.figure(figsize=(12, 10))
plt.title("Feature importances")
plt.barh(importances["feature"].tolist(), importances["feature_importance"].tolist())
plt.savefig("svm_barplot_40.png")
def Boost(xtrain, ytrain, xtest, ytest):
# data_dmatrix = xgb.DMatrix(data=xtrain, label=ytrain)
print(len(xtrain[0]))
print(len(feature_names))
x_train = pd.DataFrame(data=xtrain, columns=feature_names)
x_test = pd.DataFrame(data=xtest, columns=feature_names)
dtrain = xgb.DMatrix(x_train, label=ytrain)
model = xgb.XGBRegressor(objective='reg:linear', colsample_bytree=0.3, learning_rate=0.1,
max_depth=5, alpha=10, n_estimators=10)
model.fit(x_train, ytrain)
ypred = model.predict(x_test)
ypred = [0 if i < 0.5 else 1 for i in ypred]
print("Accuracy:", metrics.accuracy_score(ytest, ypred))
# params = {"objective":"reg:linear",'colsample_bytree': 0.3,'learning_rate': 0.1,
# 'max_depth': 5, 'alpha': 10}
# cv_results = xgb.cv(dtrain=data_dmatrix, params=params, nfold=3,
# num_boost_round=50,early_stopping_rounds=10,metrics="rmse", as_pandas=True, seed=123)
# model.plot_tree(xg_reg,num_trees=0)
# plt.rcParams['figure.figsize'] = [50, 10]
# plt.savefig("tree_boost.png")
xgb.plot_importance(model, max_num_features=10)
plt.rcParams['figure.figsize'] = [20, 20]
plt.savefig("tree_boost.png")
# heatmap_confmat(ytest, ypred, "heatmap_boost.png")
# feature_importance(model, xtest, ypred)
# rmse = np.sqrt(mean_squared_error(y_test, preds))
# print("RMSE: %f" % (rmse))
def feature_importance(model, xval, yval):
importance = model.coef_
for i, v in enumerate(importance):
print('Feature: %0d, Score: %.5f' % (i, v))
# plot feature importance
plt.bar([x for x in range(len(importance))], importance)
plt.savefig("testfig.png")
def Get_xy(data, one_hot=False, binary=False, type_input="normal"):
# data = data.iloc[-101830:] # Uncommend for balanced dataset when using binary
# data_split = pd.read_csv("/content/drive/MyDrive/ExplainedKinshipData/data/split_features_data.csv")
if type_input == "combined":
f = data["combined"]
elif type_input == "normal":
data["feat1and2"] = data["feat1"] + data["feat2"]
f = data["feat1and2"]
else:
f = data["tuples"]
classes = data["ptype"].values
labels = ["sibs", "bb", "ss", "ms", "md", "fs", "fd", "gfgd", "gfgs", "gmgd", "gmgs"]
if binary:
classes = [1 if i in labels else 0 for i in classes]
if one_hot:
le = preprocessing.LabelEncoder()
le.fit(labels)
classes = le.transform(data["ptype"])
X_train, X_test, y_train, y_test = train_test_split(f, classes, test_size=0.3, random_state=42)
print("Data split")
if binary:
X_train = list(X_train)
X_test = list(X_test)
# print(X_train)
else:
X_train = np.array(list(map(list, X_train)))
y_train = np.squeeze(np.array(list(y_train)))
X_test = np.array(list(map(list, X_test)))
y_test = np.squeeze(np.array(list(y_test)))
# print(y_test)
train_values, train_counts = np.unique(y_train, return_counts=True)
# print(train_values, train_counts)
test_values, test_counts = np.unique(y_test, return_counts=True)
# print(test_values, test_counts)
# print(y_train.shape)
# print(X_train.shape)
return X_train, y_train, X_test, y_test
def PermutationImportance(model, xtest, ytest):
r = permutation_importance(model, xtest, ytest, n_repeats=30, random_state=0)
for i in r.importances_mean.argsort()[::-1]:
if r.importances_mean[i] - 2 * r.importances_std[i] > 0.013:
print(f"{feature_names[i]:<8}"
f"{r.importances_mean[i]:.3f}"
f" +/- {r.importances_std[i]:.3f}")
pd.set_option('display.expand_frame_repr', False)
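# --------------------------------------------------------------------------
# Stand-alone sketch of permutation importance as used in
# PermutationImportance() above (toy data and model; the thresholds and
# feature names in the real code are project-specific).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

if __name__ == "__main__":
    X, y = make_classification(n_samples=500, n_features=8, random_state=0)
    Xtr, Xte, ytr, yte = train_test_split(X, y, random_state=0)
    clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(Xtr, ytr)
    r = permutation_importance(clf, Xte, yte, n_repeats=10, random_state=0)
    for i in r.importances_mean.argsort()[::-1][:3]:
        print(f"feature {i}: {r.importances_mean[i]:.3f} +/- {r.importances_std[i]:.3f}")
# --------------------------------------------------------------------------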
from typing import Callable
import numpy as np
import pandas as pd
from tqdm import tqdm
# filter document types
DOC_TYPES_TO_REMOVE = [
'aaib_report',
'answer',
'asylum_support_decision',
'business_finance_support_scheme',
'cma_case',
'countryside_stewardship_grant',
'drug_safety_update',
'employment_appeal_tribunal_decision',
'employment_tribunal_decision',
'esi_fund',
'export_health_certificate',
'help_page',
'html_publication',
'international_development_fund',
'maib_report',
'manual',
'manual_section',
'medical_safety_alert',
'ministerial_role',
'person',
'protected_food_drink_name',
'raib_report',
'research_for_development_output',
'residential_property_tribunal_decision',
'service_standard_report',
'simple_smart_answer',
'statutory_instrument',
'tax_tribunal_decision',
'utaac_decision'
]
COLS_TO_KEEP = ['document_type', 'base_path', 'content_id', 'first_published_at', 'text', 'title']
def create_content_store_mask(content_store_df,
doc_types_to_remove=DOC_TYPES_TO_REMOVE):
doc_type_mask = content_store_df['document_type'].isin(doc_types_to_remove)
# filter dates
date_mask = content_store_df['first_published_at'].str[:4].fillna('2000').astype(int) > 2000
# filter withdrawn documents that we want to exclude
withdrawn_mask = content_store_df['withdrawn']
# combine masks
return ~withdrawn_mask & date_mask & ~doc_type_mask
def get_relevant_subset_of_content_store(content_store_df,
doc_types_to_remove=DOC_TYPES_TO_REMOVE,
cols_to_keep=COLS_TO_KEEP):
# This currently returns quite a large subset, which can make processing very slow
# select fewer document types, and/or only more recently published documents to get a smaller DF
content_mask = create_content_store_mask(content_store_df, doc_types_to_remove=doc_types_to_remove)
return content_store_df.loc[content_mask, cols_to_keep].copy()
# sentences may be more performant
def extract_sentences(original_text):
"""
Splits paragraph text into list of sentences. \n
Note: If text contains xml tags, this does not remove them.
:param original_text: Document text to split into list of sentences.
:return: List of sentences from document text passed in.
"""
return original_text.split('. ')
def get_lists_of_embeddings_and_text(subset_content_df: pd.DataFrame,
document_embedding_fn: Callable[[list], np.ndarray],
embedding_dimension: int) -> (np.array, list):
"""
Compute embeddings from documents.
:param subset_content_df: Dataframe of documents you want to compute embeddings from.
:param document_embedding_fn: Function to use for computing document embeddings, takes a list of sentences.
:param embedding_dimension: Number of dimensions/columns for embedding vectors.
:return: An array of document embedding vectors and list of text these embeddings relate to.
"""
# initialise an empty array for embeddings
collected_doc_embeddings = np.zeros((subset_content_df.shape[0], embedding_dimension))
collected_doc_text = []
# fill array with embeddings for all docs
# and store the original raw text extracted from the documents
for i in tqdm(range(subset_content_df.shape[0])):
doc = subset_content_df.iloc[i]['text']
try:
extracted_sentences = extract_sentences(doc)
except AttributeError:
collected_doc_text.append('')
continue
if len(extracted_sentences) > 0:
doc_embedding = document_embedding_fn(extracted_sentences)
collected_doc_embeddings[i, :] = doc_embedding
collected_doc_text.append(doc)
else:
collected_doc_text.append('')
return collected_doc_embeddings, collected_doc_text
def embeddings_and_text_to_dfs(subset_content_df, collected_doc_embeddings, collected_doc_text):
# converts embeddings array into dataframe, with content id as unique key
embeddings_df = pd.DataFrame(collected_doc_embeddings)
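# --------------------------------------------------------------------------
# Sketch of a document_embedding_fn that get_lists_of_embeddings_and_text()
# expects: it takes a list of sentences and returns one fixed-length vector.
# The bag-of-character-hash embedding below is a placeholder assumption,
# not the model actually used in the pipeline.
import numpy as np

EMBEDDING_DIM = 16  # assumed dimension for this sketch only

def toy_sentence_embedding(sentence: str) -> np.ndarray:
    vec = np.zeros(EMBEDDING_DIM)
    for ch in sentence.lower():
        vec[hash(ch) % EMBEDDING_DIM] += 1.0
    return vec

def toy_document_embedding(sentences: list) -> np.ndarray:
    # mean-pool the per-sentence vectors into a single document vector
    return np.mean([toy_sentence_embedding(s) for s in sentences], axis=0)

if __name__ == "__main__":
    doc = "Guidance is published on GOV.UK. It is split into sentences."
    emb = toy_document_embedding(doc.split('. '))
    assert emb.shape == (EMBEDDING_DIM,)
# --------------------------------------------------------------------------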
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 10 08:48:34 2021
@author: PatCa
"""
import numpy as np
import pandas as pd
import joblib
from pickle import dump
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
def PCA_Data():
#Import raw data
source_feature = pd.read_csv('data/features.csv')
source_label = pd.read_csv('data/labels.csv')
#Combine features and labels and copy
source_data = pd.merge(source_feature, source_label, on="trackID")
clean_data = source_data.copy()
#Remove na and duplicates
clean_data = clean_data.dropna()
clean_data = clean_data.drop_duplicates()
#Check type
clean_data = clean_data.astype({'time_signature':int,'key':int,'mode':int})
#Rename categorical values
mode_dict = {0:'minor', 1:'major'}
key_dict = {0:'C', 1:'D', 2:'E',3:'F', 4:'G', 5:'H', 6:'I', 7:'J', 8:'K', 9:'L',
10:'M', 11:'N'}
label_dict = {'soul and reggae':1, 'pop':2, 'punk':3, 'jazz and blues':4,
'dance and electronica':5,'folk':6, 'classic pop and rock':7, 'metal':8}
clean_data['mode'] = clean_data['mode'].replace(mode_dict)
clean_data['key'] = clean_data['key'].replace(key_dict)
clean_data['genre'] = clean_data['genre'].replace(label_dict)
#Remove small categories
clean_data = clean_data[clean_data.time_signature != 0]
#Separate out text feature "tags" and remove from clean_data dataframe
word_df = pd.DataFrame(data=clean_data[['tags','genre']].to_numpy(), columns=['tags','genre'])
clean_data = clean_data.drop(columns=['title','trackID'])
#%%Split data for training and testing
train_data = clean_data
y = train_data[['genre']] #Make Dataframe
training_data = train_data.loc[:,train_data.columns != 'genre']
(X_train, X_test, Y_train, Y_test) = train_test_split(training_data, y,
test_size=0.2,
random_state=42,stratify=y)
#Separate out text data
word_df_train = pd.concat((X_train['tags'],Y_train), axis=1)
word_df_test = pd.concat((X_test['tags'],Y_test), axis=1)
X_train = X_train.drop(columns='tags')
X_test = X_test.drop(columns='tags')
#%%Check feature correlation
nc_cols = ['loudness','tempo','time_signature','key','mode','duration']
cat_feat = ['time_signature','key','mode']
cont_data = X_train.drop(columns=nc_cols)
#%% PCA on cont_data2
pca_scaler = StandardScaler()
pca_scaler.fit(cont_data)
dump(pca_scaler, open('model_artifacts/pca_scaler.pkl', 'wb'))
cont_data_norm = pca_scaler.transform(cont_data)
pca = PCA(0.95).fit(cont_data_norm)
dump(pca, open('model_artifacts/pca.pkl', 'wb'))
num_pca_cols = pca.n_components_
data_pca_array = pca.transform(cont_data_norm)
cont_data_pca = pd.DataFrame(data=data_pca_array)
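# --------------------------------------------------------------------------
# Inference-side sketch: PCA_Data() above dumps the fitted StandardScaler
# and PCA so that new data can be projected with the same transforms. The
# file paths follow the ones used above; the assumption is that new rows
# contain the same continuous columns, in the same order, as at fit time.
import pandas as pd
from pickle import load

def project_new_rows(new_rows: pd.DataFrame) -> pd.DataFrame:
    with open('model_artifacts/pca_scaler.pkl', 'rb') as f:
        pca_scaler = load(f)
    with open('model_artifacts/pca.pkl', 'rb') as f:
        pca = load(f)
    # scale first, then project onto the retained principal components
    return pd.DataFrame(pca.transform(pca_scaler.transform(new_rows)))
# --------------------------------------------------------------------------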
import sys
sys.path.insert(0, '..')
import pandas as pd
from tqdm import tqdm
from config.config import *
def create_whole_train_split(train_meta, split_name):
train_meta = train_meta.copy()
split_dir = f'{DATA_DIR}/split/{split_name}'
os.makedirs(split_dir, exist_ok=True)
print('train nums: %s' % train_meta.shape[0])
print('train label nums: %s' % train_meta[TARGET].nunique())
train_meta['count'] = train_meta.groupby([TARGET])[ID].transform('count')
litter_image_df = train_meta[train_meta['count'] < 200]
train_rest_meta = train_meta[~train_meta[ID].isin(litter_image_df[ID].values)].reset_index(drop=True)
idx = 0
valid_indices = np.random.choice(len(train_rest_meta), 200, replace=False)
valid_split_df = train_rest_meta.loc[valid_indices]
train_indices = ~train_rest_meta[ID].isin(valid_split_df[ID].values)
train_split_df = train_rest_meta[train_indices]
train_split_df = pd.concat((train_split_df, litter_image_df), ignore_index=True)
fname = f'{split_dir}/random_train_cv{idx}.csv'
print("train: create split file: %s; "% (fname))
print(('nums: %d; label nums: %d; max label: %s')%
(train_split_df.shape[0],train_split_df[TARGET].nunique(),train_split_df[TARGET].max()))
train_split_df.to_csv(fname, index=False)
print(train_split_df.head())
fname = f'{split_dir}/random_valid_cv{idx}.csv'
print("valid: create split file: %s; "% (fname))
print(('nums: %d; label nums: %d; max label: %s') %
(valid_split_df.shape[0],valid_split_df[TARGET].nunique(),valid_split_df[TARGET].max()))
valid_split_df.to_csv(fname, index=False)
print(valid_split_df.head())
def create_v2x_split():
train_clean_df = pd.read_csv(f'{DATA_DIR}/raw/train_clean.csv', usecols=[TARGET])
train_df = pd.read_csv(f'{DATA_DIR}/raw/train.csv', usecols=[ID, TARGET])
train_df = train_df[train_df[TARGET].isin(train_clean_df[TARGET].unique())]
landmark_mapping = {l: i for i, l in enumerate(np.sort(train_df[TARGET].unique()))}
train_df[TARGET] = train_df[TARGET].map(landmark_mapping)
idx = 0
train_split_df = pd.read_csv(f'{DATA_DIR}/split/v2c/random_train_cv{idx}.csv')
valid_split_df = pd.read_csv(f'{DATA_DIR}/split/v2c/random_valid_cv{idx}.csv')
_train_df = train_df.set_index(ID)
assert np.array_equal(_train_df.loc[train_split_df[ID].values, TARGET], train_split_df[TARGET])
assert np.array_equal(_train_df.loc[valid_split_df[ID].values, TARGET], valid_split_df[TARGET])
del _train_df
train_df = train_df[~train_df[ID].isin(valid_split_df[ID])]
train_split_df = pd.merge(train_df, train_split_df, on=[ID, TARGET], how='left')
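# --------------------------------------------------------------------------
# Toy illustration of the split rule used in create_whole_train_split():
# classes with fewer images than a cutoff never reach the validation set,
# and validation is a small random sample of the remaining rows. Column
# names mirror ID/TARGET; the cutoff of 3 here is only for the toy example.
import numpy as np
import pandas as pd

if __name__ == "__main__":
    df = pd.DataFrame({"id": range(10),
                       "landmark_id": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2]})
    df["count"] = df.groupby("landmark_id")["id"].transform("count")
    rare = df[df["count"] < 3]                          # class 2: train-only
    rest = df[~df["id"].isin(rare["id"])].reset_index(drop=True)
    valid_idx = np.random.choice(len(rest), 2, replace=False)
    valid = rest.loc[valid_idx]
    train = pd.concat((rest[~rest["id"].isin(valid["id"])], rare),
                      ignore_index=True)
    assert not set(rare["id"]) & set(valid["id"])       # rare classes stay in train
# --------------------------------------------------------------------------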
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
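# Example call pattern for the checker above (illustrative; `float_frame` is
# a pytest fixture of float columns defined elsewhere in the test suite):
#   assert_stat_op_calc('sum', np.sum, float_frame, skipna_alternative=np.nansum)
#   assert_stat_op_calc('mean', np.mean, float_frame, check_dates=True)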
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
"""
This is a place to create a python wrapper for the BASGRA fortran model in fortarn_BASGRA_NZ
Author: <NAME>
Created: 12/08/2020 9:32 AM
"""
import os
import ctypes as ct
import numpy as np
import pandas as pd
from subprocess import Popen
from copy import deepcopy
from input_output_keys import param_keys, out_cols, days_harvest_keys, matrix_weather_keys_pet, \
matrix_weather_keys_penman
from warnings import warn
# compiled with gfortran 64,
# https://sourceforge.net/projects/mingwbuilds/files/host-windows/releases/4.8.1/64-bit/threads-posix/seh/x64-4.8.1-release-posix-seh-rev5.7z/download
# compilation code: compile_basgra_gfortran.bat
# define the dll library path
_libpath_pet = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ/BASGRA_pet.DLL')
_libpath_peyman = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ/BASGRA_peyman.DLL')
_bat_path = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ\\compile_BASGRA_gfortran.bat')
# this is the maximum number of weather days,
# it is hard coded into fortran_BASGRA_NZ/environment.f95 line 9
_max_weather_size = 36600
def run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False,
dll_path='default', supply_pet=True, auto_harvest=False, run_365_calendar=False):
"""
python wrapper for the fortran BASGRA code
changes to the fortran code may require changes to this function
runs the model for the period of the weather data
:param params: dictionary, see input_output_keys.py, README.md, or
https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PYfor more details
:param matrix_weather: pandas dataframe of weather data, maximum entries set in _max_weather_size in line 24
of this file (currently 36600)
see documentation for input columns at https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY
or README.md, note expected DOY will change depending on run_365_calendar
:param days_harvest: days harvest dataframe must be same length as matrix_weather entries
see documentation for input columns at https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY
or README.md, note expected DOY will change depending on run_365_calendar
:param doy_irr: a list of the days of year to irrigate on, must be integers acceptable values: (0-366)
:param verbose: boolean, if True the fortran function prints a number of statements for debugging purposes
(deprecated)
:param dll_path: path to the compiled fortran DLL to use, default was made on windows 10 64 bit, if the path does
not exist, this function will try to run the bat file to re-make the dll.
:param supply_pet: boolean, if True BASGRA expects pet to be supplied, if False the parameters required to
calculate pet from the peyman equation are expected,
the version must match the DLL if dll_path != 'default'
:param auto_harvest: boolean, if True then assumes data is formatted correctly for auto harvesting, if False, then
assumes data is formatted for manual harvesting (e.g. previous version) and re-formats
internally
:param run_365_calendar: boolean, if True then run on a 365 day calendar.
This expects that all leap days will be removed from matrix_weather and
days_harvest. DOY is expected to be between 1 and 365. This means that datetime
objects defined by year and doy will be incorrect. Instead use
get_month_day_to_nonleap_doy to map DOY to datetime via month and day. This is how
the index of the returned results is built. For example, for the date 2024-03-01
(2024 is a leap year) the dayofyear via a datetime object is 61, but with
run_365_calendar=True basgra expects day of year 60. The index of the
results will be a datetime object equivalent to 2024-03-01, so the output doy
will not match the index doy and there will be no value on 2024-02-29.
default False
:return: pd.DataFrame(index=datetime index, columns = out_cols)
"""
assert isinstance(supply_pet, bool), 'supply_pet param must be boolean'
assert isinstance(auto_harvest, bool), 'auto_harvest param must be boolean'
assert isinstance(run_365_calendar, bool), 'run_365_calendar must be boolean'
# define DLL library path
use_default_lib = False
if dll_path == 'default':
use_default_lib = True
if supply_pet:
dll_path = _libpath_pet
else:
dll_path = _libpath_peyman
# check that library path exists
if not os.path.exists(dll_path):
if use_default_lib:
# try to run the bat file
print('dll not found, trying to run bat to create DLL:\n{}'.format(_bat_path))
p = Popen(os.path.basename(_bat_path), cwd=os.path.dirname(_bat_path), shell=True)
stdout, stderr = p.communicate()
print('output of bat:\n{}\n{}'.format(stdout, stderr))
if not os.path.exists(dll_path):
raise EnvironmentError('default DLL path not found:\n'
'{}\n'
'see readme for more details:\n'
'{}'.format(dll_path, os.path.dirname(__file__) + 'README.md'))
else:
raise EnvironmentError('DLL path not found:\n{}'.format(dll_path))
# define expected weather keys
if supply_pet:
_matrix_weather_keys = matrix_weather_keys_pet
else:
_matrix_weather_keys = matrix_weather_keys_penman
doy_irr = np.atleast_1d(doy_irr)
# test the input variables
_test_basgra_inputs(params, matrix_weather, days_harvest, verbose, _matrix_weather_keys,
auto_harvest, doy_irr, run_365_calendar=run_365_calendar)
nout = len(out_cols)
ndays = len(matrix_weather)
nirr = len(doy_irr)
# define output indexes before data manipulation
out_index = matrix_weather.index
# copy everything and ensure order is correct
params = deepcopy(params)
matrix_weather = deepcopy(matrix_weather.loc[:, _matrix_weather_keys])
days_harvest = deepcopy(days_harvest.loc[:, days_harvest_keys])
# translate manual harvest inputs into fortran format
if not auto_harvest:
days_harvest = _trans_manual_harv(days_harvest, matrix_weather)
# get variables into right python types
params = np.array([params[e] for e in param_keys]).astype(float)
matrix_weather = matrix_weather.values.astype(float)
days_harvest = days_harvest.values.astype(float)
doy_irr = doy_irr.astype(np.int32)
# manage weather size,
weather_size = len(matrix_weather)
if weather_size < _max_weather_size:
temp = np.zeros((_max_weather_size - weather_size, matrix_weather.shape[1]), float)
matrix_weather = np.concatenate((matrix_weather, temp), 0)
y = np.zeros((ndays, nout), float) # cannot set these to nan's or it breaks fortran
# make pointers
# arrays # 99% sure this works
params_p = np.asfortranarray(params).ctypes.data_as(ct.POINTER(ct.c_double)) # 1d array, float
matrix_weather_p = np.asfortranarray(matrix_weather).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
days_harvest_p = np.asfortranarray(days_harvest).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
y_p = np.asfortranarray(y).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
doy_irr_p = np.asfortranarray(doy_irr).ctypes.data_as(ct.POINTER(ct.c_long))
# integers
ndays_p = ct.pointer(ct.c_int(ndays))
nirr_p = ct.pointer(ct.c_int(nirr))
nout_p = ct.pointer(ct.c_int(nout))
verb_p = ct.pointer(ct.c_bool(verbose))
# load DLL
for_basgra = ct.CDLL(dll_path)
# run BASGRA
for_basgra.BASGRA_(params_p, matrix_weather_p, days_harvest_p, ndays_p, nout_p, nirr_p, doy_irr_p, y_p, verb_p)
# format results
y_p = np.ctypeslib.as_array(y_p, (ndays, nout))
y_p = y_p.flatten(order='C').reshape((ndays, nout), order='F')
y_p = pd.DataFrame(y_p, out_index, out_cols)
if run_365_calendar:
mapper = get_month_day_to_nonleap_doy(key_doy=True)
strs = [f'{y}-{mapper[doy][0]:02d}-{mapper[doy][1]:02d}' for y, doy in zip(y_p.year.values.astype(int),
y_p.doy.values.astype(int))]
y_p.loc[:, 'date'] = pd.to_datetime(strs)
else:
strs = ['{}-{:03d}'.format(int(e), int(f)) for e, f in y_p[['year', 'doy']].itertuples(False, None)]
y_p.loc[:, 'date'] = pd.to_datetime(strs, format='%Y-%j')
y_p.set_index('date', inplace=True)
return y_p
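# Usage sketch for run_basgra_nz (illustrative only): the file names 'my_params.csv',
# 'my_weather.csv' and 'my_harvest.csv' and the doy_irr values below are assumptions, and the
# columns/keys must follow input_output_keys.py (param_keys, matrix_weather_keys_pet,
# days_harvest_keys).
def _example_run_basgra_nz():
    # hypothetical helper, never called by this module
    params = pd.read_csv('my_params.csv', index_col=0).iloc[:, 0].to_dict()
    matrix_weather = pd.read_csv('my_weather.csv')  # needs the matrix_weather_keys_pet columns
    days_harvest = pd.read_csv('my_harvest.csv')  # needs the days_harvest_keys columns
    doy_irr = [245, 246]  # placeholder irrigation days of year
    return run_basgra_nz(params, matrix_weather, days_harvest, doy_irr,
                         verbose=False, supply_pet=True)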
def _trans_manual_harv(days_harvest, matrix_weather):
"""
translates manual harvest data to the format expected by fortran, check the details of the data in here.
:param days_harvest: manual harvest data
:param matrix_weather: weather data, mostly to get the right size
:return: days_harvest (correct format for fortran code)
"""
days_harvest = days_harvest.set_index(['year', 'doy'])
days_harvest_out = pd.DataFrame({'year': matrix_weather.loc[:, 'year'],
'doy': matrix_weather.loc[:, 'doy'],
'frac_harv': np.zeros(len(matrix_weather)), # set filler values
'harv_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not harvest
'harv_targ': np.zeros(len(matrix_weather)), # set filler values
'weed_dm_frac': np.zeros(len(matrix_weather)) * np.nan, # set nas, filled later
'reseed_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not reseed
'reseed_basal': np.zeros(len(matrix_weather)), # set filler values
})
days_harvest_out = days_harvest_out.set_index(['year', 'doy'])
for k in set(days_harvest_keys) - {'year', 'doy'}:
days_harvest_out.loc[days_harvest.index, k] = days_harvest.loc[:, k]
days_harvest_out = days_harvest_out.reset_index()
# fill the weed fraction so that DMH_WEED is always calculated
if pd.isna(days_harvest_out.weed_dm_frac).iloc[0]:
warn('weed_dm_frac is na for the first day of simulation, setting to first valid weed_dm_frac\n'
'this does not affect the harvesting only the calculation of the DMH_weed variable.')
idx = np.where(pd.notna(days_harvest_out.weed_dm_frac))[0][0] # get first non-nan value
id_val = pd.Series(days_harvest_out.index).iloc[0]
days_harvest_out.loc[id_val, 'weed_dm_frac'] = days_harvest_out.loc[:, 'weed_dm_frac'].iloc[idx]
days_harvest_out.loc[:, 'weed_dm_frac'] = days_harvest_out.loc[:, 'weed_dm_frac'].fillna(method='ffill')
return days_harvest_out
def _test_basgra_inputs(params, matrix_weather, days_harvest, verbose, _matrix_weather_keys,
auto_harvest, doy_irr, run_365_calendar):
# check parameters
assert isinstance(verbose, bool), 'verbose must be boolean'
assert isinstance(params, dict)
assert set(params.keys()) == set(param_keys), 'incorrect params keys'
assert not any([np.isnan(e) for e in params.values()]), 'params cannot have na data'
assert params['reseed_harv_delay'] >= 1, 'harvest delay must be >=1'
assert params['reseed_harv_delay'] % 1 < 1e-5, 'harvest delay must effectively be an integer'
# check matrix weather
assert isinstance(matrix_weather, pd.DataFrame)
assert set(matrix_weather.keys()) == set(_matrix_weather_keys), 'incorrect keys for matrix_weather'
assert pd.api.types.is_integer_dtype(matrix_weather.doy), 'doy must be an integer datatype in matrix_weather'
assert pd.api.types.is_integer_dtype(matrix_weather.year), 'year must be an integer datatype in matrix_weather'
assert len(matrix_weather) <= _max_weather_size, 'maximum run size is {} days'.format(_max_weather_size)
assert not matrix_weather.isna().any().any(), 'matrix_weather cannot have na values'
# check to make sure there are no missing days in matrix_weather
start_year = matrix_weather['year'].min()
start_day = matrix_weather.loc[matrix_weather.year == start_year, 'doy'].min()
stop_year = matrix_weather['year'].max()
stop_day = matrix_weather.loc[matrix_weather.year == stop_year, 'doy'].max()
if run_365_calendar:
assert matrix_weather.doy.max() <= 365, 'expected to have leap days removed, and all doy between 1-365'
doy_day_mapper = get_month_day_to_nonleap_doy()
inv_doy_mapper = get_month_day_to_nonleap_doy(key_doy=True)
start_mon, start_dom = inv_doy_mapper[start_day]
stop_mon, stop_dom = inv_doy_mapper[stop_day]
expected_datetimes = pd.date_range(start=f'{start_year}-{start_mon:02d}-{start_dom:02d}',
end=f'{stop_year}-{stop_mon:02d}-{stop_dom:02d}')
expected_datetimes = expected_datetimes[~((expected_datetimes.month == 2) & (expected_datetimes.day == 29))]
expected_years = expected_datetimes.year.values
expected_days = np.array(
[doy_day_mapper[(m, d)] for m, d in zip(expected_datetimes.month, expected_datetimes.day)])
addmess = ' note that leap days are expected to have been removed from matrix weather'
else:
expected_datetimes = pd.date_range(start=pd.to_datetime('{}-{}'.format(start_year, start_day), format='%Y-%j'),
end=pd.to_datetime('{}-{}'.format(stop_year, stop_day), format='%Y-%j'))
expected_years = expected_datetimes.year.values
expected_days = expected_datetimes.dayofyear.values
addmess = ''
check = ((matrix_weather['year'].values == expected_years).all() and
(matrix_weather['doy'].values == expected_days).all())
assert check, 'the date range of matrix_weather contains missing or duplicate days' + addmess
# check harvest data
assert isinstance(days_harvest, pd.DataFrame)
assert set(days_harvest.keys()) == set(days_harvest_keys), 'incorrect keys for days_harvest'
assert pd.api.types.is_integer_dtype(days_harvest.doy)
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 7 13:38:07 2021
@author: bferrari
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from itertools import combinations
from xgboost import XGBClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV, ParameterGrid, train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, recall_score, f1_score, ConfusionMatrixDisplay, plot_confusion_matrix
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
dados_org = pd.read_excel('final_results.xlsx')
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import pandas as pd
import pytest
from gluonts.dataset import common
from gluonts.model import deepar
from gluonts.mx.distribution.inflated_beta import ZeroAndOneInflatedBetaOutput
from gluonts.mx.trainer import Trainer
@pytest.mark.parametrize("hybridize", [False, True])
def test_symbol_and_array(hybridize: bool):
# Tests for cases like the one presented in issue 1211, in which the Inflated
# Beta outputs used a method only available to arrays and not to symbols.
# We simply go through a short training to ensure no exceptions are raised.
data = [
{
"target": [0, 0.0460043, 0.263906, 0.4103112, 1],
"start": | pd.to_datetime("1999-01-04") | pandas.to_datetime |
# Import required packages
import requests
import json
from spatialite_database import SpatialiteDatabase
import sqlite3
import csv
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def get_report(data_url_path):
"""
Reads the data from the url and converts to text.
Additionally, it gives a report about data results.
Args:
data_url_path (str):
Path to the data
"""
print("Getting data results...")
# Values used to determine the data included in the report instance – such as attribute values,
# metric values and view filters. JSON format. Leave empty to import everything
data_details = ""
r = requests.get(data_url_path, data=data_details)
if r.ok:
print("Data results received...")
print("HTTP %i - %s" % (r.status_code, r.reason))
return r.text
else:
print("HTTP %i - %s" % (r.status_code, r.reason))
def export_to_json(data_url_path):
"""
Reads the data from the url and exports it to a json file.
Args:
data_url_path (str):
Path to the data
"""
print("Exporting data results to JSON file...")
r = get_report(data_url_path)
text_file = open("results.json", "w", encoding="utf8")
text_file.write(r)
print("Exported data results to JSON file...")
text_file.close()
def export_to_csv(data_url_path):
"""
Reads the data from the url and exports it to a CSV file.
Args:
data_url_path (str):
Path to the data
"""
print("Exporting data results to CSV file...")
csv_file = open('report_results.csv', "w", encoding="utf8")
csv_file.write("gender; title; first; last; street_number; street_name; city; state; country; postcode; "
"latitude;longitude; timezone_offset; timezone_description;email; dob; age; id_name; "
"id_value; picture_large; picture_medium; picture_thumbnail; nationality "+"\n")
csv_file.close()
# Load the data as json
r = get_report(data_url_path)
contents = json.loads(r)
# Write data results into a CSV
print("Writing data results to CSV file...")
for a1 in contents:
if a1 == 'results':
for a2 in contents[a1]:
print(a2)
val1 = a2['gender']
print(val1)
val2 = a2['name']['title']
print(val2)
val3 = a2['name']['first']
print(val3)
val4 = a2['name']['last']
print(val4)
val5 = str(a2['location']['street']['number'])
print(val5)
val6 = a2['location']['street']['name']
print(val6)
val7 = a2['location']['city']
print(val7)
val8 = a2['location']['state']
print(val8)
val9 = a2['location']['country']
print(val9)
val10 = str(a2['location']['postcode'])
print(val10)
val11 = str(a2['location']['coordinates']['latitude'])
print(val11)
val12 = str(a2['location']['coordinates']['longitude'])
print(val12)
val13 = str(a2['location']['timezone']['offset'])
print(val13)
val14 = a2['location']['timezone']['description']
print(val14)
val15 = a2['email']
print(val15)
val16 = str(a2['dob']['date'])
print(val16)
val17 = str(a2['dob']['age'])
print(val17)
val18 = a2['id']['name']
print(val18)
val19 = str(a2['id']['value'])
print(val19)
val20 = a2['picture']['large']
print(val20)
val21 = a2['picture']['medium']
print(val21)
val22 = a2['picture']['thumbnail']
print(val22)
val23 = a2['nat']
print(val23)
csv_file = open('report_results.csv', "a", encoding="utf8")
print("csv file opened")
csv_file.write(val1 + ";" + val2 + ";" + val3 + ";" + val4 + ";" + val5 + ";" + val6 + ";" + val7 + ";"
+ val8 + ";" + val9 + ";" + val10 + ";" + val11 + ";" + val12 + ";" + val13 + ";" + val14
+ ";" + val15 + ";" + val16 + ";" + val17 + ";" + val18 + ";" + val19 + ";" + val20 + ";"
+ val21 + ";" + val22 + ";" + val23 + "\n")
csv_file.close()
else:
continue
print("Export finished")
class Database(SpatialiteDatabase):
"""
The Database class represents a Spatialite database used to store the data results.
Args:
database_filepath (str):
File path of the database file
spatialite_filepath (str):
File path of the Spatialite extension
"""
def __init__(self, database_filepath, spatialite_filepath="mod_spatialite"):
SpatialiteDatabase.__init__(self, database_filepath, spatialite_filepath)
def create_tables(self):
"""
Creates all necessary tables in the database. These are:
FlightData : Stores the Flight Data coming from the API.
"""
# Create table FlightData
sql_statement = "CREATE TABLE FlightData ("
sql_statement += "id INTEGER PRIMARY KEY, "
sql_statement += "gender TEXT, "
sql_statement += "title TEXT, "
sql_statement += "first TEXT,"
sql_statement += "last TEXT, "
sql_statement += "street_number INTEGER, "
sql_statement += "street_name TEXT, "
sql_statement += "city TEXT, "
sql_statement += "state TEXT, "
sql_statement += "country TEXT, "
sql_statement += "postcode INTEGER, "
sql_statement += "latitude FLOAT, "
sql_statement += "longitude FLOAT, "
sql_statement += "timezone_offset FLOAT, "
sql_statement += "timezone_description TEXT, "
sql_statement += "email TEXT, "
sql_statement += "dob FLOAT, "
sql_statement += "age FLOAT, "
sql_statement += "id_name TEXT, "
sql_statement += "id_value TEXT, "
sql_statement += "picture_large TEXT, "
sql_statement += "picture_medium TEXT, "
sql_statement += "picture_thumbnail TEXT, "
sql_statement += "nationality TEXT)"
self.connection.execute(sql_statement)
print("Table District created")
def import_data(self, data_filepath):
"""
Imports flight data from the given filepath.
data_filepath : File path to the Flight data.
"""
# Read the person records (gender, name, location, contact, dob, id, picture, nationality)
# from the input CSV file
print("Starting import")
file = open(data_filepath)
rows = csv.reader(file, delimiter=";")
next(rows, None) # Skip headers
# Insert the data into the FlightData table
sql_statement = "INSERT INTO FlightData (gender, title, first, last, street_number, street_name, " \
"city,state, country,postcode, latitude,longitude, timezone_offset, " \
"timezone_description,email, dob, age, id_name, id_value, picture_large, " \
"picture_medium, picture_thumbnail, nationality) "
sql_statement += "VALUES (?, ?, ?, ?, ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
for row in rows:
self.cursor.execute(sql_statement, row)
self.connection.commit()
print("Flight data added")
def export_chart_country(data_file_path):
"""
Exports a pie chart with percentage of data for each country.
Args:
data_file_path (str):
File path of the exported CSV file
"""
# Read csv as pandas dataframe
fight_data = pd.read_csv(data_file_path, sep=';')
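# Hedged sketch of the remaining steps promised by the docstring: derive the share of records per
# country and draw a pie chart. The 'country' column name comes from the header written in
# export_to_csv; the output file name is an assumption.
fight_data.columns = fight_data.columns.str.strip()  # the header was written with spaces after ';'
country_counts = fight_data['country'].value_counts()
plt.figure(figsize=(8, 8))
country_counts.plot.pie(autopct='%1.1f%%')
plt.ylabel('')
plt.title('Share of records per country')
plt.savefig('country_share_pie.png')
plt.close()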
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
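# Note: the stack is expected to short-circuit. algo2 returns False, so algo3 should never be
# called and the stack call itself evaluates to False, as asserted above.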
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = | pd.date_range('2010-01-01', periods=35) | pandas.date_range |
import unittest
import pandas as pd
import numpy as np
from tests.context import algotrading
from tests.context import dates
from tests.context import get_test_market_a
from tests.context import get_test_market_b
from tests.context import assert_elements_equal
import algotrading.data.features.intra_bar_features as ibf
import algotrading.data.features.shifted_features as shf
from algotrading.data.market_direction import market_direction as md
from algotrading.data.price_data_schema import price_data_schema as schema
class TestShiftedFeatures(unittest.TestCase):
def test_lead(self):
market = get_test_market_a()
lead = shf.LeadFeature(schema.open, 1)
expected_data = | pd.Series([2.3, 3.4, 3.4, 2.0, np.nan], index=dates) | pandas.Series |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on June 4, 2017
@author: debugo
@contact: <EMAIL>
'''
import json
import datetime
from bs4 import BeautifulSoup
import pandas as pd
from tushare.futures import domestic_cons as ct
try:
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from urllib.error import HTTPError
from http.client import IncompleteRead
except ImportError:
from urllib import urlencode
from urllib2 import urlopen, Request
from urllib2 import HTTPError
from httplib import IncompleteRead
def get_cffex_daily(date = None):
"""
Fetch China Financial Futures Exchange (CFFEX) daily trading data
Parameters
------
date: date string, format YYYY-MM-DD or YYYYMMDD, or a datetime.date object; defaults to today when omitted
Return
-------
DataFrame
CFFEX daily trading data (DataFrame):
symbol         contract code
date           trade date
open           opening price
high           highest price
low            lowest price
close          closing price
pre_close      previous close
volume         trading volume
open_interest  open interest
turnover       turnover
settle         settlement price
pre_settle     previous settlement price
variety        contract variety
or None (no trading data for the given date)
"""
day = ct.convert_date(date) if date is not None else datetime.date.today()
try:
html = urlopen(Request(ct.CFFEX_DAILY_URL % (day.strftime('%Y%m'),
day.strftime('%d'), day.strftime('%Y%m%d')),
headers=ct.SIM_HAEDERS)).read().decode('gbk', 'ignore')
except HTTPError as reason:
if reason.code != 404:
print(ct.CFFEX_DAILY_URL % (day.strftime('%Y%m'), day.strftime('%d'),
day.strftime('%Y%m%d')), reason)
return
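# u'网页错误' translates to "page error"; if the returned page contains it, treat the request
# as having no data for that date and return None.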
if html.find(u'网页错误') >= 0:
return
html = [i.replace(' ','').split(',') for i in html.split('\n')[:-2] if i[0][0] != u'小' ]
if html[0][0]!=u'合约代码':
return
dict_data = list()
day_const = day.strftime('%Y%m%d')
for row in html[1:]:
m = ct.FUTURE_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {'date': day_const, 'symbol': row[0], 'variety': m.group(1)}
for i,field in enumerate(ct.CFFEX_COLUMNS):
if row[i+1] == u"":
row_dict[field] = 0.0
else:
row_dict[field] = float(row[i+1])
row_dict[ct.OUTPUT_COLUMNS[ct.PRE_SETTLE_LOC]] = row_dict[ct.OUTPUT_COLUMNS[ct.CLOSE_LOC]] - row_dict['change1']
row_dict[ct.OUTPUT_COLUMNS[ct.PRE_CLOSE_LOC]] = row_dict[ct.OUTPUT_COLUMNS[ct.CLOSE_LOC]] - row_dict['change2']
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[ct.OUTPUT_COLUMNS]
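# Example usage (requires network access to the CFFEX site; the date below is illustrative):
#   cffex = get_cffex_daily('2017-06-01')
#   if cffex is not None:
#       print(cffex[['symbol', 'close', 'volume', 'open_interest']].head())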
def get_czce_daily(date=None):
"""
Fetch Zhengzhou Commodity Exchange (CZCE) daily trading data
Parameters
------
date: date string, format YYYY-MM-DD or YYYYMMDD, or a datetime.date object; defaults to today when omitted
Return
-------
DataFrame
CZCE daily trading data (DataFrame):
symbol         contract code
date           trade date
open           opening price
high           highest price
low            lowest price
close          closing price
pre_close      previous close
volume         trading volume
open_interest  open interest
turnover       turnover
settle         settlement price
pre_settle     previous settlement price
variety        contract variety
or None (no trading data for the given date)
"""
day = ct.convert_date(date) if date is not None else datetime.date.today()
try:
html = urlopen(Request(ct.CZCE_DAILY_URL % (day.strftime('%Y'),
day.strftime('%Y%m%d')),
headers=ct.SIM_HAEDERS)).read().decode('gbk', 'ignore')
except HTTPError as reason:
if reason.code != 404:
print(ct.CZCE_DAILY_URL % (day.strftime('%Y'),
day.strftime('%Y%m%d')), reason)
return
if html.find(u'您的访问出错了') >= 0:
return
html = [i.replace(' ','').split('|') for i in html.split('\n')[:-4] if i[0][0] != u'小']
if html[1][0] != u'品种月份':
return
dict_data = list()
day_const = int(day.strftime('%Y%m%d'))
for ihtml in html[2:]:
m = ct.FUTURE_SYMBOL_PATTERN.match(ihtml[0])
if not m:
continue
row_dict = {'date': day_const, 'symbol': ihtml[0], 'variety': m.group(1)}
for i,field in enumerate(ct.CZCE_COLUMNS):
if ihtml[i+1] == "\r":
row_dict[field] = 0.0
else:
ihtml[i+1] = ihtml[i+1].replace(',','')
row_dict[field] = float(ihtml[i+1])
row_dict['pre_settle'] = row_dict['close'] - row_dict['change2']
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[ct.OUTPUT_COLUMNS]
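# Example usage (illustrative date; returns None when the exchange reports no data for that day):
#   czce = get_czce_daily('20170601')
#   if czce is not None:
#       print(czce.groupby('variety')['volume'].sum())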
def get_shfe_vwap(date = None):
"""
Fetch Shanghai Futures Exchange (SHFE) daily volume-weighted average price (VWAP) data
Parameters
------
date: date string, format YYYY-MM-DD or YYYYMMDD, or a datetime.date object; defaults to today when omitted
Return
-------
DataFrame
SHFE daily VWAP data (DataFrame):
symbol      contract code
date        trade date
time_range  VWAP time window, either 09:00-10:15 or 09:00-15:00
vwap        volume-weighted average price
or None (no data for the given date)
"""
day = ct.convert_date(date) if date is not None else datetime.date.today()
try:
json_data = json.loads(urlopen(Request(ct.SHFE_VWAP_URL % (day.strftime('%Y%m%d')),
headers=ct.SIM_HAEDERS)).read().decode('utf8'))
except HTTPError as reason:
if reason.code != 404:
print(ct.SHFE_VWAP_URL % (day.strftime('%Y%m%d')), reason)
return
if len(json_data['o_currefprice']) == 0:
return
df = | pd.DataFrame(json_data['o_currefprice']) | pandas.DataFrame |
import math
import pandas as pd
import csv
import pathlib
import wx
import matplotlib
import matplotlib.pylab as pL
import matplotlib.pyplot as plt
import matplotlib.backends.backend_wxagg as wxagg
import re
import numpy as np
import scipy
import scipy.interpolate
import sys
#from mpl_toolkits.mplot3d import Axes3D
#import wx.lib.inspection as wxli
class ERTAPP(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, wx.ID_ANY, title='ERT Editing',pos=(100,100),size=(500,500))
#Built from template here: https://wiki.wxpython.org/GridSizerTutorial
#Set up Panels
def setUpPanels(self):
self.topPanel = wx.Panel(self, wx.ID_ANY,size = (1000,10),name='Top Panel')
self.infoPanel = wx.Panel(self, wx.ID_ANY,size = (1000,50),name='Info Panel')
self.chartPanel = wx.Panel(self, wx.ID_ANY,size = (1000,500),name='Chart Panel')
self.bottomPanel= wx.Panel(self, wx.ID_ANY,size = (1000,130),name='Bottom Panel')
#need to create more panels, see here: https://stackoverflow.com/questions/31286082/matplotlib-in-wxpython-with-multiple-panels
def titleSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (4, 4))
self.titleIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.title = wx.StaticText(self.topPanel, wx.ID_ANY, 'Advanced ERT Editing')
#Declare inputs for first row
def inputSetup(self):
bmp = wx.ArtProvider.GetBitmap(wx.ART_TIP, wx.ART_OTHER, (4, 4))
self.inputOneIco = wx.StaticBitmap(self.topPanel, wx.ID_ANY, bmp)
self.labelOne = wx.StaticText(self.topPanel, wx.ID_ANY, 'Input ERT Data')
self.inputTxtOne = wx.TextCtrl(self.topPanel, wx.ID_ANY, '')
self.inputTxtOne.SetHint('Enter data file path here')
self.inputBrowseBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onBrowse, self.inputBrowseBtn)
self.readInFileBtn = wx.Button(self.topPanel, wx.ID_ANY, 'Read Data')
self.Bind(wx.EVT_BUTTON, self.onReadIn, self.readInFileBtn)
self.inputDataType = wx.Choice(self.topPanel, id=wx.ID_ANY,choices=['.DAT (LS)','.TXT (LS)','.DAT (SAS)', '.VTK', '.XYZ'],name='.TXT (LS)')
self.Bind(wx.EVT_CHOICE,self.onDataType,self.inputDataType)
self.autoShiftBx = wx.CheckBox(self.topPanel,wx.ID_ANY, 'Auto Shift?')
self.autoShiftBx.SetValue(True)
#Row 3 item(s)
self.TxtProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Name: ')
self.TxtProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Profile Length: ')
self.TxtDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Data Points: ')
self.TxtBlank = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtBlank2 = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.TxtMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Min. Electrode Spacing: ')
self.TxtProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Project Name: ')
self.TxtArray = wx.StaticText(self.infoPanel, wx.ID_ANY, 'Array: ')
self.msgProfileName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProfileRange = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgDataPts = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgMinElectSpcng = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgProjectName = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
self.msgArray = wx.StaticText(self.infoPanel, wx.ID_ANY, '')
# DataViz Area item(s)
def dataVizSetup(self):
self.editSlider = wx.Slider(self.chartPanel, pos=(200,0), id=wx.ID_ANY, style=wx.SL_TOP | wx.SL_AUTOTICKS | wx.SL_LABELS, name='Edit Data')
self.Bind(wx.EVT_SCROLL, self.onSliderEditEVENT, self.editSlider)
self.dataVizMsg1 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizMsg2 = wx.StaticText(self.chartPanel, wx.ID_ANY, '')
self.dataVizInput = wx.TextCtrl(self.chartPanel, wx.ID_ANY, '')
self.dataVizInputBtn = wx.Button(self.chartPanel, -1, "Use Value")
self.dataVizInputBtn.Bind(wx.EVT_BUTTON, self.ONdataVizInput)
self.saveEditsBtn = wx.Button(self.chartPanel, -1, "Save Edits")
self.saveEditsBtn.Bind(wx.EVT_BUTTON, self.ONSaveEdits)
self.saveEditsBtn.SetBackgroundColour((100,175,100))
self.currentChart = 'Graph'
self.editDataChoiceList = ['AppResist','Resistance','Electrode x-Dists','Variance','PctErr','PseudoX','PseudoZ']
self.editDataChoiceBool = [False]*len(self.editDataChoiceList)
self.editDataValues = []
for i in self.editDataChoiceList:
self.editDataValues.append([0,0])
self.editDataType = wx.Choice(self.chartPanel, id=wx.ID_ANY,choices=self.editDataChoiceList,name='Edit Data')
self.editDataType.Bind(wx.EVT_CHOICE, self.onSelectEditDataType)
self.setEditToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Unused',size=(25,30))
self.setEditToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onSetEditToggle)
self.labelMinRem = wx.StaticText(self.chartPanel, wx.ID_ANY, 'Min.')
self.inputTxtMinRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER, name='')
self.inputTxtMinRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.labelMaxRem = wx.StaticText(self.chartPanel, wx.ID_ANY,'Max.')
self.inputTxtMaxRem = wx.TextCtrl(self.chartPanel, wx.ID_ANY,style=wx.TE_PROCESS_ENTER,name= '')
self.inputTxtMaxRem.Bind(wx.EVT_TEXT_ENTER, self.onEditDataValueChangeEvent)
self.editTypeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'Remove',size=(25,50))
self.editTypeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onEditTypeToggle)
self.editLogicToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'OR',size=(25,25))
self.editLogicToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.onLogicToggle)
self.removePtsBtn = wx.Button(self.chartPanel, -1, "Edit Points")
self.removePtsBtn.Bind(wx.EVT_BUTTON, self.onRemovePts)
self.electrodeToggleBtn = wx.ToggleButton(self.chartPanel,wx.ID_ANY,'On',size=(25,25))
self.electrodeToggleBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ONtoggle)
self.GraphEditBtn = wx.Button(self.chartPanel, -1, "Graphic Editor", size=(100, 30))
self.GraphEditBtn.Bind(wx.EVT_BUTTON, self.graphChartEvent)
self.StatEditBtn = wx.Button(self.chartPanel, -1, "Statistical Editor", size=(100, 30))
self.Bind(wx.EVT_BUTTON, self.statChartEvent, self.StatEditBtn)
self.addGPSBtn = wx.Button(self.chartPanel, -1, "GPS Data", size=(100, 30))
self.addGPSBtn.Bind(wx.EVT_BUTTON, self.GPSChartEvent)
self.addTopoBtn = wx.Button(self.chartPanel, -1, "Topography Data", size=(100, 30))
self.addTopoBtn.Bind(wx.EVT_BUTTON, self.topoChartEvent)
self.reviewBtn = wx.Button(self.chartPanel, -1, "Review Edits", size=(100, 15))
self.reviewBtn.Bind(wx.EVT_BUTTON, self.reviewEvent)
def bottomAreaSetup(self):
# Row 4 items
self.reverseBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Reverse Profile')
self.labelGPSIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'GPS Data')
self.inputTxtGPS = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter GPS Filepath Here')
self.inputGPSBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onGPSBrowse, self.inputGPSBtn)
self.Bind(wx.EVT_CHECKBOX, self.onReverse, self.reverseBx)
self.dataEditMsg = wx.StaticText(self.bottomPanel, wx.ID_ANY, '')
self.labelTopoIN = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Topo Data')
self.inputTxtTopo = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Topo Filepath Here')
self.inputTopoBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.includeTopoBx = wx.CheckBox(self.bottomPanel,wx.ID_ANY, 'Include Topography')
self.Bind(wx.EVT_BUTTON, self.onTopoBrowse, self.inputTopoBtn)
self.Bind(wx.EVT_CHECKBOX, self.onIncludeTopo, self.includeTopoBx)
#Bottom Row items
self.saveBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Export and Save Data')
self.cancelBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Cancel')
self.Bind(wx.EVT_BUTTON, self.onExport, self.saveBtn)
self.Bind(wx.EVT_BUTTON, self.onCancel, self.cancelBtn)
self.labelExport = wx.StaticText(self.bottomPanel, wx.ID_ANY, 'Export Data')
self.exportTXT = wx.TextCtrl(self.bottomPanel, wx.ID_ANY, 'Enter Export Filepath Here')
self.exportDataBtn = wx.Button(self.bottomPanel, wx.ID_ANY, 'Browse')
self.Bind(wx.EVT_BUTTON, self.onExportBrowse, self.exportDataBtn)
#Set up chart
def chartSetup(self):
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.figure = matplotlib.figure.Figure()
self.canvas = wxagg.FigureCanvasWxAgg(self.chartPanel, -1, self.figure)
self.axes = self.figure.add_subplot(111)
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Depth (m)')
self.toolbar = wxagg.NavigationToolbar2WxAgg(self.canvas)
def sizersSetup(self):
#Set up sizers
self.baseSizer = wx.BoxSizer(wx.VERTICAL)
self.topSizer = wx.BoxSizer(wx.VERTICAL)
self.titleSizer = wx.BoxSizer(wx.HORIZONTAL)
self.inputSizer = wx.BoxSizer(wx.HORIZONTAL)
#self.readMsgSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.profileTxtSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileTxtSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer1 = wx.BoxSizer(wx.VERTICAL)
self.profileMsgSizer2 = wx.BoxSizer(wx.VERTICAL)
self.profileInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.ctrlSizer = wx.BoxSizer(wx.VERTICAL)
self.chartSizer = wx.BoxSizer(wx.VERTICAL)
self.dataVizSizer = wx.BoxSizer(wx.HORIZONTAL)
self.vizInfoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.dataEditSizer = wx.BoxSizer(wx.HORIZONTAL)
self.bottomSizer = wx.BoxSizer(wx.VERTICAL)
self.GPSSizer = wx.BoxSizer(wx.HORIZONTAL)
self.TopoSizer = wx.BoxSizer(wx.HORIZONTAL)
self.botSizer = wx.BoxSizer(wx.HORIZONTAL)
def addtoSizers(self):
#Add items to sizers
self.titleSizer.Add(self.title, 0, wx.ALIGN_CENTER)
self.inputSizer.Add(self.labelOne, 1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputTxtOne, 8,wx.EXPAND,5)
self.inputSizer.Add(self.inputBrowseBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.inputDataType,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.readInFileBtn,1,wx.ALIGN_CENTER,5)
self.inputSizer.Add(self.autoShiftBx, 1, wx.ALIGN_CENTER, 5)
#self.readMsgSizer.Add(self.msgLabelOne, 0, wx.ALL,5)
self.profileTxtSizer1.Add(self.TxtProfileName, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer1.Add(self.TxtDataPts, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtArray, 0, wx.ALIGN_LEFT,5)
self.profileTxtSizer2.Add(self.TxtProjectName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileName, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgProfileRange, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer1.Add(self.msgDataPts, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgMinElectSpcng, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgArray, 0, wx.ALIGN_LEFT,5)
self.profileMsgSizer2.Add(self.msgProjectName, 0, wx.ALIGN_LEFT,5)
self.profileInfoSizer.Add(self.profileTxtSizer1, 1,wx.ALL,5)
self.profileInfoSizer.Add(self.profileMsgSizer1,3,wx.ALL,5)
self.profileInfoSizer.Add(self.profileTxtSizer2, 1, wx.ALL, 5)
self.profileInfoSizer.Add(self.profileMsgSizer2, 3, wx.ALL, 5)
self.topSizer.Add(self.titleSizer,1,wx.ALL,5)
self.topSizer.Add(self.inputSizer, 2, wx.ALL, 5)
#self.topSizer.Add(self.readMsgSizer, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizMsg1,16,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizMsg2, 24, wx.ALL, 5)
self.vizInfoSizer.Add(self.electrodeToggleBtn,1,wx.ALL,5)
self.vizInfoSizer.Add(self.dataVizInput, 1, wx.ALL, 5)
self.vizInfoSizer.Add(self.dataVizInputBtn,3,wx.ALL,5)
self.vizInfoSizer.Add(self.saveEditsBtn,3,wx.ALL,5)
self.ctrlSizer.Add(self.GraphEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.StatEditBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addGPSBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.addTopoBtn, 2, wx.ALL, 5)
self.ctrlSizer.Add(self.reviewBtn,1,wx.ALL,5)
self.dataEditSizer.Add(self.editDataType,5, wx.ALL, 5)
self.dataEditSizer.Add(self.setEditToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.labelMinRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMinRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.inputTxtMaxRem, 3, wx.ALL, 5)
self.dataEditSizer.Add(self.labelMaxRem, 2, wx.ALL, 5)
self.dataEditSizer.Add(self.editTypeToggleBtn,3,wx.ALL,5)
self.dataEditSizer.Add(self.editLogicToggleBtn,2,wx.ALL,5)
self.dataEditSizer.Add(self.removePtsBtn, 3, wx.ALL, 5)
self.chartSizer.Add(self.vizInfoSizer, 1, wx.ALL, 5)
self.chartSizer.Add(self.editSlider,1, wx.LEFT | wx.RIGHT | wx.EXPAND,94)
self.chartSizer.Add(self.canvas, 12, wx.EXPAND)
self.chartSizer.Add(self.toolbar, 1, wx.EXPAND)
self.chartSizer.Add(self.dataEditSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.ctrlSizer,1,wx.EXPAND)
self.dataVizSizer.Add(self.chartSizer,6,wx.EXPAND)
self.GPSSizer.Add(self.dataEditMsg, 2, wx.ALL, 5)
self.GPSSizer.Add(self.reverseBx, 1, wx.ALL, 5)
self.GPSSizer.Add(self.labelGPSIN, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputTxtGPS, 8, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.GPSSizer.Add(self.inputGPSBtn, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.includeTopoBx, 2, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.TopoSizer.Add(self.labelTopoIN, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTxtTopo, 8, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.TopoSizer.Add(self.inputTopoBtn, 1, wx.ALIGN_CENTER_VERTICAL| wx.ALL, 5)
self.botSizer.Add(self.labelExport, 1, wx.ALL, 5)
self.botSizer.Add(self.exportTXT,6, wx.ALL, 5)
self.botSizer.Add(self.exportDataBtn,1, wx.ALL, 5)
self.botSizer.Add(self.cancelBtn, 1, wx.ALL, 5)
self.botSizer.Add(self.saveBtn, 1, wx.ALL, 5)
#btnSizer.Add(saveEditsBtn,0,wx.ALL,5)
self.bottomSizer.Add(self.GPSSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.TopoSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.bottomSizer.Add(self.botSizer,0, wx.ALIGN_RIGHT | wx.ALL, 5)
def addtoPanels(self):
self.topPanel.SetSizer(self.topSizer)
self.infoPanel.SetSizer(self.profileInfoSizer)
self.chartPanel.SetSizer(self.dataVizSizer)
self.bottomPanel.SetSizer(self.bottomSizer)
self.topPanel.Layout()
self.baseSizer.Add(self.topPanel,1, wx.EXPAND,1)
self.baseSizer.Add(self.infoPanel,1,wx.EXPAND,1)
self.baseSizer.Add(self.chartPanel, 10, wx.EXPAND | wx.ALL, 5)
self.baseSizer.Add(self.bottomPanel, 1, wx.EXPAND | wx.ALL, 1)
self.SetSizer(self.baseSizer)
self.SetSize(1100,950)
def variableInfo(): #To see what the 'global' variables are
pass
#self.electxDataIN: list of all electrode xdistances
#self.xCols: list with numbers of columns with x-values, from initial read-in table. varies with datatype
#self.xData: list with all x-values of data points
#self.zData: list with all z-values of data points (depth)
#self.values: list with all resist. values of data points
#self.inputDataExt: extension of file read in, selected from initial drop-down (default = .dat (LS))
#self.xDF : dataframe with only x-dist of electrodes, and all of them
#self.dataHeaders: headers from original file read in, used for column names for dataframeIN
#self.dataListIN: nested list that will be used to create dataframe, with all read-in data
#self.dataframeIN: initial dataframe from data that is read in
#self.df: dataframe formatted for editing, but remaining static as initial input data
#self.dataframeEDIT: dataframe that is manipulated during editing
#self.electrodes: sorted list of all electrode xdistances
#self.electrodesShifted: shifted, sorted list of all electrode xdistances
#self.electState:list of booleans giving status of electrode (True = in use, False = edited out)
#self.electrodeElevs: surface elevation values at each electrode
#self.dataLengthIN: number of measurements in file/length of dataframes
#self.dataframeEDITColHeaders
#self.dataShifted: indicates whether data has been shifted
setUpPanels(self)
titleSetup(self)
inputSetup(self)
dataVizSetup(self)
bottomAreaSetup(self)
chartSetup(self)
sizersSetup(self)
addtoSizers(self)
addtoPanels(self)
#wxli.InspectionTool().Show(self)
#Initial Plot
def nullFunction(self,event):
pass
def onBrowse(self,event):
with wx.FileDialog(self,"Open Data File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.dataPath = pathlib.Path(fileDialog.GetPath())
fName = str(self.dataPath.parent) + '\\' + self.dataPath.name
self.inputDataExt = self.dataPath.suffix
try:
with open(self.dataPath,'r') as datafile:
self.inputTxtOne.SetValue(fName)
except IOError:
wx.LogError("Cannot Open File")
if self.inputDataExt.lower() == '.txt':
self.inputDataExt = '.TXT (LS)'
n = 1
elif self.inputDataExt.lower() == '.dat':
if self.dataPath.stem.startswith('lr'):
self.inputDataExt = '.DAT (SAS)'
n = 2
else:
self.inputDataExt = '.DAT (LS)'
n = 0
elif self.inputDataExt.lower() == '.vtk':
self.inputDataExt = '.VTK'
n=3
elif self.inputDataExt.lower() == '.xyz':
self.inputDataExt = '.XYZ'
n=4
else:
wx.LogError("Cannot Open File")
if self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
outPath = self.dataPath.stem.split('-')[0]
else:
outPath = self.dataPath.stem.split('.')[0]
if outPath.startswith('lr'):
outPath = outPath[2:]
outPath = outPath +'_pyEdit.dat'
if self.includeTopoBx.GetValue():
outPath = outPath[:-4]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(str(self.dataPath.with_name(outPath)))
self.inputDataType.SetSelection(n)
self.readInFileBtn.SetLabelText('Read Data')
def onGPSBrowse(self,event):
with wx.FileDialog(self,"Open GPS File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.GPSpath = pathlib.Path(fileDialog.GetPath())
gpsFName = str(self.GPSpath.parent) + '\\' + self.GPSpath.name
self.inputTxtGPS.SetValue(gpsFName)
self.getGPSVals()
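# Note on the expected GPS file layout: based on the parsing in getGPSVals below, the file is
# tab-delimited with one header row, the point label in the 3rd column, and the easting/northing
# coordinates in the 4th and 5th columns.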
def getGPSVals(self):
with open(self.GPSpath) as GPSFile:
data = csv.reader(GPSFile)
self.gpsXData = []
self.gpsYData = []
self.gpsLabels = []
for row in enumerate(data):
if row[0] == 0:
pass #headerline
else:
r = re.split('\t+', str(row[1][0]))
if r[0] == '':
pass
else:
self.gpsLabels.append(r[2])
self.gpsXData.append(float(r[3]))
self.gpsYData.append(float(r[4]))
def onTopoBrowse(self,event):
with wx.FileDialog(self,"Open Topo File", style= wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.topoPath = pathlib.Path(fileDialog.GetPath())
topoFName = str(self.topoPath.parent) + '\\' + self.topoPath.name
self.inputTxtTopo.SetValue(topoFName)
self.includeTopoBx.SetValue(True)
self.getTopoVals()
self.topoText()
def onIncludeTopo(self,event):
self.topoText()
def topoText(self):
if self.includeTopoBx.GetValue() == True:
#print('topo' not in self.exportTXT.GetValue())
if 'topo' not in self.exportTXT.GetValue():
#print("It's Not in")
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(outPath)
elif self.includeTopoBx.GetValue() == False:
if '_topo' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_topo"))
strInd2 = strInd + 5
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
def onReverse(self,event):
self.reverseText()
def reverseText(self):
if self.reverseBx.GetValue() == True:
if '_rev' not in self.exportTXT.GetValue():
if len(self.exportTXT.GetValue())>0:
outPath = self.exportTXT.GetValue()
outPath = outPath[:int(len(outPath)-4)]
outPath = outPath + "_rev.dat"
self.exportTXT.SetValue(outPath)
elif self.reverseBx.GetValue() == False:
if '_rev' in self.exportTXT.GetValue():
outPath = self.exportTXT.GetValue()
#print(outPath)
strInd = int(outPath.find("_rev"))
strInd2 = strInd + 4
outPath = outPath[:strInd]+outPath[strInd2:]
self.exportTXT.SetValue(outPath)
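# Note on the expected topo file layout: getTopoVals below assumes a tab-delimited file with one
# header row and a station label, x-distance, and elevation in the first three columns.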
def getTopoVals(self):
with open(self.topoPath) as topoFile:
data = csv.reader(topoFile)
topoXData = []
topoYData = []
topoLabels = []
for row in enumerate(data):
if row[0] == 0:
pass
else:
r = re.split('\t+', str(row[1][0]))
if r[0] == '':
pass
else:
topoLabels.append(r[0])
topoXData.append(float(r[1]))
topoYData.append(float(r[2]))
self.topoDF = pd.DataFrame([topoXData, topoYData]).transpose()
self.topoDF.columns = ["xDist", "Elev"]
def onDataType(self,event):
self.inputDataExt = self.inputDataType.GetString(self.inputDataType.GetSelection())
if self.inputDataExt == '.DAT (LS)':
self.headerlines = 8
elif self.inputDataExt == '.DAT (SAS)':
self.headerlines = 5
elif self.inputDataExt == '.VTK':
self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt == '.XYZ':
self.headerlines = 5 #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
elif self.inputDataExt =='':
self.headerlines = 8
else:
if len(self.inputTxtOne.GetValue()) > 0:
try:
with open(self.dataPath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
for row in enumerate(filereader):
if start == 0:
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
else:
continue
else:
continue
except:
self.headerlines = -1
wx.LogError('Data File not selected')
else:
self.headerlines = -1
def onReadIn(self, event):
self.onDataType(self) #initialize number of headerlines to use
self.dataHeader = []
filepath = pathlib.Path(self.inputTxtOne.GetValue())
self.ext = str(filepath.suffix)
filename = str(filepath.stem)
self.dataframeEDITColHeaders = ['MeasID','A(x)','A(z)','B(x)','B(z)','M(x)','M(z)','N(x)','N(z)', 'aVal', 'nFac','PseudoX','PseudoZ','Resistance','AppResist','Cycles','Variance','DataLevel','DtLvlMean','PctErr','Keep']
if self.ext.lower() == '.dat':
###############Need to update to fit .txt data format
dataLst = []
self.dataLead = []
self.dataTail = []
with open(filepath) as dataFile:
data = csv.reader(dataFile)
if self.inputDataExt == '.DAT (SAS)':
self.dataHeaders = ['M(x)','aVal','nFac','AppResist']
i = 0
dataList=[]
for row in enumerate(data):
if row[0]>self.headerlines: #Read in actual data
if row[0] > self.headerlines + datalength: #Read in data tail
self.dataTail.append(row[1])
else:
#It sometimes reads the lines differently. Sometimes as a list (as it should) other times as a long string
if len(row[1]) < 4:
#Entire row is read as string
dataList.append(re.split(' +', row[1][0]))
else:
#Row is read correctly as separate columns
dataList.append(row[1])
i+=1
else:
if row[0] == 3: #Read in data length
datalength = float(row[1][0])
self.dataLead.append(row[1])#Create data lead variable for later use
datalengthIN = i
self.fileHeaderDict = {}
self.dataListIN = dataList #Formatted global nested list is created of data read in
project = self.dataLead[0][0]
array = self.dataLead[2][0]
if float(array) == 3:
array = "Dipole-Dipole"
msrmtType = 'Apparent Resistivity'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = round(float(self.dataLead[1][0]),2)
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = self.dataLead[3][0]
self.dataframeIN = pd.DataFrame(self.dataListIN)
#Sometimes the data is read in with an extra column at the beginning. This fixes that.
if len(self.dataframeIN.columns) > 4:
del self.dataframeIN[0]
self.dataframeIN.reindex([0, 1, 2, 3], axis='columns')
self.dataframeIN = self.dataframeIN.astype(float)
self.dataframeIN.columns = self.dataHeaders
self.dataframeCols = [-2, -3, -4, -5, -6, 0, -7, -8, -9, 1, 2, -10, -11, -12, 3, -1, -1, -13, -14, -15, -16] # negative values indicate columns that need to be calculated
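# Convention for self.dataframeCols: an entry > -1 is the dataframeIN column index copied directly
# into dataframeEDIT; -1 marks a column that cannot be derived from this file format; the other
# negative codes select one of the calculation branches below.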
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
zeroList = []
for i in range(0, dataframelength):
nullList.append(-1)
zeroList.append(0.0)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[
item[0]] > -1: # Columns from dataframeIN that are directly read to dataframeEDIT
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -1: # Null list (can't calculate)
self.dataframeEDIT[item[1]] = nullList
elif self.dataframeCols[item[0]] == -2: # Measure ID
for i in range(0, dataframelength):
self.dataframeEDIT.loc[i, item[1]] = i
elif self.dataframeCols[item[0]] == -3: # A(x)
self.dataframeIN['A(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal'] + (self.dataframeIN['aVal']*self.dataframeIN['nFac']) + self.dataframeIN['aVal']
self.dataframeEDIT['A(x)'] = self.dataframeIN['A(x)']
elif self.dataframeCols[item[0]] == -4: # A(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -5: # B(x)
self.dataframeIN['B(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal'] + (self.dataframeIN['aVal']*self.dataframeIN['nFac'])
self.dataframeEDIT['B(x)'] = self.dataframeIN['B(x)']
elif self.dataframeCols[item[0]] == -6: # B(z)
self.dataframeEDIT[item[1]] = zeroList
#elif self.dataframeCols[item[0]] == -6: # M(x)
#Reads in directly
elif self.dataframeCols[item[0]] == -7: # M(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -8: #N(x)
self.dataframeIN['N(x)'] = self.dataframeIN['M(x)'] + self.dataframeIN['aVal']
self.dataframeEDIT['N(x)'] = self.dataframeIN['N(x)']
elif self.dataframeCols[item[0]] == -9: # N(z)
self.dataframeEDIT[item[1]] = zeroList
elif self.dataframeCols[item[0]] == -10: # PseudoX
self.dataframeEDIT['PseudoX'] = (((self.dataframeEDIT['A(x)'] + self.dataframeEDIT[
'B(x)']) / 2) + ((self.dataframeEDIT['M(x)'] + self.dataframeEDIT['N(x)']) / 2)) / 2
elif self.dataframeCols[item[0]] == -11: # PseudoZ
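# The pseudo-depth (plotting depth) is estimated from the n-factor and a-spacing using an
# empirical quadratic fit; the same expression is reused for the other input formats below.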
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
self.dataframeEDIT['PseudoZ'] = round((((n ** 2) * -0.0018) + 0.2752 * n + 0.1483) * a, 1)
elif self.dataframeCols[item[0]] == -12: #Resistance
PI = math.pi
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
appR = self.dataframeIN['AppResist']
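# SAS files store apparent resistivity, so resistance is back-calculated here by dividing by the
# dipole-dipole geometric factor k = pi * n * (n + 1) * (n + 2) * a.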
if self.fileHeaderDict['Array'] == 'Dipole-Dipole':
self.dataframeEDIT['Resistance'] = appR/(PI * n * (n + 1) * (n + 2) * a)
else:
print(
'Array is not Dipole-Dipole, but Dipole-Dipole k-factor used to calculate App. Resistivity')
elif self.dataframeCols[item[0]] == -13: #DataLevel
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(
self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -14: #DtLvlMean
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -15: #PctErr
self.dataframeEDIT['PctErr'] = (abs(
self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / \
self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -16: #Keep
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
elif self.inputDataExt == '.DAT (LS)': # If it's .DAT (LS)
self.dataHeaders = ["NoElectrodes",'A(x)', 'A(z)', 'B(x)', 'B(z)', 'M(x)', 'M(z)', 'N(x)', 'N(z)', 'Resistance']
datalength=12
dataList = []
for row in enumerate(data):
if row[0]>int(self.headerlines) and row[0] <= float(self.headerlines + datalength):
strrow = str(row[1])
strrow = strrow[2:-2]
splitrow = strrow.split('\\t')
if len(splitrow) != 10:
newrow = []
for i in splitrow:
val = i.strip()
newrow.append(val)
if len(newrow) < 9:
newrow = re.split(' +',newrow[0])
row = [float(i) for i in newrow]
dataList.append(row)
else:
dataList.append(splitrow)
elif row[0] <= int(self.headerlines):
if isinstance(row[1], list):
val = str(row[1])[2:-2]
else:
val = row[1]
self.dataLead.append(val)
if row[0] == 6:
datalength = float(row[1][0])
else:
self.dataTail.append(row[1])
self.dataListIN = dataList
self.fileHeaderDict = {}
project = self.dataLead[0]
dataFrmt = self.dataLead[2]
array = int(self.dataLead[3])
if array == 3:
array = "Dipole-Dipole"
msrmtType = str(self.dataLead[5])
if msrmtType.strip() == '0':
msrmtType = "Apparent Resistivity"
else:
msrmtType = 'Resistance'
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = project
self.fileHeaderDict['minElectSpcng'] = str(round(float(self.dataLead[1]),2))
self.fileHeaderDict['Array'] = array
self.fileHeaderDict["Type of Measurement"] = msrmtType
self.fileHeaderDict['DataPts'] = str(self.dataLead[6])
self.fileHeaderDict['DistType'] = str(self.dataLead[7])
self.dataframeIN = pd.DataFrame(self.dataListIN)
self.dataframeIN.columns = self.dataHeaders
self.dataframeCols = [-2, 1, 2, 3, 4, 5, 6, 7, 8, -3, -4, -5, -6, 9, -7, -1, -1, -8, -9, -10, -11] # negative values indicate columns that need to be calculated
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
for i in range(0, dataframelength):
nullList.append(-1)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[item[0]] > -1: #Columns from dataframeIN that are directly read to dataframeEDIT
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -1: #Null list (can't calculate)
self.dataframeEDIT[item[1]] = nullList
elif self.dataframeCols[item[0]] == -2:#Measure ID
for i in range(0,dataframelength):
self.dataframeEDIT.loc[i,item[1]] = i
elif self.dataframeCols[item[0]] == -3: #A spacing
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['A(x)'] - self.dataframeEDIT['B(x)'])
elif self.dataframeCols[item[0]] == -4: #N-factor
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['B(x)'] - self.dataframeEDIT['N(x)']) / self.dataframeEDIT['aVal']
elif self.dataframeCols[item[0]] == -5: #PseudoX
self.dataframeEDIT['PseudoX'] = (((self.dataframeEDIT['A(x)']+self.dataframeEDIT['B(x)'])/2)+((self.dataframeEDIT['M(x)']+self.dataframeEDIT['N(x)'])/2))/2
elif self.dataframeCols[item[0]] == -6: #PseudoZ
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
self.dataframeEDIT['PseudoZ'] = round((((n**2)*-0.0018)+0.2752*n+0.1483)*a,1)
elif self.dataframeCols[item[0]] == -7:#AppResistivity
PI = math.pi
n = self.dataframeEDIT['nFac']
a = self.dataframeEDIT['aVal']
R = self.dataframeEDIT['Resistance']
if self.fileHeaderDict['Array'] == 'Dipole-Dipole':
self.dataframeEDIT['AppResist'] = PI*n*(n+1)*(n+2)*a*R
else:
print('Array is not Dipole-Dipole, but Dipole-Dipole k-factor used to calculate App. Resistivity')
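# For reference, the dipole-dipole geometric factor used above is k = pi * n * (n + 1) * (n + 2) * a,
# so apparent resistivity = k * resistance. As an illustrative check, a = 5 m and n = 2 give
# k = pi * 2 * 3 * 4 * 5, roughly 377, so a measured resistance of 0.1 ohm maps to about 37.7 ohm-m.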
elif self.dataframeCols[item[0]] == -8: #DataLevel
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -9: # DtLvlMean
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -10: #PctErr
self.dataframeEDIT['PctErr'] = (abs(
self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / \
self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -11: #Keep
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
self.readInFileBtn.SetLabelText("Reset Data")
elif self.ext.lower() == '.txt':
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
end = 0
fileHeader = []
data = []
for row in enumerate(filereader):
if start == 0:
if row[0] <= 13:
fileHeader.append(row[1])
fileHeader[row[0]] = fileHeader[row[0]][:]
if 'N\\tTime' in str(row[1]):
start = 1
self.headerlines = row[0]
dataHdrTemp = str(row[1])
self.dataHeaders = dataHdrTemp[2:-2].split('\\t')
self.dataHeaders[1] = dataHdrTemp[1].strip()
self.fileHeaderDict = {}
for item in fileHeader:
if len(item) > 0:
self.fileHeaderDict[str(item[0]).split(":", 1)[0]] = str(item[0]).split(":", 1)[1].strip()
elif start == 1 and end == 0:
if len(row[1]) > 0:
data.append(str(row[1])[2:-1].split('\\t'))
else:
end = 1
else:
continue
self.dataListIN = data
self.dataframeIN = pd.DataFrame(self.dataListIN)
self.dataframeCols = [0, 6, 8, 9, 11, 12, 14, 15, 17, -2, -3, 18, 20, 26, 28, 29, 27, -4, -5, -6, -7] # negative values indicate columns that need to be calculated
self.dataframeEDIT = pd.DataFrame()
dataframelength = len(self.dataframeIN.index)
nullList = []
keepList = []
for i in range(0, dataframelength):
nullList.append(-1)
keepList.append(True)
# Create dataframe that will be used in editing process (self.dataframeEDIT) one column at a time
for item in enumerate(self.dataframeEDITColHeaders):
if self.dataframeCols[item[0]] > -1:
#print(item[1])
self.dataframeEDIT[item[1]] = self.dataframeIN.iloc[:, self.dataframeCols[item[0]]]
self.dataframeEDIT[item[1]] = self.dataframeEDIT[item[1]].astype(float)
elif self.dataframeCols[item[0]] == -2:
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['A(x)'] - self.dataframeEDIT['B(x)'])
elif self.dataframeCols[item[0]] == -3:
self.dataframeEDIT[item[1]] = abs(self.dataframeEDIT['N(x)'] - self.dataframeEDIT['M(x)']) / self.dataframeEDIT['aVal']
elif self.dataframeCols[item[0]] == -4:
self.dataframeEDIT['DataLevel'] = nullList
uniqueDepths = self.dataframeEDIT['PseudoZ'].unique()
uniqueDepths = list(set(uniqueDepths.flatten()))
self.dataLevels = len(uniqueDepths)
dataLength = len(self.dataframeEDIT['PseudoZ'])
for i in range(0, dataLength):
self.dataframeEDIT.loc[i, 'DataLevel'] = uniqueDepths.index(self.dataframeEDIT.loc[i, 'PseudoZ'])
elif self.dataframeCols[item[0]] == -5:
for i in uniqueDepths:
df = self.dataframeEDIT[self.dataframeEDIT.iloc[:, 12] == i]
dtLvlMean = df['AppResist'].mean()
indexList = df.index.values.tolist()
for ind in indexList:
self.dataframeEDIT.loc[ind, 'DtLvlMean'] = dtLvlMean
elif self.dataframeCols[item[0]] == -6:
self.dataframeEDIT['PctErr'] = (abs(self.dataframeEDIT['DtLvlMean'] - self.dataframeEDIT['AppResist'])) / self.dataframeEDIT['DtLvlMean']
elif self.dataframeCols[item[0]] == -7:
self.dataframeEDIT[item[1]] = keepList
else:
self.dataframeEDIT[item[1]] = nullList
self.dataHeaders[1] = 'MeasTime'
if len(self.dataHeaders) > 37:
self.dataHeaders[37] = 'Extra'
self.dataTail = [0,0,0,0,0,0,0]
self.dataframeIN.columns = self.dataHeaders
self.readInFileBtn.SetLabelText("Reset Data")
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = self.fileHeaderDict['Project name']
self.fileHeaderDict['Array'] = self.fileHeaderDict['Protocol file'][21:-4]
self.fileHeaderDict['minElectSpcng'] = self.fileHeaderDict['Smallest electrode spacing']
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
self.dataLead = []
self.dataLead.append(self.fileHeaderDict['Project name'] + " " + self.fileHeaderDict['Filename'])
self.dataLead.append(self.fileHeaderDict['minElectSpcng'])
self.dataLead.append('11') #General Array format
self.dataLead.append(self.fileHeaderDict['Sub array code']) #tells what kind of array is used
self.dataLead.append('Type of measurement (0=app.resistivity,1=resistance)')
self.dataLead.append('0') #Col 26 in .txt (col 28 is app. resistivity)
self.dataLead.append(self.fileHeaderDict['DataPts'])
self.dataLead.append('2')
self.dataLead.append('0')
elif self.ext.lower() == '.vtk':
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
startLocs = 0
startData = 0
startLocInd = 'POINTS'
startDataInd = 'LOOKUP_TABLE'
endLocs = 0
endData = 0
endLocInd = []
endDataInd = []
fileLead = []
fileMid = []
fileTail = []
vtkdata = []
vtklocs = []
newrow = []
xLocPts = []
yLocPts = []
zLocPts = []
vPts = []
for row in enumerate(filereader):
if startLocs == 0:
fileLead.append(row[1])
fileLead[row[0]] = fileLead[row[0]][:]
if startLocInd in str(row[1]):
startLocs = 1
elif startLocs == 1 and endLocs == 0:
if endLocInd == row[1]:
endLocs = 1
else:
newrow = re.split(' +', str(row[1][0]))
newrow = newrow[1:]
vtklocs.append(newrow)
elif startData == 0:
fileMid.append(row[1])
if startDataInd in str(row[1]):
startData = 1
elif startData == 1 and endData == 0:
if row[1] == endDataInd:
endData = 1
else:
newrow = re.split(' +', str(row[1][0]))
newrow = newrow[1:]
vtkdata.append(newrow)
else:
fileTail.append(row[1])
fileTail[row[0]] = fileTail[row[0]][:]
xPtCols = [0,3,6,9]
yPtCols = [1,4,7,10]
zPtCols = [2,5,8,11]
for r in vtklocs:
Xs = 0.0
for x in xPtCols:
Xs = Xs + float(r[x])
xLocPts.append(Xs/4.0)
Ys = 0.0
for y in yPtCols:
Ys = Ys + float(r[y])
yLocPts.append(Ys/4.0)
Zs = 0.0
for z in zPtCols:
Zs = Zs + float(r[z])
zLocPts.append(Zs/4)
for d in vtkdata:
for i in d:
vPts.append(i)
self.dataframeIN = pd.DataFrame([xLocPts, yLocPts, zLocPts, vPts]).transpose()
self.dataframeIN.columns = ['X','Y','Z','Resistivity']
print(self.dataframeIN)
#Format vtk file
self.fileHeaderDict = {}
self.fileHeaderDict['Filename'] = filename
self.fileHeaderDict['Project'] = 'NA'
self.fileHeaderDict['Array'] = 'NA'
self.fileHeaderDict['minElectSpcng'] = str(round(self.dataframeIN.loc[1,'X'] - self.dataframeIN.loc[0,'X'],1))
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
elif self.ext.lower() == '.xyz':#!!!!!!!!!!!!!!!!
with open(filepath, 'r') as datafile:
filereader = csv.reader(datafile)
start = 0
startIndicator = 'Elevation'
end = 0
endIndicator = '/'
fileHeader = []
data = []
for row in enumerate(filereader):
if start == 0:
fileHeader.append(row[1])
fileHeader[row[0]] = fileHeader[row[0]][:]
if startIndicator in str(row[1]):
start = 1
elif start == 1 and end == 0:
if endIndicator in str(row[1]):
end = 1
else:
data.append(str(row[1])[2:-1].split('\\t'))
else:
continue
######format xyz input
else:
self.dataVizMsg2.SetLabelText("Filepath Error. Must be .DAT, .TXT, .VTK, or .XYZ file")
self.dataLengthIN = len(self.dataframeIN.iloc[:,0])
self.read = 0
self.generateXY()
self.generateProfileInfo()
self.graphChart()
self.read = 1
def generateXY(self):
self.xCols = []
aVals = []
nFacs = []
yCols = []
valCols = []
self.xData = []
self.yData = []
self.zData = []
self.values = []
if self.inputDataExt == '.DAT (SAS)' or self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
self.xCols = [11]
self.electrodeCols = [1,3,5,7]
aVals = 9
nFacs = 10
zCols = 12
valCols = 14 #13 is resistance; 14 is app. resistivity
if self.autoShiftBx.GetValue():
startPt = []
for c in self.electrodeCols:
startPt.append(float(self.dataframeEDIT.iloc[:,c].min()))
startPt = min(startPt)
if startPt != 0:
self.dataShifted = True
for c in self.electrodeCols:
for i in enumerate(self.dataframeEDIT.iloc[:,c]):
self.dataframeEDIT.iloc[i[0],c] = float(i[1]) - float(startPt)
if self.inputDataExt == '.DAT (LS)' or self.inputDataExt == '.TXT (LS)':
outPath = self.dataPath.stem.split('-')[0]
elif self.inputDataExt == '.DAT (SAS)':
outPath = self.dataPath.stem.split('.')[0]
if outPath.startswith('lr'):
outPath = outPath[2:]
outPath = outPath + '_shift_pyEdit.dat'
self.exportTXT.SetValue(str(self.dataPath.with_name(outPath)))
else:
self.dataShifted = False
if self.includeTopoBx.GetValue():
outPath = self.exportTXT.GetValue()[:-4]
outPath = outPath + "_topo.dat"
self.exportTXT.SetValue(outPath)
#Get all electrode xDistances
self.electxDataIN = []
for c in self.electrodeCols:
for row in self.dataframeEDIT.iloc[:,c]:
self.electxDataIN.append(round(float(row),0))
xDataIN = self.dataframeEDIT.iloc[:,self.xCols[0]].to_list()
for item in xDataIN:
self.xData.append(float(item))
zDataIN = self.dataframeEDIT.iloc[:,zCols].to_list()
for item in zDataIN:
self.zData.append(float(item))
valIN = self.dataframeEDIT.iloc[:,valCols].to_list()
for item in valIN:
self.values.append(float(item))
xDistCols = ['B(x)', 'A(x)', 'N(x)', 'M(x)']
xDF = pd.DataFrame(self.dataframeIN.loc[:,xDistCols[:]])
xDF.columns = xDistCols
xDF = xDF.astype(float)
self.xDF = pd.DataFrame()
self.xDF['A(x)'] = xDF['A(x)']
self.xDF['B(x)'] = xDF['B(x)']
self.xDF['M(x)'] = xDF['M(x)']
self.xDF['N(x)'] = xDF['N(x)']
xList = []
for item in xDistCols:
xDistList = self.dataframeIN.loc[:,item].to_list()
for item in xDistList:
xList.append(float(item))
#print(self.dataframeIN)
minvals = self.xDF.min()
self.minXDist = minvals.min()
maxvals = self.xDF.max()
self.maxXDist = maxvals.max()
#self.minXDist = min(self.xData)
#self.maxXDist = max(self.xData)
self.minDepth = min(self.zData)
self.maxDepth = max(self.zData)
self.maxResist = max(self.values)
elif self.inputDataExt == '.VTK':
self.dataframeIN = self.dataframeIN.astype(float)
for i in range(0,len(self.dataframeIN)):
self.xData.append(self.dataframeIN.loc[i,'X'])
self.yData.append(self.dataframeIN.loc[i,'Y'])
self.zData.append(self.dataframeIN.loc[i,'Z'])
self.values.append(self.dataframeIN.loc[i,"Resistivity"])
self.minXDist = min(self.xData)
self.maxXDist = max(self.xData)
self.minDepth = min(self.zData)
self.maxDepth = max(self.zData)
self.maxResist = max(self.values)
elif self.inputDataExt == '.XYZ':
pass
else:
pass
if self.zData[0] < 0:
for i in enumerate(self.zData):
self.zData[i[0]] = self.zData[i[0]]*-1
self.maxDepth = max(self.zData)
self.minResist = min(self.values)
self.maxResist = max(self.values)
self.fileHeaderDict['DataPts'] = len(self.dataframeIN)
dt = []
dt.append(self.xData)
dt.append(self.zData)
dt.append(self.values)
cols = ['xDist', 'Depth', 'Value']
df = pd.DataFrame(dt)
df = df.transpose()
df.columns = cols
if self.inputDataExt =='.XYZ' or self.inputDataExt == '.VTK':
self.df = df.copy()
for i in range(0,len(self.dataframeIN)):
self.df.loc[i,"DtLvlMean"] = 0.0
self.df.loc[i,'PctErr'] = 0.0
self.df.loc[i,'MeasID'] = i
self.electxDataIN = self.xData
self.electxDataIN = [float(i) for i in self.electxDataIN]
self.electxDataIN = sorted(set(self.electxDataIN))
else:
pass
xDataINList = []
self.electrodes = []
for i in self.electxDataIN:
xDataINList.append(round(i,0))
self.electrodes = sorted(xDataINList)
self.electState = []
for i in self.electrodes:
self.electState.append(bool(i*0+1))
print(self.electrodes)
self.electrodesShifted = []
if self.dataShifted:
for e in self.electrodes:
self.electrodesShifted.append(e-startPt)
self.dataEditMsg.SetLabelText(str(len(self.dataframeEDIT)) + ' data pts')
def generateProfileInfo(self):
self.msgProfileName.SetLabelText(str(self.fileHeaderDict['Filename']))
self.msgProfileRange.SetLabelText(str(round(self.minXDist,0)) + " - " + str(round(self.maxXDist,0)))
self.msgDataPts.SetLabelText(str(self.fileHeaderDict['DataPts']))
self.msgArray.SetLabelText(str(self.fileHeaderDict['Array']))
self.msgProjectName.SetLabelText(str(self.fileHeaderDict['Project']))
self.msgMinElectSpcng.SetLabelText(str(self.fileHeaderDict['minElectSpcng']))
self.electrodeToggleBtn.SetValue(True)
self.electrodeToggleBtn.SetBackgroundColour((0, 255, 0))
self.sliderVal = self.editSlider.GetValue()
self.dataVizMsg2.SetLabelText('Electrode at ' + str(self.sliderVal) + ' m')
def graphChartEvent(self, event):
self.graphChart()
def graphChart(self):
self.editSlider.Show()
if self.currentChart != 'Graph':
self.editSlider.SetValue(0)
self.currentChart = 'Graph'
self.dataVizMsg1.SetLabelText('Graphical Editing Interface')
self.saveEditsBtn.Hide()
self.dataVizInput.Show()
self.dataVizInputBtn.Show()
self.electrodeToggleBtn.Show()
x = []
z = []
v = []
pe = []
n1 = []
n2 = []
KeepList = self.dataframeEDIT['Keep'].to_list()
peList = self.dataframeEDIT['PctErr'].to_list()
for i in enumerate(KeepList):
if i[1]:
x.append(self.dataframeEDIT.loc[i[0],'PseudoX'])
z.append(self.dataframeEDIT.loc[i[0],'PseudoZ'])
v.append(self.dataframeEDIT.loc[i[0],'AppResist'])
pe.append(peList[i[0]])
self.axes.clear()
if 'scipy.interpolate' in sys.modules:
self.makeColormesh(x,z,v, pe,n1,n2)
else:
ptSize = round(100/self.maxXDist*125,1)
self.axes.scatter(x,z, c=v,edgecolors='black',s=ptSize, marker='h')
def makeColormesh(self,x,z,v, pe,xOmit,zOmit):
for i in enumerate(v):
v[i[0]] = abs(float(i[1]))
xi, zi = np.linspace(min(x), max(x), 300), np.linspace(min(z), max(z), 300)
xi, zi = np.meshgrid(xi, zi)
vi = scipy.interpolate.griddata((x, z), v, (xi, zi), method='linear')
ptSize = round(100 / self.maxXDist * 35, 1)
self.figure.clear()
self.axes = self.figure.add_subplot(111)
cmap = pL.cm.binary
my_cmap = cmap(np.arange(cmap.N))
my_cmap[:,-1] = np.linspace(0,1,cmap.N)
#my_cmap = cmap(np.arange(pe))
#my_cmap[:,-1] = np.linspace(0,1,pe)
my_cmap = matplotlib.colors.ListedColormap(my_cmap)
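# Clip the color stretch to the 2nd-98th percentile of the plotted values and use a logarithmic
# normalization so a few extreme apparent-resistivity readings do not dominate the color scale.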
vmax = np.percentile(v, 98)
vmin = np.percentile(v, 2)
minx = min(x)
maxx = max(x)
minz = min(z)
maxz = max(z)
norm = matplotlib.colors.LogNorm(vmin = vmin, vmax = vmax)
#im = self.axes.imshow(vi, vmin=vmin, vmax=vmax, origin='lower',
im = self.axes.imshow(vi, origin='lower',
extent=[minx, maxx, minz, maxz],
aspect='auto',
cmap='nipy_spectral',
norm = norm,
interpolation='bilinear')
self.figure.colorbar(im, orientation='horizontal')
if self.currentChart == 'Graph':
self.axes.scatter(x, z, c=pe, edgecolors=None, s=ptSize, marker='o', cmap=my_cmap)
if abs(self.minDepth) < 10 :
self.axes.set_ylim(self.maxDepth * 1.15, 0)
else:
depthrange = abs(self.maxDepth-self.minDepth)
self.axes.set_ylim(self.minDepth-(depthrange*0.05), self.maxDepth + (depthrange*0.05))
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Depth (m)')
self.axes.xaxis.tick_top()
self.editSlider.SetMax(int(self.maxXDist))
self.editSlider.SetMin(int(self.minXDist))
self.editSlider.SetTickFreq(5)
self.canvas.draw()
elif self.currentChart == 'Review':
self.axes.scatter(xOmit, zOmit, c='black', s=ptSize/1.5, marker='x')
if abs(self.minDepth) < 10 :
self.axes.set_ylim(self.maxDepth * 1.15, 0)
else:
depthrange = abs(self.maxDepth - self.minDepth)
self.axes.set_ylim(self.minDepth - (depthrange * 0.05), self.maxDepth + (depthrange * 0.05))
self.axes.set_xlabel('X-Distance (m)')
self.axes.set_ylabel('Elev/Depth (m)')
self.axes.xaxis.tick_top()
self.canvas.draw()
#self.axes.scatter(x, z, c=pe, edgecolors='none', s=ptSize, marker='h', alpha=0.5, cmap='binary')
def statChartEvent(self,event):
self.statChart()
def statChart(self):
self.dataVizMsg1.SetLabelText('Statistical Editing Interface')
self.dataVizMsg2.SetLabelText('Move slider to % err upper limit')
self.currentChart = 'Stat'
self.saveEditsBtn.Show()
self.dataVizInput.Show()
self.editSlider.Show()
self.dataVizInputBtn.Show()
self.electrodeToggleBtn.Hide()
peIndex = int(self.dataframeEDIT.columns.get_loc('PctErr'))
KeepList = self.dataframeEDIT.loc[:,'Keep'].to_list()
peList = self.dataframeEDIT.iloc[:,peIndex].to_list()
pctErr = []
for i in enumerate(KeepList):
if i[1]:
pctErr.append(float(peList[i[0]]) * 100)
self.editSlider.SetMin(0)
self.editSlider.SetMax(int(max(pctErr)))
self.editSlider.SetValue(int(max(pctErr)))
self.editSlider.SetTickFreq(1)
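# The histogram below shows the percent error of each kept measurement (its deviation from the
# mean apparent resistivity of its data level, as computed in PctErr); the slider sets the
# percent-error cutoff, and onSliderEdit reports how many points fall at or above it.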
self.figure.clear()
self.axes = self.figure.add_subplot(111)
self.axes.hist(pctErr, bins=30)
self.axes.set_xlim(0, max(pctErr)*1.1)
self.axes.xaxis.tick_bottom()
self.canvas.draw()
def GPSChartEvent(self,event):
self.GPSChart()
def GPSChart(self):
self.editSlider.Hide()
self.electrodeToggleBtn.Hide()
self.dataVizInput.Hide()
self.dataVizInputBtn.Hide()
self.saveEditsBtn.Hide()
self.currentChart = 'GPS'
self.dataVizMsg1.SetLabelText('GPS Data Viewer')
if len(self.GPSpath.stem) < 1:
self.GPSpath = ''
self.dataVizMsg2.SetLabelText(str(self.GPSpath.stem))
self.getGPSVals()
self.figure.clear()
self.axes = self.figure.add_subplot(111)
xRange = max(self.gpsXData) - min(self.gpsXData)
yRange = max(self.gpsYData) - min(self.gpsYData)
if xRange!=0:
slope = abs(yRange/xRange)
else:
slope = 1000
if slope < 1:
if slope < 0.2:
xFact = 0.2
yFact = 5
elif slope < 0.6:
xFact = 0.2
yFact = 3
else:
xFact = 0.2
yFact = 1
else:
if slope > 4:
xFact = 5
yFact = 0.2
elif slope > 2:
xFact = 3
yFact = 0.2
else:
xFact = 1
yFact = 0.2
lowXlim = min(self.gpsXData) - xFact*xRange
upXlim = max(self.gpsXData) + xFact*xRange
lowYlim = min(self.gpsYData) - yFact*yRange
upYlim = max(self.gpsYData) + yFact*yRange
tick_spacing = 100
self.axes.scatter(self.gpsXData,self.gpsYData, s=20, marker='h')
self.axes.plot(self.gpsXData, self.gpsYData)
self.axes.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(tick_spacing))
self.axes.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(tick_spacing))
self.axes.ticklabel_format(axis='both',style='plain')
self.axes.grid(which='major', axis='both',color=(0.8,0.8,0.8))
self.axes.set_xlim(lowXlim,upXlim)
self.axes.set_ylim(lowYlim,upYlim)
self.axes.set_xlabel('UTM Easting')
self.axes.set_ylabel('UTM Northing')
self.axes.xaxis.tick_bottom()
self.canvas.draw()
def topoChartEvent(self,event):
self.topoChart()
def topoChart(self):
self.editSlider.Hide()
self.electrodeToggleBtn.Hide()
self.dataVizInput.Hide()
self.dataVizInputBtn.Hide()
self.saveEditsBtn.Hide()
self.currentChart = 'Topo'
self.dataVizMsg1.SetLabelText('Topo Data Viewer')
self.dataVizMsg2.SetLabelText(str(self.topoPath.stem))
self.getTopoVals()
self.figure.clear()
self.axes = self.figure.add_subplot(111)
#tick_spacing = 100
self.axes.scatter(self.topoDF['xDist'],self.topoDF['Elev'], s=5, marker='h')
self.axes.plot(self.topoDF['xDist'],self.topoDF['Elev'])
self.axes.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(100))
#self.axes.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(tick_spacing))
self.axes.ticklabel_format(axis='both',style='plain')
self.axes.grid(which='major', axis='both',color=(0.8,0.8,0.8))
self.axes.set_xlim(0-max(self.topoDF['xDist'])*.2,max(self.topoDF['xDist'])*1.2)
self.axes.set_ylim(min(self.topoDF['Elev'])*0.8,max(self.topoDF['Elev'])*1.2)
self.axes.set_xlabel('X-Distance Along Profile (m)')
self.axes.set_ylabel('Elevation Above MSL (m)')
self.axes.xaxis.tick_bottom()
self.canvas.draw()
def onSliderEditEVENT(self,event):
self.onSliderEdit()
def onSliderEdit(self):
self.sliderVal = float(self.editSlider.GetValue())
if self.currentChart == 'Graph':
if self.sliderVal in self.electrodes:
self.electrodeToggleBtn.Show()
toggleState = self.electState[int(self.electrodes.index(self.sliderVal))]
self.electrodeToggleBtn.SetValue(toggleState)
if toggleState == True:
self.dataVizMsg2.SetLabelText("Electrode at " + str(self.sliderVal) + " m is in use")
self.electrodeToggleBtn.SetLabelText('On')
self.electrodeToggleBtn.SetBackgroundColour((100, 255, 100))
else:
self.electrodeToggleBtn.SetLabelText('Off')
self.electrodeToggleBtn.SetBackgroundColour((255, 100, 100))
self.dataVizMsg2.SetLabelText("Electrode at " + str(self.sliderVal) + " m is not in use")
else:
self.dataVizMsg2.SetLabelText('No Electrode at this x-location')
self.electrodeToggleBtn.Hide()
elif self.currentChart == 'Stat':
currData = 0
for i in self.dataframeEDIT["Keep"]:
if i:
currData = currData + 1
peIndex = self.dataframeEDIT.columns.get_loc('PctErr')
dataCut = 0
for r in enumerate(self.dataframeEDIT.iloc[:, peIndex]):
if float(r[1]) >= float(self.sliderVal) / 100.0:
dataCut += 1
self.dataVizMsg2.SetLabelText(str(self.sliderVal)+'% Err: '+str(dataCut) + ' points will be deleted ('+
str(round(dataCut/currData*100,1))+'% of the current data).')
else:
self.dataVizMsg2.SetLabelText('Value: ' + str(self.sliderVal))
def onEditTypeToggle(self, event):
self.editTypeToggleState = self.editTypeToggleBtn.GetValue()
if self.editTypeToggleState == True:
self.editTypeToggleBtn.SetLabelText('Keep')
elif self.editTypeToggleState == False:
self.editTypeToggleBtn.SetLabelText('Remove')
def onSelectEditDataType(self,event):
choiceListInd = self.editDataType.GetSelection()
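        # Column indices in dataframeEDIT for each entry of editDataChoiceList; the list entry groups the four electrode x-distance columns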
colNumBase = [14, 13, [1, 3, 5, 7], 16, 19, 11, 12]
self.setEditToggleState = self.editDataChoiceBool[choiceListInd]
self.setEditToggleBtn.SetValue(self.setEditToggleState)
if float(self.editDataValues[choiceListInd][0]) == 0 and float(self.editDataValues[choiceListInd][1]) == 0:
#Set min value in box
if type(colNumBase[choiceListInd]) is list:
minVal = []
for i in colNumBase[choiceListInd]:
minVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].min())
minVal = min(minVal)
else:
minVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].min()
self.inputTxtMinRem.SetValue(str(minVal))
# Set max value in box
if type(colNumBase[choiceListInd]) is list:
maxVal = []
for i in colNumBase[choiceListInd]:
maxVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].max())
maxVal = max(maxVal)
else:
maxVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].max()
self.inputTxtMaxRem.SetValue(str(maxVal))
else:
self.inputTxtMinRem.SetValue(str(self.editDataValues[choiceListInd][0]))
self.inputTxtMaxRem.SetValue(str(self.editDataValues[choiceListInd][1]))
if self.setEditToggleState:
self.setEditToggleBtn.SetLabelText('Used')
else:
self.setEditToggleBtn.SetLabelText('Not Used')
def onSetEditToggle(self,event):
self.setEditToggleState = self.setEditToggleBtn.GetValue()
choiceListInd = self.editDataType.GetSelection()
if self.setEditToggleState == True:
if self.editDataType.GetSelection() > -1:
self.editDataChoiceBool[choiceListInd] = True
self.setEditToggleBtn.SetLabelText('Used')
else:
self.setEditToggleState = False
self.setEditToggleBtn.SetValue(False)
elif self.setEditToggleState == False:
if self.editDataType.GetSelection() > -1:
self.editDataChoiceBool[choiceListInd] = False
self.setEditToggleBtn.SetLabelText('Not Used')
self.setEditDataValues()
def onEditDataValueChangeEvent(self,event):
self.setEditDataValues()
def setEditDataValues(self):
choiceListInd = self.editDataType.GetSelection()
colNumBase = [14, 13, [1, 3, 5, 7], 16, 19, 11, 12]
#Set Min Value Box
if self.inputTxtMinRem.GetValue().isnumeric():
self.editDataValues[choiceListInd][0] = float(self.inputTxtMinRem.GetValue())
elif self.inputTxtMinRem.GetValue().lower() == 'min':
if type(colNumBase[choiceListInd]) is list:
minVal = []
for i in colNumBase[choiceListInd]:
minVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].min())
minVal = min(minVal)
else:
minVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].min()
self.inputTxtMinRem.SetValue(str(minVal))
self.editDataValues[choiceListInd][0] = float(minVal)
else:
pass
# self.editDataChoiceList = ['AppResist', 'Resistance', 'Electrode x-Dists', 'Variance', 'PctErr', 'PseudoX','PseudoZ']
#Set Max Value Box
if self.inputTxtMaxRem.GetValue().isnumeric():
self.editDataValues[choiceListInd][1] = float(self.inputTxtMaxRem.GetValue())
elif self.inputTxtMaxRem.GetValue().lower() == 'max':
if type(colNumBase[choiceListInd]) is list:
maxVal = []
for i in colNumBase[choiceListInd]:
maxVal.append(self.dataframeEDIT[self.dataframeEDITColHeaders[i]].max())
maxVal = max(maxVal)
else:
maxVal = self.dataframeEDIT[self.editDataChoiceList[choiceListInd]].max()
self.inputTxtMaxRem.SetValue(str(maxVal))
self.editDataValues[choiceListInd][1] = float(maxVal)
else:
pass
def onLogicToggle(self, event):
self.editLogicToggleState = self.editLogicToggleBtn.GetValue()
if self.editLogicToggleState == True:
self.editLogicToggleBtn.SetLabelText('AND')
elif self.editLogicToggleState == False:
self.editLogicToggleBtn.SetLabelText('OR')
def onRemovePts(self,event):
#self.editDataChoiceList = ['AppResist', 'Resistance', 'Electrode x-Dists', 'Variance', 'PctErr', 'PseudoX','PseudoZ']
self.setEditDataValues()
colNumBase = [14,13,[1,3,5,7],16,19,11,12]
colNums = []
for i in enumerate(colNumBase):
if self.editDataChoiceBool[i[0]]:
colNums.append(i[1])
colNames = self.dataframeEDIT.columns
if len(colNums) < 1:
pass
else:
if self.editLogicToggleBtn.GetLabelText() == 'AND': #AND
# Create list to hold items if they are to be acted on; starts all true, any false value makes false
editList = []
for k in range(0, self.dataLengthIN):
editList.append(1)
index = -1
for dTypeInUse in enumerate(self.editDataChoiceBool):
if dTypeInUse[1]:
for c in colNums:
if type(c) is list:
row = -1
for r in range(0, self.dataLengthIN):
row = row + 1
listBoolCt = 0
                                    for item in c:
                                        if self.dataframeEDIT.iloc[r, item] >= float(self.editDataValues[dTypeInUse[0]][0]) and self.dataframeEDIT.iloc[r, item] <= float(self.editDataValues[dTypeInUse[0]][1]):
                                            listBoolCt = listBoolCt + 1
if listBoolCt == 0:
editList[row] = 0
else: #if the columns are not a list of columns
#Iterate through each row in col c to see if it is in range
                                index = -1
                                for r in self.dataframeEDIT[colNames[c]]:
                                    index = index + 1
                                    if r < float(self.editDataValues[dTypeInUse[0]][0]) or r > float(self.editDataValues[dTypeInUse[0]][1]):
                                        editList[index] = 0
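                # Assumed completion: apply the accumulated AND mask to the Keep column, mirroring the OR branch below
                row = -1
                for flag in editList:
                    row = row + 1
                    if flag == 1:
                        if self.editTypeToggleBtn.GetLabelText() == 'Remove':
                            self.dataframeEDIT.loc[row, 'Keep'] = False
                        elif self.editTypeToggleBtn.GetLabelText() == 'Keep':
                            self.dataframeEDIT.loc[row, 'Keep'] = True
                    else:
                        if self.editTypeToggleBtn.GetLabelText() == 'Keep':
                            self.dataframeEDIT.loc[row, 'Keep'] = False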
elif self.editLogicToggleBtn.GetLabelText() == 'OR': #OR
for dTypeInUse in enumerate(self.editDataChoiceBool):
if dTypeInUse[1]:
for c in colNums:
if type(c) is list:
#Create editList if multiple columns involved
editList = []
for k in range(0, self.dataLengthIN):
editList.append(0)
for item in c:
row = -1
                                    for r in self.dataframeEDIT[colNames[item]]:
                                        row = row + 1
                                        if r >= float(self.editDataValues[dTypeInUse[0]][0]) and r <= float(self.editDataValues[dTypeInUse[0]][1]):
if self.editTypeToggleBtn.GetLabelText() == 'Remove':
self.dataframeEDIT.loc[row, 'Keep'] = False
elif self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = True
else:
pass
else:
if self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = False
else:
row = -1
for r in self.dataframeEDIT[colNames[c]]:
row = row + 1
if r >= float(self.editDataValues[dTypeInUse[0]][0]) and r <= float(self.editDataValues[dTypeInUse[0]][1]):
if self.editTypeToggleBtn.GetLabelText() == 'Remove':
self.dataframeEDIT.loc[row, 'Keep'] = False
elif self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = True
else:
pass
else:
if self.editTypeToggleBtn.GetLabelText() == 'Keep':
self.dataframeEDIT.loc[row, 'Keep'] = False
else:
pass
self.graphChart()
def ONtoggle(self,event):
self.ToggleState = self.electrodeToggleBtn.GetValue()
self.sliderVal = self.editSlider.GetValue()
if self.ToggleState == True:
self.dataVizMsg2.SetLabelText("Electrode at "+ str(self.sliderVal) +" m is in use")
self.electrodeToggleBtn.SetLabelText('On')
self.electrodeToggleBtn.SetBackgroundColour((100,255,100))
xCols = [0,1,2,3]
keep=[]
for c in xCols:
for r in enumerate(self.xDF.iloc[:,c]):
if float(r[1]) == float(self.sliderVal):
keep.append(r[0])
for i in self.dataframeEDIT.index:
if i in keep:
self.dataframeEDIT.loc[[i],['Keep']] = True
eIndex = int(self.electrodes.index(self.sliderVal))
self.electState[eIndex] = True
elif self.ToggleState == False:
self.electrodeToggleBtn.SetLabelText('Off')
self.electrodeToggleBtn.SetBackgroundColour((255,100,100))
self.dataVizMsg2.SetLabelText("Electrode at " + str(self.sliderVal) + " m is not in use")
xCols = [0,1,2,3]
lose=[]
for c in xCols:
for r in enumerate(self.xDF.iloc[:,c]):
if float(r[1]) == float(self.sliderVal):
lose.append(r[0])
for i in self.dataframeEDIT.index:
if i in lose:
self.dataframeEDIT.loc[[i],['Keep']] = False
            #change self.electState to False
eIndex = int(self.electrodes.index(self.sliderVal))
self.electState[eIndex] = False
else:
self.dataVizMsg2.SetLabelText("uhh, this is wierd")
dataRetained = 0
for i in self.dataframeEDIT["Keep"]:
if i:
dataRetained = dataRetained + 1
self.dataEditMsg.SetLabelText(str(dataRetained) + '/' + str(len(self.dataframeEDIT)) + 'pts (' + str(round(dataRetained/len(self.dataframeEDIT)*100,1)) + '%)')
self.graphChart()
def ONSaveEdits(self,event):
if self.currentChart == 'Graph':
#do nothing
pass
elif self.currentChart == 'Stat':
#self.sliderVal = float(self.editSlider.GetValue())
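            # Flag every reading whose percent error meets or exceeds the current slider threshold, then clear its Keep flag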
peIndex = self.dataframeEDIT.columns.get_loc('PctErr')
lose = []
for r in enumerate(self.dataframeEDIT.iloc[:, peIndex]):
if float(r[1]) >= float(self.sliderVal)/100.0:
lose.append(r[0])
kIndex = int(self.dataframeEDIT.columns.get_loc('Keep'))
for i in self.dataframeEDIT.index:
if i in lose:
self.dataframeEDIT.iloc[i, kIndex] = False
dataRetained = 0
for i in self.dataframeEDIT["Keep"]:
if i:
dataRetained = dataRetained + 1
self.dataEditMsg.SetLabelText(str(dataRetained) + '/' + str(len(self.dataframeEDIT)) + 'pts (' + str(
round(dataRetained / len(self.dataframeEDIT) * 100, 1)) + '%)')
self.statChart()
else:
pass
def ONdataVizInput(self,event):
if self.dataVizInput.GetValue().isnumeric():
if float(self.dataVizInput.GetValue()) < float(self.editSlider.GetMin()) or float(self.dataVizInput.GetValue()) > float(self.editSlider.GetMax()):
                self.dataVizMsg2.SetLabelText('Error: Value must be an integer between ' + str(self.editSlider.GetMin()) + ' and ' + str(self.editSlider.GetMax()))
else:
self.editSlider.SetValue(int(self.dataVizInput.GetValue()))
self.dataVizInput.SetValue('')
else:
self.dataVizInput.SetValue('Error: Value must be numeric')
self.onSliderEdit()
def reviewEvent(self,event):
self.reviewChart()
def reviewChart(self):
self.editSlider.Hide()
self.currentChart = 'Review'
self.dataVizMsg1.SetLabelText('Review Edits')
self.saveEditsBtn.Hide()
self.electrodeToggleBtn.Hide()
self.dataVizInput.Hide()
self.dataVizInputBtn.Hide()
x = []
z = []
v = []
pe = []
xOmit = []
zOmit = []
self.createExportDF()
for i in enumerate(self.dataframeEDIT['Keep']):
x.append(self.dataframeEDIT.loc[i[0], 'PseudoX'])
z.append(self.dataframeEDIT.loc[i[0], 'PseudoZ'])
v.append(self.dataframeEDIT.loc[i[0], 'AppResist'])
if i[1]:
pass
else:
xOmit.append(self.dataframeEDIT.loc[i[0],'PseudoX'])
zOmit.append(self.dataframeEDIT.loc[i[0],'PseudoZ'])
if 'scipy.interpolate' in sys.modules:
self.makeColormesh(x,z,v,pe,xOmit,zOmit)
else:
ptSize = round(100/self.maxXDist*125,1)
self.axes.scatter(x,z, c=v,edgecolors='black',s=ptSize, marker='h')
#self.axes.scatter(x,z, c=v,s=ptSize, marker='h')
#self.axes.scatter(xOmit,zOmit,c='black',s=ptSize-ptSize*0.333,marker = 'x')
#minz = min(z)
#maxz = max(z)
#zspace = (maxz-minz)/10
#self.axes.set_ylim(minz-zspace,maxz+zspace)
#self.axes.set_xlabel('X-Distance (m)')
#self.axes.set_ylabel('Elev/Depth (m)')
#self.axes.xaxis.tick_top()
#self.canvas.draw()
#pass
def getClosestElev(self):
if len(self.inputTxtTopo.GetValue())>0 and 'Enter Topo Filepath Here' not in self.inputTxtTopo.GetValue():
if self.topoDF['xDist'].max() > max(self.electrodes) or self.topoDF['xDist'].min() > min(self.electrodes):
if self.topoDF['xDist'].max() > max(self.electrodes):
wx.LogError("File format error. Maximum topo X-Distance is greater than maximum electrode X-Distance.")
else:
wx.LogError("File format error. Minimum topo X-Distance is less than minimum electrode X-Distance.")
else:
self.electrodeElevs = [[] for k in range(len(self.electrodes))]#blank list
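                # Linearly interpolate each electrode's elevation between the nearest topo point and its neighbor on the far side of the electrode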
for x in enumerate(self.electrodes):
elecxDist = x[1]
elecIndex = x[0]
index = np.argmin(np.abs(np.array(self.topoDF['xDist']) - elecxDist))#finds index of closest elevation
nearestTopoxDist = self.topoDF.loc[index,'xDist']
nearestTopoElev = self.topoDF.loc[index,'Elev']
if nearestTopoxDist == x[1]:
self.electrodeElevs[elecIndex] = nearestTopoElev
elif nearestTopoxDist >= x[1]:
mapNum = nearestTopoxDist - self.electrodes[elecIndex]
mapDenom = nearestTopoxDist - self.topoDF.loc[index-1,'xDist']
mVal = float(mapNum/mapDenom)
self.electrodeElevs[elecIndex] = nearestTopoElev - (nearestTopoElev-self.topoDF.loc[index-1,'Elev'])*mVal
else:
mapNum = self.electrodes[elecIndex] - nearestTopoxDist
mapDenom = self.topoDF.loc[index+1,'xDist']-nearestTopoxDist
mVal = float(mapNum/mapDenom)
                        self.electrodeElevs[elecIndex] = nearestTopoElev + (self.topoDF.loc[index+1,'Elev']-nearestTopoElev)*mVal
blankList = [[] for k in range(len(self.dataframeEDIT['Keep']))] # blank list
self.dataframeEDIT['SurfElevs'] = blankList
elecXDistColNames = ['A(x)','B(x)','M(x)','N(x)']
elecElevColNames = ['A(z)','B(z)','M(z)','N(z)']
elecXDistColNums = [1,3,5,7]
for c in enumerate(elecXDistColNums):
                    for x in enumerate(self.dataframeEDIT[elecXDistColNames[c[0]]]):
elecxDist = x[1]
elecIndex = x[0]
index = np.argmin(
np.abs(np.array(self.topoDF['xDist']) - elecxDist)) # finds index of closest elevation
nearestTopoxDist = self.topoDF.loc[index, 'xDist']
nearestTopoElev = self.topoDF.loc[index, 'Elev']
if nearestTopoxDist == x[1]:
self.dataframeEDIT.iloc[elecIndex,c[1]+1] = nearestTopoElev
elif nearestTopoxDist >= x[1]:
                            mapNum = nearestTopoxDist - elecxDist
mapDenom = nearestTopoxDist - self.topoDF.loc[index - 1, 'xDist']
mVal = float(mapNum / mapDenom)
self.dataframeEDIT.iloc[elecIndex,c[1]+1] = nearestTopoElev - (
nearestTopoElev - self.topoDF.loc[index - 1, 'Elev']) * mVal
else:
                            mapNum = elecxDist - nearestTopoxDist
mapDenom = self.topoDF.loc[index + 1, 'xDist'] - nearestTopoxDist
mVal = float(mapNum / mapDenom)
                            self.dataframeEDIT.iloc[elecIndex,c[1]+1] = nearestTopoElev + (
                                    self.topoDF.loc[index + 1, 'Elev'] - nearestTopoElev) * mVal
self.dataframeEDIT['PtElev'] = self.dataframeEDIT[elecElevColNames[c[0]]] - self.dataframeEDIT['PseudoZ']
else:
pass
if self.inputDataExt == '.DAT (LS)':
self.electrodeElevs = []
for x in enumerate(self.electrodes): #Checks if there's already elevation data??
found = 0
for xc in self.electrodeCols:
if found == 0:
for i in enumerate(self.dataframeEDIT.iloc[:,xc]):
if round(float(x[1]),2) == round(float(i[1]),2):
zc = xc + 1
found = 1
elev = self.dataframeEDIT.iloc[i[0],zc]
elif found == 1:
self.electrodeElevs.append(float(elev))
else:
wx.LogError("No Topography Data Found")
def onExportBrowse(self, event):
with wx.FileDialog(self, "Select Export Filepath", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return
self.exportPathname = pathlib.Path(fileDialog.GetPath())
try:
with open(self.exportPathname, 'r') as exportFile:
path = exportFile.name
self.exportTXT.SetValue(path)
except IOError:
wx.LogError("Cannot Open File")
def onExport(self, event):
dataDF,keepLength = self.createExportDF()
dataLeadDF = pd.DataFrame()
dataTailDF = pd.DataFrame()
#Create Data Lead
dataLeadDF[0] = self.dataLead
if self.inputDataExt == '.DAT (SAS)':
dataLeadList = []
dataLeadList.append(self.dataLead[0][0])
dataLeadList.append(self.dataLead[1][0])
dataLeadList.append(11)
dataLeadList.append(self.dataLead[2][0])
dataLeadList.append('Type of measurement (0=app.resistivity,1=resistance)')
dataLeadList.append(0)
dataLeadList.append(keepLength)
dataLeadList.append(2)
dataLeadList.append(0)
dataLeadDF = | pd.DataFrame(dataLeadList) | pandas.DataFrame |
# Copyright 2019 Systems & Technology Research, LLC
# Use of this software is governed by the license.txt file.
#!/usr/bin/env python3
import os
import glob
import dill as pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pdb
import itertools
from xfr import inpainting_game as inpaintgame
import warnings
import imageio
import skimage
from collections import defaultdict
from xfr import show
from xfr import utils
from xfr.utils import create_net
import pandas as pd
from skimage.transform import resize
import argparse
from collections import OrderedDict
import re
mpl.rcParams.update({'font.size':22})
mpl.use('agg')
import xfr
from xfr import inpaintgame2_dir
from xfr import xfr_root
from xfr import inpaintgame_saliencymaps_dir
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path # python 2 backport
regions = OrderedDict([
('jaw+cheek', (['chin', 'jawline', 'cheek'], {
'faceside': 'symmetric',
'dilate_radius': 3,
})),
('mouth', (['lips'], {
'faceside': 'symmetric',
'dilate_radius': 9,
})),
('nose', (['nasal base', 'nasal tip', 'nasal body'], {
'faceside': 'symmetric',
'dilate_radius': 9,
})),
('ear', (['ear'], {
'faceside': 'symmetric',
'dilate_radius': 15,
})),
('eye', (['eye'], {
'faceside': 'symmetric',
'dilate_radius': 5,
})),
('eyebrow', (['eyebrow'], {
'faceside': 'symmetric',
'dilate_radius': 5,
})),
# split by side of face
('left-face', (['eye', 'eyebrow', 'cheek', 'jawline'], {
'faceside': 'left',
'dilate_radius': 9,
})),
('right-face', (['eye', 'eyebrow', 'cheek', 'jawline'], {
'faceside': 'right',
'dilate_radius': 9,
})),
# split left and right face
('left-eye', (['eye'], {
'faceside': 'left',
'dilate_radius': 5,
})),
('right-eye', (['eye'], {
'faceside': 'right',
'dilate_radius': 5,
})),
])
regions_human_labels = {
0: 'Jaw+Cheek',
1: 'Mouth',
2: 'Nose',
3: 'Ears', # excluded
4: 'Eyes', # replaced with L/R Eye
5: 'Eyebrows',
6: 'Left face',
7: 'Right face',
8: 'Left eye',
9: 'Right eye',
167: 'L/R Face',
189: 'L/R Eye',
}
def overlap_mask(smap, img, gt_mask, pred_mask):
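    # Build an RGB overlay: dimmed image, gray = groundtruth inpainting region, green = true positives, red = false positives of the thresholded saliency map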
rgb = img / max(0.0001, img.max()) * 0.4
rgb[gt_mask] = np.array([0.6, 0.6, 0.6])
rgb[pred_mask & gt_mask] = np.array([0,1,0]) # True Pos
rgb[pred_mask & np.invert(gt_mask)] = np.array([1,0,0]) # False Pos
return rgb
def make_inpaintinggame_plots(net_dict, params, human_net_labels):
""" Runs inpainting analysis and generates plots.
net_dict should be a dictionary of networks. If a network doesn't
exist in net_dict, code will try to create it with create_net.
"""
if params['threshold_type'] == 'mass-threshold':
hgame_thresholds = np.append(np.arange(2e-3, 0, -5e-6), 0)
hgame_percentile = None
elif (params['threshold_type'] == 'percent' or
params['threshold_type'] == 'percent-pixels'):
params['threshold_type'] = 'percent-pixels'
hgame_thresholds = None
hgame_percentile = np.unique(np.sort(np.append(
100*np.exp(-np.arange(0,15,0.1)),
[0,100])))
elif params['threshold_type'] == 'percent-density': # <-- Standard
hgame_thresholds = None
hgame_percentile = np.unique(np.sort(np.append(
np.arange(0,100,1),
[0,100])))
else:
raise RuntimeError('Unknown threshold type %s '
'(try mass-threshold or percent)' %
params['threshold_type'])
# ----------------- Step 1. run analysis
nonmate_classification, inpainting_v2_data = (
run_inpaintinggame_analysis(hgame_thresholds, hgame_percentile,
params=params, net_dict=net_dict))
nonmate_classification['ORIG_MASK_ID'] = nonmate_classification['MASK_ID']
# - - - - - - Combined asymetric masks
for base_net, net_inp in inpainting_v2_data.groupby('NET'):
counts = {}
for mask_id, msk_grp in net_inp.groupby(['MASK_ID']):
counts[mask_id] = len(msk_grp.loc[net_inp['TRIPLET_SET'] == 'PROBE'])
net_data = nonmate_classification.loc[
nonmate_classification['NET'] == base_net]
for left, right in [
(6, 7),
(8, 9),
]:
nonmate_classification.loc[
(nonmate_classification['NET'] == base_net) & (
(nonmate_classification['MASK_ID']==left) |
(nonmate_classification['MASK_ID']==right)),
'MASK_ID'
] = (100 + 10*left + right)
# ----------------- Step 2. generate plots
generate_plots(nonmate_classification, hgame_thresholds, hgame_percentile,
params, human_net_labels)
for base_net, net_inp in inpainting_v2_data.groupby('NET'):
print('\n%s has %d inpainted triplet examples from %d subjects.' % (
base_net,
len(net_inp.loc[net_inp['TRIPLET_SET'] == 'PROBE']),
# len(net_inp['InpaintingFile'].unique()),
len(net_inp['SUBJECT_ID'].unique()),
))
for mask_id, msk_grp in net_inp.groupby(['MASK_ID']):
print('\tmask %d contains %d images from %d subjects.' % (
mask_id,
len(msk_grp.loc[net_inp['TRIPLET_SET'] == 'PROBE']),
# len(msk_grp['InpaintingFile'].unique()),
len(msk_grp['SUBJECT_ID'].unique()),
))
del msk_grp
output_dir = params['output_dir']
if params['output_subdir'] is not None:
output_subdir = os.path.join(output_dir, params['output_subdir'])
output_dir = output_subdir
numTriplets = defaultdict(dict)
for (base_net, method), method_data in nonmate_classification.groupby(['NET', 'METHOD']):
print('\n%s + %s has %d inpainted triplet examples from %d subjects.' % (
base_net,
method,
len(method_data),
len(method_data['SUBJECT_ID'].unique()),
))
for mask_id, msk_grp in method_data.groupby(['MASK_ID']):
print('\tmask %d contains %d examples from %d subjects.' % (
mask_id,
len(msk_grp),
len(msk_grp['SUBJECT_ID'].unique()),
))
# assume all methods have the same number of triplets for a network
numTriplets[base_net][mask_id] = len(msk_grp)
del msk_grp
for base_net, numTripletsMask in numTriplets.items():
fig_ds, ax_ds = plt.subplots(1,1, figsize=(6,4), squeeze=True)
x = np.array([0, 1, 2, 3, 5, 4]) # np.arange(len(numTripletsMask))
ax_ds.bar(x, numTripletsMask.values())
ax_ds.set_xticks(x)
ax_ds.set_xticklabels([regions_human_labels[key] for key in tuple(numTripletsMask.keys())], rotation=50)
fig_ds.subplots_adjust(top=1, bottom=0.5, left=0.2, right=0.98)
show.savefig('datasets-stats-%s.png' % base_net, fig_ds, output_dir=output_dir)
# - - - - - - Generate maskoverlaps
smap_root = (
'%s{SUFFIX_AGGR}/' %
params['smap_root']
)
smap_pattern = os.path.join(
smap_root,
'{NET}/subject_ID_{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/{ORIG_MASK_ID:05d}-{METHOD}-saliency.npz'
)
orig_pattern = os.path.join(
inpaintgame2_dir,
'aligned/{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/{ORIG_MASK_ID:05d}_truth.png'
)
mask_pattern = os.path.join(
inpaintgame2_dir,
'aligned/{SUBJECT_ID}/{ORIGINAL_BASENAME}/masks/{ORIG_MASK_ID:05d}.png'
)
for keys, grp in nonmate_classification.groupby(['NET', 'MASK_ID', 'METHOD']):
for row_num, (idx, row) in enumerate(grp.iterrows()):
if row['CLS_AS_TWIN'][-1] != 1:
# must be correctly classified at the end
# continue
stable_correct = len(row['CLS_AS_TWIN']) - 1
first_correct = len(row['CLS_AS_TWIN']) - 1
else:
stable_correct = np.max(np.where(row['CLS_AS_TWIN'] == 0)[0]) + 1
first_correct = np.min(np.where(row['CLS_AS_TWIN'] == 1)[0])
if row_num >= 40:
break
num_thresh_pixels_stable = (row['TRUE_POS'] +
row['FALSE_POS'])[stable_correct]
num_thresh_pixels_first = (row['TRUE_POS'] +
row['FALSE_POS'])[first_correct]
smap = np.load(smap_pattern.format(**row), allow_pickle=True)['saliency_map']
img = imageio.imread(orig_pattern.format(**row))
img = utils.center_crop(img, convert_uint8=False)
gt_mask = imageio.imread(mask_pattern.format(**row))
gt_mask = gt_mask.astype(bool)
smap_sorted = np.sort(smap.flat)[::-1]
threshold_first = smap_sorted[num_thresh_pixels_first]
# threshold_stable = smap_sorted[num_thresh_pixels_stable]
# top_smap_stable = smap > threshold_stable
top_smap_first = smap > threshold_first
if np.any(np.isnan(smap)):
import pdb
pdb.set_trace()
rgb = overlap_mask(smap, img, gt_mask, top_smap_first)
fpath = os.path.join(
output_dir,
keys[0],
'mask-%d' % row['MASK_ID'],
row['METHOD'],
'%s-%d-idflip.png' % (
row['ORIGINAL_BASENAME'].replace('/', '-'),
row['ORIG_MASK_ID']
))
Path(os.path.dirname(fpath)).mkdir(exist_ok=True, parents=True)
imageio.imwrite(fpath, (rgb*255).astype(np.uint8))
# if params['threshold_type'] == 'percent-density':
# inpainted_probe = utils.center_crop(
# imageio.imread(row['InpaintingFile']),
# convert_uint8=False)
# percentiles = []
# cls = []
# fprs = [0, 0.01, 0.05, 0.10]
# for fpr in fprs:
# closest = np.argmin(np.abs(fpr * row['NEG'] - row['FALSE_POS']))
# percentiles.append(hgame_percentile[closest])
# cls.append(row['CLS_AS_TWIN'][closest])
# fpr_masks = inpaintgame.create_threshold_masks(
# smap,
# threshold_method=params['threshold_type'],
# percentiles=np.array(percentiles),
# thresholds=None,
# seed=params['seed'],
# include_zero_elements=params['include_zero_saliency'],
# blur_sigma=params['mask_blur_sigma'],
# )
# for fpr_msk, fpr in zip(fpr_masks, fprs):
# rgb = overlap_mask(smap, 0*img, gt_mask, fpr_msk)
# fpath = os.path.join(
# output_dir,
# keys[0],
# 'mask-%d' % row['MASK_ID'],
# row['METHOD'],
# '%s-%d-at-%dfpr.png' % (
# row['ORIGINAL_BASENAME'].replace('/', '-'),
# row['ORIG_MASK_ID'],
# int(np.round(fpr * 100)),
# ))
# imageio.imwrite(fpath, (rgb*255).astype(np.uint8))
# fpath = os.path.join(
# output_dir,
# keys[0],
# 'mask-%d' % row['MASK_ID'],
# row['METHOD'],
# '%s-%d-at-%dfpr-mask.png' % (
# row['ORIGINAL_BASENAME'].replace('/', '-'),
# row['ORIG_MASK_ID'],
# int(np.round(fpr * 100)),
# ))
# imageio.imwrite(fpath, (fpr_msk*255).astype(np.uint8))
# blended_probe = img.copy()
# blended_probe[fpr_msk] = inpainted_probe[fpr_msk]
# fpath = os.path.join(
# output_dir,
# keys[0],
# 'mask-%d' % row['MASK_ID'],
# row['METHOD'],
# '%s-%d-at-%dfpr-blended.png' % (
# row['ORIGINAL_BASENAME'].replace('/', '-'),
# row['ORIG_MASK_ID'],
# int(np.round(fpr * 100)),
# ))
# imageio.imwrite(fpath, (blended_probe).astype(np.uint8))
# output_dir = os.path.join(
# subj_dir,
# 'sandbox/jwilliford/generated/Note_20191211_InpaintingGame2_Result_Plots')
def skip_combination(net, method, suffix_aggr):
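    # Skip method/net combinations that are redundant for this evaluation (e.g. VGG-specific EBP variants already covered by 'meanEBP_VGG')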
if net=='vgg' and (
method == 'tlEBPreluLayer'
or method == 'tlEBPposReflect'
or method == 'tlEBPnegReflect'
or method == 'meanEBP_VGG' # already included
# or method == 'ctscEBP'
):
return True
return False
human_labels_all = [
('diffOrigInpaint', 'Groundtruth'),
('inpaintingMask', 'Groundtruth - Inpainting Mask'),
('diffOrigInpaintEBP', 'Groundtruth via EBP'),
('diffOrigInpaintCEBP_median', 'cEBP Groundtruth (median)'),
('diffOrigInpaintCEBP_negW', 'cEBP Groundtruth (negW)'),
('meanEBP', 'Mean EBP'),
('tlEBP', 'Whitebox Triplet EBP'),
('tscEBP', 'Whitebox Triplet Similarity Contribution EBP'),
('ctscEBP', 'Whitebox Contrastive Triplet Similarity Contribution EBP'),
('ctscEBPv3', 'Whitebox Contrastive Triplet Similarity Contribution EBP v3'),
('ctscEBPv4',
'Whitebox Triplet Contribution CEBP v4',
'Whitebox Triplet Contribution CEBP',
),
('tsv2EBP', 'Whitebox Triplet Similarity (V2) EBP'),
('tsignEBP', 'Whitebox Triplet Sign EBP'),
('tsignCEBP', 'Whitebox Triplet Sign Contrastive EBP'),
('tsimCEBPv3',
'Whitebox Triplet Contrastive EBP v3',
'Whitebox Triplet CEBP',
),
('tsimPEBPv3',
'Whitebox Triplet EBP v3',
'Whitebox Triplet EBP',
),
('tsimCEBPv3unionSubtract',
'Whitebox Triplet Contrastive EBP (v3 union-sub)',
),
('tsimCEBPv3cross',
'Whitebox Triplet CEBP (v3 cross)',
'Whitebox Triplet CEBP (cross)',
),
('tsimCEBPv3.1', 'Whitebox Triplet Similarity Contrastive EBP (v3.1)'),
('tlEBPreluLayer', 'Whitebox Triplet EBP (from ReLU)'),
('tlEBPnegReflect', 'Whitebox Triplet EBP (neg reflect)'),
('tlEBPposReflect', 'Whitebox Triplet EBP (pos reflect)'),
('final', 'Blackbox Contrastive Triplet Similarity (2 elem)'),
('bbox-rise', 'DISE'),
('wb-rise', 'Whitebox PartialConv RISE'),
# ('pytorch-bb-rise', 'Blackbox RISE (PyTorch Implementation)'),
('pytorch-bb-bmay2rise', 'Blackbox Contrastive Triplet'),
('bb-bmay2rise', 'Blackbox RISE'),
('meanEBP_VGG', 'VGG Mean EBP'),
('meanEBP_ResNet', 'ResNet Mean EBP (Caffe)'),
('weighted_subtree_triplet_ebp', 'Subtree EBP'),
('contrastive_triplet_ebp', 'Contrastive EBP'),
('trunc_contrastive_triplet_ebp', 'Truncated cEBP'),
]
def get_base_methods(methods):
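    # Strip scale/variant suffixes (e.g. "_scale_8", "-2elem_", "_v3") so method slugs can be matched against the label table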
base_methods = [meth.split('_scale_')[0] for meth in methods]
base_methods = [meth.split('_trunc')[0] for meth in base_methods]
base_methods = [meth.split('-1elem_')[0] for meth in base_methods]
base_methods = [meth.split('-2elem_')[0] for meth in base_methods]
base_methods = [meth.split('-4elem_')[0] for meth in base_methods]
base_methods = [meth.split('_reluLayer')[0] for meth in base_methods]
base_methods = [meth.split('_mode')[0] for meth in base_methods]
base_methods = [meth.split('_v')[0] for meth in base_methods]
return base_methods
def get_method_labels(methods, lookup):
base_methods = get_base_methods(methods)
labels = []
for base_method in base_methods:
try:
labels.append(lookup[base_method])
except KeyError:
labels.append(base_method)
return labels
def backupMethods(method, inpainted_region, orig_imT, inp_imT, error):
""" If method could not be found, try to see if it is of known type.
Otherwise, throw passed error.
"""
if method == 'diffOrigInpaint':
smap = np.sum(np.abs(orig_imT - inp_imT), axis=0)
smap_blur = skimage.filters.gaussian(smap, 0.02 * max(smap.shape[:2]))
smap_blur[smap==0] = 0
smap = smap_blur
# smap -= smap.min()
smap /= smap.sum()
elif method.split('+')[0] == 'inpaintingMask':
smap0 = np.mean(np.abs(orig_imT - inp_imT), axis=0)
        smap = inpainted_region.astype(float)
smap = np.maximum(smap, smap0).astype(bool).astype(np.float)
smap = skimage.filters.gaussian(smap, 0.02 * max(smap.shape[:2]))
if method == 'inpaintingMask+noise':
noise = np.random.randn(*smap.shape) * 0.5
# smap = np.maximum(smap + noise, 0)
smap = np.abs(smap + noise)
# smap -= smap.min()
smap /= smap.sum()
else:
raise error
return smap
human_net_labels_ = OrderedDict([
('vgg', 'VGG'),
('resnet', 'ResNet'),
('resnet_pytorch', 'ResNet (PyTorch)'),
('resnetv4_pytorch', 'ResNet v4'),
('resnetv6_pytorch', 'ResNet v6'),
('resnet+compat-orig', 'ResNet Fix Orig'),
    ('resnet+compat-scale1', 'ResNet Fix V2'),
])
def tickformatter(x, pos):
if float.is_integer(x):
return '%d%%' % x
else:
return ''
# Classified As Nonmate
def config_axis(ax, leftmost=True):
if leftmost:
ax.set(
ylabel='Probability Non-mate',
)
ax.set(
xlabel='Top % of Salience Map - Replaced with Inpainted Twin',
xscale='symlog',
# yscale='symlog',
)
ax.grid(which='both', linestyle=':')
ax.xaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
# ax.yaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
def config_axis_iou(ax, leftmost=True):
if leftmost:
ax.set(
ylabel='IOU with Groundtruth',
)
ax.set(xlabel='Top % of Salience Map - Replaced with Inpainted Twin',
xscale='symlog',
# yscale='symlog',
)
ax.grid(which='both', linestyle=':')
ax.xaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
# ax.yaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
def avg_class_prob(grp, classifyCol, balance_masks):
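    # Average the per-image classification curves; with balance_masks, average within each mask region first so regions contribute equally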
if balance_masks:
prob_nonmates_mask = dict()
for mask_id, mask_grp in grp.groupby('MASK_ID'):
prob_nonmates_mask[mask_id] = np.stack(
mask_grp[classifyCol].values.tolist()).mean(axis=0)
cls_as_nonmate = np.stack([*prob_nonmates_mask.values()]).mean(axis=0)
else:
cls_as_nonmate = np.stack(grp[classifyCol].values).mean(axis=0)
# cls_as_nonmate = np.minimum(1, np.maximum(0, cls_as_nonmate))
return cls_as_nonmate
def plot_roc_curve(ax, grp, hnet, label,
method_idx, balance_masks, leftmost=True, classifyCol='CLS_AS_TWIN'):
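    # Plot an ROC-style curve (TPR vs FPR of the thresholded saliency maps against the groundtruth inpainting masks) for one method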
cls_as_nonmate = avg_class_prob(grp, classifyCol, balance_masks)
# cls_as_nonmate = np.minimum(1, np.maximum(0, cls_as_nonmate))
fpos = np.stack(grp['FALSE_POS'].values).sum(axis=0)
neg = np.stack(grp['NEG'].values).sum()
fpr = fpos.astype(np.float64) / neg
tpos = np.stack(grp['TRUE_POS'].values).sum(axis=0)
pos = np.stack(grp['POS'].values).sum()
tpr = tpos.astype(np.float64) / pos
ax.plot(100*fpr,
100*tpr,
color='C%d' % (method_idx + 1),
label=label,
)
if hnet is not None:
ax.set_title(hnet)
if leftmost:
# ax.set(
# ylabel='Probability Non-mate',
# )
ax.set(
ylabel='True Positive Rate\n(Sensitivity)',
)
ax.set(
xlabel='False Positive Rate\n(1-Specificity)',
)
ax.grid(which='both', linestyle=':')
ax.xaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
ax.yaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
ax.legend() # loc='upper center', bbox_to_anchor=(0.5, -0.1))
def plot_cls_vs_fpr(ax, grp, hnet, label,
method_idx,
balance_masks,
leftmost=True, classifyCol='CLS_AS_TWIN'):
cls_as_nonmate = avg_class_prob(grp, classifyCol, balance_masks)
fpos = np.stack(grp['FALSE_POS'].values.tolist()).sum(axis=0)
neg = np.stack(grp['NEG'].values.tolist()).sum()
fpr = fpos.astype(np.float64) / neg
cls_at_fpr = dict()
for target in [1e-2, 5e-2]:
fpr_inds = np.argsort(np.abs(fpr - target))[:2]
closest_fprs = fpr[fpr_inds]
dists = np.abs(closest_fprs - target)
# linearly interpolate
w = 1/(dists+1e-9)
w = w / np.sum(w)
cls_at_fpr[target] = np.sum(w*cls_as_nonmate[fpr_inds])
line, = ax.plot(100*fpr,
100*cls_as_nonmate,
color='C%d' % (method_idx + 1),
# linestyle='--' if scale == 8 else '-',
label=label,
# linestyle='-' if ni==0 else '--',
linewidth=2,
)
if hnet is not None:
ax.set_title(hnet)
# config_axis(ax, leftmost=leftmost)
if leftmost:
# ax.set(
# ylabel='Probability Non-mate',
# )
ax.set(
ylabel='Classified as Inpainted Non-mate',
)
ax.set(
xscale='symlog',
xlabel='False Alarm Rate',
xlim=(0,100),
)
ax.grid(which='both', linestyle=':')
ax.xaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
ax.yaxis.set_major_formatter(plt.FuncFormatter(tickformatter))
ax.legend() # loc='upper center', bbox_to_anchor=(0.5, -0.1))
return line, cls_at_fpr
def method_label_and_idx(method, methods, human_net_labels, net=None):
if net is not None:
try:
short_hnet = human_net_labels[net].split(' ')[0] + ' '
except KeyError:
short_hnet = net
warnings.warn('Net %s does not have entry in human_net_labels.' %
net
)
else:
short_hnet = ''
base_methods = get_base_methods(methods)
human_labels = [(tup[0], tup[1], tup[1] if len(tup)==2 else tup[2])
for tup in human_labels_all if
tup[0] in methods or
tup[0] in base_methods
]
human_labels_simplified = [
(key, slabel) for key, _, slabel in human_labels
]
human_labels = [(key, label) for key, label, _ in human_labels]
human_labels_lookup = OrderedDict(human_labels)
human_labels_simp_lookup = OrderedDict(human_labels_simplified)
try:
method_idx = np.where([
lbl == method for lbl in methods])[0][0]
label = get_method_labels([method], human_labels_lookup)[0]
slabel = get_method_labels([method], human_labels_simp_lookup)[0]
paren_strs = []
sparen_strs = []
try:
mat = re.search('pytorch-', method)
if mat is not None:
paren_strs.append('PyTorch/WIP')
sparen_strs.append('PyTorch/WIP')
except AttributeError:
pass
scale = None
nelems = None
try:
scale = re.search('_scale_([0-9+]*[0-9])', method).group(1)
if scale != '12':
paren_strs.append('Scale ' + scale)
sparen_strs.append('Scale ' + scale)
except AttributeError:
pass
try:
nelems = re.search('-([0-9]+)elem', method).group(1)
if int(nelems) > 1:
paren_strs.append(nelems + ' Elems')
except AttributeError:
pass
fill = None
blur_sigma = None
try:
mat = re.search('_(blur)=([0-9]+)', method)
if mat is not None:
fill = mat.group(1)
blur_sigma = mat.group(2)
paren_strs.append('Blur fill')
# sparen_strs.append('Blur fill')
if blur_sigma != '4':
paren_strs.append('Sigma ' + blur_sigma + '%')
# sparen_strs.append('Sigma ' + blur_sigma + '%')
mat = re.search('_(gray)', method)
if mat is not None:
fill = mat.group(1)
paren_strs.append('Gray fill')
sparen_strs.append('Gray fill')
mat = re.search('_(partialconv)_', method)
if mat is not None:
fill = mat.group(1)
paren_strs.append('Partial conv')
sparen_strs.append('Partial conv')
except AttributeError:
pass
try:
mat = re.search('_reluLayer', method)
if mat is not None:
paren_strs.append('ReLU')
except AttributeError:
pass
try:
topk = int(re.search('_top([0-9]+)', method).group(1))
paren_strs.append('Top %d' % topk)
# sparen_strs.append('Top %d' % topk)
except AttributeError:
pass
try:
ver = int(re.search('_v([0-9]+)', method).group(1))
paren_strs.append('V%d' % ver)
# sparen_strs.append('V%d' % ver)
except AttributeError:
pass
try:
pct_thresh = int(re.search('_pct([0-9]+)', method).group(1))
paren_strs.append('Thresh %d%%' % pct_thresh)
# sparen_strs.append('Threshold %d%%' % pct_thresh)
except AttributeError:
pass
try:
trunc = re.search('_trunc([0-9]+)', method).group(1)
paren_strs.append('Trunc ' + trunc + '% Pos')
sparen_strs.append('Truncated')
except AttributeError:
pass
if len(paren_strs) > 0:
label = '%s (%s)' % (label, ', '.join(paren_strs))
if len(sparen_strs) > 0:
slabel = '%s (%s)' % (slabel, ', '.join(sparen_strs))
# except (IndexError, KeyError) as e:
except KeyError as e:
# This is actually handled in get_method_labels now.
# try:
# # method did not exist ...
# meth, scale = method.split('_scale_')
# method_idx = np.where([
# lbl == meth for lbl in human_labels_lookup.keys()])[0][0]
# scale_suffix = ' (%s)' % scale
# label = human_labels_lookup[meth].format(NET=short_hnet) + scale_suffix
# slabel = human_labels_simp_lookup[meth].format(NET=short_hnet) + scale_suffix
# except ValueError, KeyError:
# fallback
label = method
slabel = method
assert method_idx < 10 # not supported by cmap used
return label, method_idx, slabel
def run_inpaintinggame_analysis(hgame_thresholds, hgame_percentile, params,
net_dict,
):
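    # Compute per-image "classified as inpainted twin" curves and saliency/groundtruth IoU statistics, caching each (net, subject, mask, method) result on disk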
output_dir = params['output_dir']
cache_dir = params['cache_dir']
try:
Path(cache_dir).mkdir(exist_ok=True)
except PermissionError:
        raise PermissionError('[Errno 13]: permission denied: \'%s\'! Please specify '
                              '\'--cache-dir\' parameter!' % cache_dir)
params['SUFFIX_AGGR'] = ['']
reprocess = params['reprocess']
seed = params['seed']
if params['output_subdir'] is not None:
output_subdir = os.path.join(output_dir, params['output_subdir'])
output_dir = output_subdir
# os.environ['PWEAVE_OUTPUT_DIR'] = output_dir
# os.makedirs(output_dir, exist_ok=True)
Path(output_dir).mkdir(exist_ok=True, parents=True)
# csv_output_dir = os.path.join(
# output_dir,
# 'csv-data')
# Path(csv_output_dir).mkdir(exist_ok=True)
# include mates and non-mates
# smap_root = (
# '%s{SUFFIX_AGGR}/' %
# inpaintgame_saliencymaps_dir
# )
smap_root = (
'%s{SUFFIX_AGGR}/' %
params['smap_root']
)
inpainting_v2_data = dict([(net, pd.read_csv(os.path.join(
inpaintgame2_dir,
# 'filtered_masks.csv'))
'filtered_masks_threshold-{NET}.csv'.format(NET=net))))
for net in params['NET']]) # 'vgg', 'resnet']])
# '8/img/240/masks/000.png'
subj_csv_pattern = os.path.join(
inpaintgame2_dir,
'subj-{SUBJECT_ID:d}.csv'
)
subj_csv_glob = os.path.join(
inpaintgame2_dir,
'subj-*.csv'
)
smap_pattern = os.path.join(
smap_root,
# '{NET}/subject_ID_{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/{MASK_ID}-{METHOD}-saliency.npz'
'{NET}/subject_ID_{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/{MASK_ID:05d}-{METHOD}-saliency.npz'
)
orig_pattern = os.path.join(
inpaintgame2_dir,
'aligned/{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/{MASK_ID:05d}_truth.png'
)
mask_pattern = os.path.join(
inpaintgame2_dir,
'aligned/{SUBJECT_ID}/{ORIGINAL_BASENAME}/masks/{MASK_ID:05d}.png'
)
for net in inpainting_v2_data.keys():
inpainting_v2_data[net]['OriginalFile'] = [
orig_pattern.format(**row) for _, row in inpainting_v2_data[net].iterrows()
]
inpainting_v2_data[net]['NET'] = net
all_subj_data = []
if params['SUBJECT_ID'] is None:
for subj_csv in glob.glob(subj_csv_glob):
all_subj_data.append(
pd.read_csv(subj_csv))
else:
for subject_id in params['SUBJECT_ID']:
all_subj_data.append(
pd.read_csv(subj_csv_pattern.format(SUBJECT_ID=subject_id)))
all_subj_data = pd.concat(all_subj_data)
if params['SUBJECT_ID'] is None:
params['SUBJECT_ID'] = all_subj_data['SUBJECT_ID'].unique().tolist()
all_subj_data['ORIGINAL_BASENAME'] = [
os.path.splitext(fn)[0] for fn in all_subj_data['ORIGINAL_FILE'].values]
def get_base_net(net):
return net.split('+')[0]
nonmate_cache_fns = set()
for net in params['NET']:
base_net = get_base_net(net)
net_inp = inpainting_v2_data[base_net]
print('\n%s has %d inpainted triplet examples from %d subjects.' % (
base_net,
len(net_inp.loc[net_inp['TRIPLET_SET'] == 'PROBE']),
# len(net_inp['InpaintingFile'].unique()),
len(net_inp['SUBJECT_ID'].unique()),
))
for mask_id, msk_grp in net_inp.groupby(['MASK_ID']):
print('\tmask %d contains %d images from %d subjects.' % (
mask_id,
len(msk_grp.loc[net_inp['TRIPLET_SET'] == 'PROBE']),
# len(msk_grp['InpaintingFile'].unique()),
len(msk_grp['SUBJECT_ID'].unique()),
))
del msk_grp
combined_inpaintings = pd.concat(
inpainting_v2_data.values(),
ignore_index=True,
)
net_inp = combined_inpaintings
print('\nCombined nets have %d inpainted triplet examples from %d subjects.' % (
len(net_inp.loc[net_inp['TRIPLET_SET'] == 'PROBE']),
# len(net_inp['InpaintingFile'].unique()),
len(net_inp['SUBJECT_ID'].unique()),
))
for mask_id, msk_grp in net_inp.groupby(['MASK_ID']):
print('\tmask %d contains %d images from %d subjects.' % (
mask_id,
len(msk_grp['InpaintingFile'].unique()),
len(msk_grp['SUBJECT_ID'].unique()),
))
print(combined_inpaintings.columns)
inpainting_v2_data = combined_inpaintings
snet = None
classified_as_nonmate = [] # using operational threshold
for net_name in params['NET']:
base_net = get_base_net(net_name)
subjs_net_inp = inpainting_v2_data.loc[
(inpainting_v2_data['NET'] == base_net) &
(inpainting_v2_data['SUBJECT_ID'].isin(params['SUBJECT_ID']))]
if params['IMG_BASENAME'] is not None:
subjs_net_inp = subjs_net_inp.loc[
(subjs_net_inp['ORIGINAL_BASENAME'].isin(params['IMG_BASENAME'])) |
(subjs_net_inp['TRIPLET_SET'] == 'REF')
]
for (subject_id, mask_id), ip2grp in subjs_net_inp.groupby(
['SUBJECT_ID', 'MASK_ID']
):
if mask_id not in params['MASK_ID']:
continue
ebp_version = None # should need to be set, don't calculate EBP
if snet is None or snet.net_name != net_name:
snet = create_net(net_name, ebp_version=ebp_version,
net_dict=net_dict)
snet.net_name = net_name
ip2ref = ip2grp.loc[ip2grp['TRIPLET_SET']=='REF']
mate_embeds = snet.embeddings(
[os.path.join(inpaintgame2_dir, fn) for fn in ip2ref['OriginalFile']]
)
mate_embeds = mate_embeds / np.linalg.norm(mate_embeds, axis=1, keepdims=True)
original_gal_embed = mate_embeds.mean(axis=0, keepdims=True)
original_gal_embed = original_gal_embed / np.linalg.norm(original_gal_embed, axis=1, keepdims=True)
nonmate_embeds = snet.embeddings(
[os.path.join(inpaintgame2_dir, fn) for fn in ip2ref['InpaintingFile']]
)
nonmate_embeds = nonmate_embeds / np.linalg.norm(nonmate_embeds, axis=1, keepdims=True)
inpaint_gal_embed = nonmate_embeds.mean(axis=0, keepdims=True)
inpaint_gal_embed = inpaint_gal_embed / np.linalg.norm(inpaint_gal_embed, axis=1, keepdims=True)
# probes need to be combined with inpainted versions
ip2probe = ip2grp.loc[ip2grp['TRIPLET_SET']=='PROBE']
original_imITF = snet.preprocess_loader(
[os.path.join(inpaintgame2_dir, fn) for fn in ip2probe['OriginalFile']]
)
inpaint_imITF = snet.preprocess_loader(
[os.path.join(inpaintgame2_dir, fn) for fn in ip2probe['InpaintingFile']]
)
for (
(idx, row),
(orig_im, orig_imT,
orig_fn),
(inp_im, inp_imT,
inp_fn)) in zip(
ip2probe.iterrows(),
original_imITF,
inpaint_imITF,
):
try:
orig_imT = orig_imT.cpu().numpy()
inp_imT = inp_imT.cpu().numpy()
except AttributeError:
# for caffe
pass
for method, suffix_aggr in itertools.product(
params['METHOD'],
params['SUFFIX_AGGR'],
):
# print('Net %s, Subj %d, Mask %d, Method %s' % (net, subject_id, mask_id, method))
if skip_combination(net=net, method=method, suffix_aggr=suffix_aggr):
continue
def calc_twin_cls():
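                            # Load (or reconstruct) the saliency map, then sweep thresholds: blend the top-salient inpainted pixels into the probe and record whether the match flips to the inpainted twin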
d = row.to_dict()
d['METHOD'] = method
if method == 'meanEBP_VGG':
d['NET'] = 'vgg'
d['METHOD'] = method.split('_')[0]
elif method == 'meanEBP_ResNet':
d['NET'] = 'resnet+compat-scale1'
d['METHOD'] = method.split('_')[0]
d['SUFFIX_AGGR'] = suffix_aggr
# if (
# method.startswith('wb-rise') or
# method.startswith('pytorch-')
# ):
# # wb-rise doesn't need the ebp fixes
# d['NET'] = base_net
# else:
# d['NET'] = net
smap_filename = smap_pattern.format(**d)
try:
if method.split('+')[0] == 'inpaintingMask':
raise IOError
smap = np.load(smap_filename)['saliency_map']
except IOError as e:
mask_filename = mask_pattern.format(**d)
inpainted_region = imageio.imread(mask_filename)
smap = backupMethods(method, inpainted_region,
orig_imT, inp_imT, e)
np.savez_compressed(smap_filename, saliency_map=smap)
smap = resize(smap, orig_imT.shape[1:], order=0) # interp='nearest')
smap /= smap.sum()
print(smap.max())
nonmate_embed = snet.embeddings([inp_fn])
# cls, pg_dist, pr_dist = inpaintgame.inpainting_twin_game_percent_twin_classified(
cls, pg_dist, pr_dist = (
inpaintgame.classified_as_inpainted_twin(
snet,
orig_imT,
inp_imT,
original_gal_embed,
inpaint_gal_embed,
smap,
mask_threshold_method=params['threshold_type'],
thresholds=hgame_thresholds,
percentiles=hgame_percentile,
seed=seed,
include_zero_elements=params['include_zero_saliency'],
mask_blur_sigma=params['mask_blur_sigma'],
))
return cls, pg_dist, pr_dist
if params['threshold_type'] == 'percent-density':
threshold_method_slug = 'pct-density%d' % (
len(hgame_percentile))
elif hgame_thresholds is not None:
threshold_method_slug='Thresh%d' % len(hgame_thresholds)
else:
                        threshold_method_slug = 'Percentile%d' % len(hgame_percentile)
cache_fn = (
# 'inpainted-id-hiding-game-twin-cls'
'inpainted-id-hiding-game-twin-cls-dists'
# operational-thresh{THRESH:0.4f}'
'-{SUBJECT_ID}-{MASK_ID}-{ORIGINAL_BASENAME}-0'
'-{NET}-{METHOD}{SUFFIX_AGGR}{SEED}-RetProb_'
'MskBlur{MASK_BLUR_SIGMA}-'
'{THRESHOLDS}{ZERO_SALIENCY_SUFFIX}'
).format(
SUBJECT_ID=subject_id,
ORIGINAL_BASENAME=row['ORIGINAL_BASENAME'],
METHOD=method,
NET=net,
THRESH=snet.match_threshold,
SUFFIX_AGGR=suffix_aggr,
SEED='' if seed is None else '-Seed%d' % seed,
MASK_ID=mask_id,
THRESHOLDS=threshold_method_slug,
ZERO_SALIENCY_SUFFIX='ExcludeZeroSaliency' if
not params['include_zero_saliency'] else '',
MASK_BLUR_SIGMA=params['mask_blur_sigma'],
)
assert cache_fn not in nonmate_cache_fns, (
'Are you displaying the same method multiple times?'
)
nonmate_cache_fns.add(cache_fn)
def calc_saliency_intersect_over_union():
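                            # Compare thresholded saliency maps against the groundtruth inpainting mask: IoU plus raw true/false positive counts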
d = row.to_dict()
d['METHOD'] = method
d['SUFFIX_AGGR'] = suffix_aggr
# if (
# method.startswith('wb-rise') or
# method.startswith('pytorch-')
# ):
# # wb-rise doesn't need the ebp fixes
# d['NET'] = base_net
# else:
# d['NET'] = net
if method == 'meanEBP_VGG':
d['NET'] = 'vgg'
d['METHOD'] = method.split('_')[0]
elif method == 'meanEBP_ResNet':
d['NET'] = 'resnet+compat-scale1'
d['METHOD'] = method.split('_')[0]
mask_filename = mask_pattern.format(**d)
inpainted_region = imageio.imread(mask_filename)
try:
if method == 'diffOrigInpaint':
raise IOError
smap_filename = smap_pattern.format(**d)
smap = np.load(smap_filename)['saliency_map']
except IOError as e:
smap = backupMethods(method, inpainted_region,
orig_imT, inp_imT, e)
# smap = resize(smap, orig_imT.shape[1:], order=0)
# smap = resize(smap, (224, 224), order=0)
smap /= smap.sum()
# inpainted_region = resize(
# inpainted_region, orig_imT.shape[1:], order=0)
nonmate_embed = snet.embeddings([inp_fn])
neg = np.sum(inpainted_region == 0)
pos = np.sum(inpainted_region != 0)
saliency_gt_overlap, fp, tp = inpaintgame.intersect_over_union_thresholded_saliency(
smap,
inpainted_region,
mask_threshold_method=params['threshold_type'],
thresholds=hgame_thresholds,
percentiles=hgame_percentile,
seed=seed,
include_zero_elements=params['include_zero_saliency'],
return_fpos=True,
return_tpos=True,
)
return saliency_gt_overlap, fp, neg, tp, pos
try:
cls_twin, pg_dist, pr_dist = utils.cache_npz(
cache_fn,
# calc_nonmate_cls,
calc_twin_cls,
reprocess_=reprocess,
cache_dir=cache_dir,
save_dict_={
'hgame_thresholds': hgame_thresholds,
'hgame_percentile': hgame_percentile,
}
)
saliency_gt_iou, false_pos, neg, true_pos, pos = utils.cache_npz(
('inpainted-id-hiding-game-saliency-IoU-withcomp-py3'
'-{SUBJECT_ID}-{MASK_ID}-{ORIGINAL_BASENAME}-0'
'-{NET}-{METHOD}{SUFFIX_AGGR}_{THRESHOLDS}{ZERO_SALIENCY_SUFFIX}').format(
SUBJECT_ID=subject_id,
ORIGINAL_BASENAME=row['ORIGINAL_BASENAME'],
METHOD=method,
NET=net,
THRESH=snet.match_threshold,
SUFFIX_AGGR=suffix_aggr,
MASK_ID=mask_id,
THRESHOLDS=threshold_method_slug,
ZERO_SALIENCY_SUFFIX='ExcludeZeroSaliency' if
not params['include_zero_saliency'] else '',
),
calc_saliency_intersect_over_union,
reprocess_=reprocess,
# reprocess_=True,
cache_dir=cache_dir,
save_dict_={
'hgame_thresholds': hgame_thresholds,
'hgame_percentile': hgame_percentile,
}
)
classified_as_nonmate.append((
net,
method,
row['ORIGINAL_BASENAME'],
inp_fn,
suffix_aggr,
subject_id,
mask_id,
np.nan, # cls_nonmate,
np.nan, # cls_nonmate[0],
np.nan, # cls_nonmate[-1],
cls_twin,
cls_twin[0],
cls_twin[-1],
saliency_gt_iou,
false_pos,
neg,
true_pos,
pos,
))
if (params['include_zero_saliency'] and
false_pos[-1] != neg
):
raise RuntimeError(
'False positive value for last threshold should be'
' the number of negative elements (%d), but is %d.'
% (neg, false_pos[-1]))
except IOError as e:
if not params['ignore_missing_saliency_maps']:
raise e
# for ret in classified_as_nonmate:
# ret.get(999999999)
print('\nNumber of nonmate classification cache files: %d\n' % len(nonmate_cache_fns))
nonmate_classification = pd.DataFrame(classified_as_nonmate, columns=[
'NET',
'METHOD',
'ORIGINAL_BASENAME',
'InpaintingFile',
'SUFFIX_AGGR',
'SUBJECT_ID',
'MASK_ID',
'CLS_AS_NONMATE',
'Orig_Cls_Nonmate',
'Twin_Cls_Nonmate',
'CLS_AS_TWIN',
'Orig_Cls_Twin',
'Twin_Cls_Twin',
'SALIENCY_GT_IOU',
'FALSE_POS',
'NEG',
'TRUE_POS',
'POS',
])
# merge asymmetric regions
# nonmate_classification.loc[
# nonmate_classification['MASK_ID']==8, 'MASK_ID'] = 4
# nonmate_classification.loc[
# nonmate_classification['MASK_ID']==9, 'MASK_ID'] = 4
# nonmate_classification.loc[
# nonmate_classification['MASK_ID']==6, 'MASK_ID'] = 6
# nonmate_classification.loc[
# nonmate_classification['MASK_ID']==7, 'MASK_ID'] = 6
assert (
len(nonmate_classification['SUBJECT_ID'].unique()) <=
len(params['SUBJECT_ID'])), (
'Number of subjects not equal!'
)
with open(os.path.join(cache_dir, 'nonmate-cls.pkl'), 'wb') as f:
pickle.dump(nonmate_classification, f)
try:
base_method = params['METHOD'][0]
nonmate_classification['ComparedToBase'] = 0.0 # large value good
for keys, grp in nonmate_classification.groupby(
['NET',
'ORIGINAL_BASENAME',
'MASK_ID',
'InpaintingFile',
]
):
base = grp.loc[grp['METHOD']==base_method].iloc[0]
for idx, row in grp.iterrows():
nonmate_classification.loc[idx, 'ComparedToBase'] = (
row['CLS_AS_TWIN'].mean() - base['CLS_AS_TWIN'].mean()
)
# for far_value in np.arange(0.10, 0, -0.01):
# FAR = np.mean(np.stack(grp['FALSE_POS'] / grp['NEG']), axis=0)
# try:
# ind_lt = np.where(np.diff(FAR < far_value))[0][0]
# # ind_gt = ind_lt + 1
# cls_as_twin = np.stack(grp['CLS_AS_TWIN'])[:, ind_lt:ind_lt+2].mean()
# except IndexError:
# cls_as_twin = np.nan
#
# print('%s, Mask %d, Method %s%s:\t%0.2f' % (
# net, mask_id, method, suffix_aggr, cls_as_twin))
# cls_at_far.append(
# (net, mask_id, method, suffix_aggr, cls_as_twin, far_value))
        nonbase = nonmate_classification.loc[nonmate_classification['METHOD'] != base_method].copy()
bad_thresh, good_thresh = (
np.percentile(nonbase['ComparedToBase'].values, (1, 99)))
nonbase.sort_values('ComparedToBase', inplace=True)
print('\nThe below images did particularly worse compared to base method:')
for idx, row in ( nonbase.loc[nonbase['ComparedToBase']
< bad_thresh].iterrows()):
print((' {NET}/subject_ID_{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/'
'{MASK_ID:05d}-{METHOD}*'
' ({ComparedToBase:0.04f})'.format(**row))
)
nonbase.sort_values('ComparedToBase', ascending=False, inplace=True)
print('\nThe below images did particularly better compared to base method:')
for idx, row in ( nonbase.loc[nonbase['ComparedToBase']
> good_thresh].iterrows()):
print((' {NET}/subject_ID_{SUBJECT_ID}/{ORIGINAL_BASENAME}/inpainted/'
'{MASK_ID:05d}-{METHOD}*'
' ({ComparedToBase:0.04f})'.format(**row))
)
print('\n')
    except Exception:
        pass
return nonmate_classification, inpainting_v2_data
def generate_plots(nonmate_classification, hgame_thresholds, hgame_percentile,
params,
human_net_labels,
):
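    # Aggregate the per-image curves into summary figures: per-network plots, simplified variants, per-mask plots, and a legend-only figure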
output_dir = params['output_dir']
if params['output_subdir'] is not None:
output_subdir = os.path.join(output_dir, params['output_subdir'])
output_dir = output_subdir
balance_masks = params['balance_masks']
unequal_method_entries = False
nonmate_classification_clone = nonmate_classification.copy(deep=True)
for net, grp0 in nonmate_classification.groupby(['NET']):
num_entries = None
for method, grp1 in grp0.groupby(['METHOD']):
print('%s %s has %d entries.' % (net, method, len(grp1)))
if num_entries is None:
num_entries = len(grp1)
elif num_entries != len(grp1):
unequal_method_entries = True
net_indices = OrderedDict(
[(net, ni) for ni, net in enumerate(params['NET'])]
)
cNets = len(net_indices)
print('#nets=%d' % cNets)
# plt.close('all')
plt_scale = 2
figL, axesL = plt.subplots(
1, 1, figsize=(5*plt_scale,2*plt_scale),
sharex=True, sharey='row', squeeze=False)
fig4, axes4 = plt.subplots(
1, cNets, figsize=(6*cNets*plt_scale, 4*plt_scale),
sharex=True, sharey='row', squeeze=False)
fig4s, axes4s = plt.subplots(
1, cNets, figsize=(6*cNets*plt_scale, 4*plt_scale),
sharex=True, sharey='row', squeeze=False)
# fig5s, axes5s = plt.subplots(
# 1, cNets, figsize=(6*cNets*plt_scale,5*plt_scale),
# sharex=True, sharey='row', squeeze=False)
cls_at_fpr_method = dict()
lines = []
for (method, suffix_aggr, net), grp in nonmate_classification.groupby(
['METHOD', 'SUFFIX_AGGR', 'NET'], sort=False
):
hnet = human_net_labels[net]
simplified_hnet = human_net_labels[net.split('+')[0]]
# print('Plotting %s' % hnet)
label, method_idx, slabel = method_label_and_idx(
method,
params['METHOD'],
human_net_labels,
)
ni = net_indices[net]
plot_cls_vs_fpr(axes4[0, ni], grp, hnet,
# method,
label,
method_idx=method_idx,
balance_masks=balance_masks,
leftmost=(ni==0))
# axes4[0,ni].legend(loc='upper center', bbox_to_anchor=(0.5, -0.16))
plot_cls_vs_fpr(axes4s[0, ni], grp, simplified_hnet,
slabel,
method_idx=method_idx,
balance_masks=balance_masks,
leftmost=(ni==0))
if ni == 0:
line, cls_at_fpr = plot_cls_vs_fpr(axesL[0, ni], grp, hnet,
slabel,
method_idx=method_idx,
balance_masks=balance_masks,
leftmost=(ni==0))
cls_at_fpr_method[method] = cls_at_fpr
line.set_linewidth(4)
lines.append(line)
axesL[0,ni].legend(loc='center')
axesL[0,ni].axis('off')
fig4s.subplots_adjust(top=0.95, bottom=0.1, left=0.15, right=0.96, hspace=0.9, wspace=0.05)
show.savefig(
'inpainted_twin_game_%s-net-split_simplified.png' %
('balanced-by-mask' if balance_masks else 'unbalanced'),
fig4s,
output_dir=output_dir)
fig4.subplots_adjust(top=0.95, bottom=0.1, left=0.15, right=0.96, hspace=0.9, wspace=0.05)
show.savefig(
'inpainted_twin_game_%s-net-split.png' %
('balanced-by-mask' if balance_masks else 'unbalanced'),
fig4,
output_dir=output_dir)
for line in lines:
line.set_visible(False)
axL = axesL[0,0]
axL.set_title('')
show.savefig('inpainted_twin_game_legend.png', figL, output_dir=output_dir,
transparent=True)
for ax in axes4s.flat:
ax.get_legend().remove()
for ax in axes4.flat:
ax.get_legend().remove()
fig4s.subplots_adjust(top=0.95, bottom=0.1, left=0.15, right=0.96, hspace=0.9, wspace=0.05)
show.savefig(
'inpainted_twin_game_%s-net-split_simplified-nolegend.png' %
('balanced-by-mask' if balance_masks else 'unbalanced'),
fig4s,
output_dir=output_dir)
fig4.subplots_adjust(top=0.95, bottom=0.1, left=0.15, right=0.96, hspace=0.9, wspace=0.05)
show.savefig(
'inpainted_twin_game_%s-net-split-nolegend.png' %
('balanced-by-mask' if balance_masks else 'unbalanced'),
fig4,
output_dir=output_dir)
# fig5s.subplots_adjust(top=0.95, bottom=0.1, left=0.10, right=0.98, hspace=0.9, wspace=0.05)
# show.savefig(
# 'inpainted_twin_game_cls_nonmate_vs_thresh_simplified_%s.png' %
# ('balanced-by-mask' if balance_masks else 'unbalanced'),
# fig5s,)
# output_dir=output_dir)
plt.close('all')
cls_at_fpr_method_msk = defaultdict(dict)
for mask_id, grp0 in nonmate_classification.groupby(
['MASK_ID'], sort=False
):
fig4s, axes4s = plt.subplots(
1, 1, figsize=(8*cNets*plt_scale,1.8*plt_scale),
sharex=True, sharey='row', squeeze=False)
for (method, suffix_aggr), grp in grp0.groupby(
['METHOD', 'SUFFIX_AGGR'], sort=False
):
label, method_idx, slabel = method_label_and_idx(
method,
params['METHOD'],
human_net_labels,
)
ni = 0
_, cls_at_fpr = plot_cls_vs_fpr(
axes4s[0, ni], grp, None, slabel,
method_idx=method_idx,
balance_masks=balance_masks, leftmost=(ni==0))
cls_at_fpr_method_msk[method][mask_id] = cls_at_fpr
# axes4s[0, ni].set_xlim(0, 10)
axes4s[0, ni].set(ylabel='Classified as\nInpainted\nNon-mate',
)
axes4s[0, ni].xaxis.set_major_formatter(
plt.FuncFormatter(tickformatter))
axes4s[0, ni].get_legend().remove()
# fig4s.subplots_adjust(top=0.98, bottom=0.13, left=0.13, right=0.98, hspace=0.9, wspace=0.05)
fig4s.subplots_adjust(top=0.98, bottom=0.22, left=0.16, right=0.96, hspace=0.9, wspace=0.05)
# region = regions.keys()[mask_id] # <- doesn't work in python3
# region = [*regions.keys()][mask_id] # <- doesn't work in python2
try:
region = list(regions.keys())[mask_id]
except IndexError as e:
if mask_id == 167:
region = 'left-or-right-face'
elif mask_id == 189:
region = 'left-or-right-eye'
else:
raise e
fn = 'inpainted_twin_game_simplified_%s_mask%d_%s.png' % (
('balanced-by-mask' if balance_masks else 'unbalanced'),
mask_id,
region,
)
show.savefig(fn, fig4s, output_dir=output_dir)
plt.close('all')
csv_rows = []
for method, cls_at_fpr_maskid in cls_at_fpr_method_msk.items():
nrow = dict()
print(method)
print('\tOverall\t%0.9f\t%0.9f' % (
cls_at_fpr_method[method][1e-2],
cls_at_fpr_method[method][5e-2],
))
nrow['method'] = method
nrow['all,far=1e-2'] = cls_at_fpr_method[method][1e-2]
nrow['all,far=5e-2'] = cls_at_fpr_method[method][5e-2]
for mask_id in [2, 189, 5]:
#for mask_id, cls_at_fpr in cls_at_fpr_maskid.items():
cls_at_fpr = cls_at_fpr_maskid[mask_id]
print('\t%d\t%0.9f\t%0.9f \t(%s)' % (
mask_id,
cls_at_fpr[1e-2],
cls_at_fpr[5e-2],
regions_human_labels[mask_id],
))
nrow['%s,far=1e-2' % regions_human_labels[mask_id]] = cls_at_fpr[1e-2]
nrow['%s,far=5e-2' % regions_human_labels[mask_id]] = cls_at_fpr[5e-2]
csv_rows.append(nrow)
csv_results = | pd.DataFrame(csv_rows) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""L05 Welliton - KNN with Time Audio Features.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1THyHhrvgkGnNdoTOdrDm7I3JMIiazjz4
"""
import os
import random
import librosa
import scipy
import numpy as np
import pandas as pd
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer
from mlxtend.plotting import plot_decision_regions
from scipy.linalg import norm
# visualization
import seaborn
import librosa.display
import IPython.display as ipd
import matplotlib.pyplot as plt
from google.colab import drive
drive.mount('/content/drive')
"""# **Dataset e Pré-Processamento**"""
df_audio = pd.read_csv('/content/drive/My Drive/Audio Dataset/timeaudiofeatures1.csv')
df_audio.shape
df_audio[95:105]
"""CLASSES:
- Kick = 0
- Snare = 1
- Clap = 2
- Tom = 3
- Closed Hihat = 4
TIPOS DE FEATURES:
1. *Valores Discretos* = **Zero-Crossing Rate**, Número de vezes que o sinal atravessa o valor zero por causa de uma oscilação
2. *Valores Contínuos* = **Root-Mean Square**, Valores médios de um sinal
3. *Valores Contínuos* = **Amplitude Envelope**, Valores máximos que representam os picos do sinal
4. *Valores Categóricos Ordinais* = **Low = 0 | Mid = 0.5 | High = 1**, Localização e faixa de alcance no domínio da frequência
5. *Valores Categóricos Ordinais* = **Fast = 0 | Slow = 1**, parâmetro que avalia o quão rápido o sinal decai
6. *Valores Categóricos Nominais* = **Synthesized = 0 | Acoustic = 0.5 | Natural = 1**, Fonte sonora proveniente, se foi sintetizada, gerado de um instrumento ou uma fonte natural
**- CONVERTENDO CLASSES & VARIÁVEIS CATEGÓRICAS ORDINAIS**
"""
df_mod = df_audio
f = {'Low': 0, 'Mid': 1, 'High': 2}
t = {'Slow': 0, 'Fast': 1}
c = {'KICK': 0, 'SNARE': 1, 'CLAP': 2, 'TOM': 3, 'CLS HIHAT': 4}
df_mod['FREQ. RANGE'] = df_mod['FREQ. RANGE'].map(f)
df_mod['TIME DECAY'] = df_mod['TIME DECAY'].map(t)
df_mod['CLASS'] = df_mod['CLASS'].map(c)
df_mod[295:305]
"""**- CONVERTENDO VARIÁVEIS CATEGÓRICAS NOMINAIS (One-Hot Encoding)**"""
pd.get_dummies(df_mod)
"""Eliminando uma das colunas para evitar redundância"""
df_mod2 = pd.get_dummies(df_mod, drop_first=True)
df_mod2
"""Colocando a coluna das labels por último """
df_mod2.columns
new_colums = ['AMP', 'RMS', 'ZCR', 'FREQ. RANGE', 'TIME DECAY','SOURCE_Natural', 'SOURCE_Synthesized', 'CLASS']
df_mod2 = df_mod2[new_colums]
df_mod2
"""**- LIDANDO COM DADOS FALTANTES**"""
df_mod2[346:347]
# Dropping rows with missing values
df_mod2 = df_mod2.dropna(axis=0)
#imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
#df_mod3 = df_mod2.values
#df_mod3 = imputer.fit_transform(df_mod2.values)
#df_mod3
"""# **KNN (k-Nearest Neighbor Model)**
Separando em array de Features (X) e array Classes (y), transformando de tabela para matriz
"""
X = df_mod2.iloc[:, 0:7].values
X[0:5]
y = df_mod2['CLASS'].values
y
audio_all = pd.DataFrame(df_mod2)
audio_data = pd.DataFrame(X)
audio_labels = | pd.DataFrame(y) | pandas.DataFrame |
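"""A minimal KNN sketch (an assumption, not original notebook code) showing how the
pieces imported above — train_test_split, StandardScaler, KNeighborsClassifier and
accuracy_score — would typically be wired together; k = 5 is an arbitrary placeholder."""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
scaler = StandardScaler()
X_train_s = scaler.fit_transform(X_train)
X_test_s = scaler.transform(X_test)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train_s, y_train)
y_pred = knn.predict(X_test_s)
print('Accuracy:', accuracy_score(y_test, y_pred))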
#Loading libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from scipy import ndimage
import math
import keras
import ast
import operator as op
import re
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#Suppressing warning
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
#Global Variable
dict_clean_img = {} #BINARY IMAGE DICTIONAY
dict_img = {} #ORIGINAL IMAGE DICTIONARY
#Keras support channel first (1,28,28) only
keras.backend.set_image_data_format("channels_first")
#loading models
try:
model = keras.models.load_model('models/DCNN_10AD_sy.h5')
model_robusta = keras.models.load_model('models/DCNN_10AD_2_sy.h5')
#Model_Robusta is used when model predicts with lower probability
except:
    print('Model could not be loaded')
#%%
def image_resize(image, width = None, height = None, inter = cv2.INTER_LINEAR):
'''
image_resize : Function to resize images
argument:
image (Matrix) : image to resize
width (Integer) : width of the required image
height (Integer): height of required image
inter (Method) : Interpolation/Extrapolation method
output:
image (Matrix) : image resized
'''
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation = inter)
# return the resized image
return resized
'''
Evaluate New
'''
# supported operators
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
ast.USub: op.neg}
def eval_expr(expr):
"""
>>> eval_expr('2^6')
4
>>> eval_expr('2**6')
64
>>> eval_expr('1 + 2*3**(4^5) / (6 + -7)')
-5.0
"""
return eval_(ast.parse(expr, mode='eval').body)
def eval_(node):
if isinstance(node, ast.Num): # <number>
return node.n
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return operators[type(node.op)](eval_(node.left), eval_(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return operators[type(node.op)](eval_(node.operand))
else:
raise TypeError(node)
'''
Workspace Detection
'''
def sort_contours(cnts, method="left-to-right"):
'''
sort_contours : Function to sort contours
argument:
cnts (array): image contours
method(string) : sorting direction
output:
cnts(list): sorted contours
boundingBoxes(list): bounding boxes
'''
# initialize the reverse flag and sort index
reverse = False
i = 0
# handle if we need to sort in reverse
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
# handle if we are sorting against the y-coordinate rather than
# the x-coordinate of the bounding box
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
# construct the list of bounding boxes and sort them from top to
# bottom
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b:b[1][i], reverse=reverse))
# return the list of sorted contours and bounding boxes
return (cnts, boundingBoxes)
def getBestShift(img):
'''
getBestShift : Function to calculate centre of mass and get the best shifts
argument:
img (array) : gray scale image
output:
shiftx, shifty: x,y shift direction
'''
cy,cx = ndimage.measurements.center_of_mass(img)
rows,cols = img.shape
shiftx = np.round(cols/2.0-cx).astype(int)
shifty = np.round(rows/2.0-cy).astype(int)
return shiftx,shifty
def shift(img,sx,sy):
'''
Shift : Function to shift the image in given direction
argument:
img (array) : gray scale image
sx, sy : x, y direction
output:
shifted : shifted image
'''
rows,cols = img.shape
M = np.float32([[1,0,sx],[0,1,sy]])
shifted = cv2.warpAffine(img,M,(cols,rows))
return shifted
#%%
#Data Generator using tensorflow method
train_datagen = ImageDataGenerator(
data_format='channels_first',
zca_whitening = True,
rotation_range = 0.2)
#%%
def predict(img,x1,y1,x2,y2, proba = False, acc_thresh = 0.60):
'''
predict : Function to predict the character
argument:
x1,y1(int,int) : Top left corner point
x2,y2(int,int) : Bottom right corner point
acc_thresh(0-1) : Probability threshold for calling model_robusta
proba(bool) : If probability values is wanted in return
output:
        c[ind] (str) : predicted character
'''
gray = img[y1:y2, x1:x2]
# Steps to remove noises in image due to cropping
temp = gray.copy()
kernel_temp = np.ones((3,3), np.uint8)
temp_tmp = cv2.dilate(temp, kernel_temp, iterations=3)
# Find the contours - To check whether its disjoint character or noise
if(cv2.__version__ == '3.3.1'):
xyz,contours_tmp,hierarchy = cv2.findContours(temp_tmp,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
else:
contours_tmp,hierarchy = cv2.findContours(temp_tmp,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if(len(contours_tmp) > 1):
# Find the contours
if(cv2.__version__ == '3.3.1'):
xyz,contours,hierarchy = cv2.findContours(gray,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
else:
contours,hierarchy = cv2.findContours(gray,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#Creating a mask of only zeros
mask = np.ones(gray.shape[:2], dtype="uint8") * 0
#print('Yes')
# Find the index of the largest contour
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
cv2.drawContours(mask, [cnt], -1, 255, -1)
#Drawing those contours which are noises and then taking bitwise and
gray = cv2.bitwise_and(temp, temp, mask=mask)
grayN = process_img (gray, resize_flag = 0)
classes = model.predict(grayN, batch_size=2)
ind = np.argmax(classes)
c = ['0','1','2','3','4','5','6','7','8','9','+','-','*','(',')']
if(c[ind] == '5' or c[ind] == '6'):
#print('5/6 got called, Previous:',c[ind])
grayN_1 = process_img (gray, resize_flag = 1)
classes = model.predict(grayN_1, batch_size=2)
ind = np.argmax(classes)
#print('5/6 got called, Current:',c[ind])
if (proba == True):
return classes[0][ind]
return c[ind]
if (classes[0][ind] < acc_thresh):
#print('Poor Handwriting, Augmenting and Testing Prediction, Previous:',c[ind])
grayN_2 = process_img (gray, resize_flag = 1, preproc = 1)
imgs = train_datagen.flow(grayN_2, batch_size=10)
yhats = model_robusta.predict_generator(imgs, steps=10, verbose=0)
yhats = np.mean(yhats,axis=0)
classes = yhats[:,None].reshape(1,15)
ind = np.argmax(classes)
#print('Poor Handwriting, Augmenting and Testing Prediction, Current:',c[ind])
if (proba == True):
return classes[0][ind]
return c[ind]
if (proba == True):
return classes[0][ind]
return c[ind]
#%%
def process_img (gray, resize_flag = 1, preproc = 0):
'''
process_img : Function to pre process image for prediction
argument:
gray (Matrix (np.uint8)) : image of character to be resized and processed
resize_flag : method used for resizing image
        preproc (method [bool]) : 0 : No erosion/dilation, 1 : Erosion and dilation applied
output:
grayS (Matrix (0-1)) : Normalised image of character resized and processed
'''
gray = gray.copy()
#Image Pre Processing
if (preproc == 0):
gray = cv2.GaussianBlur(gray,(7,7),0)
else :
kernel = np.ones((3,3), np.uint8)
gray = cv2.dilate(gray, kernel, iterations=1)
gray = cv2.GaussianBlur(gray,(5,5),1)
gray = cv2.dilate(gray, kernel, iterations=2)
gray = cv2.erode(gray, kernel,iterations=2)
#Removing rows and columns where all the pixels are black
while np.sum(gray[0]) == 0:
gray = gray[1:]
while np.sum(gray[:,0]) == 0:
gray = np.delete(gray,0,1)
while np.sum(gray[-1]) == 0:
gray = gray[:-1]
while np.sum(gray[:,-1]) == 0:
gray = np.delete(gray,-1,1)
rows,cols = gray.shape
if(resize_flag) == 1:
interpolation=cv2.INTER_AREA
else:
interpolation=cv2.INTER_CUBIC
# Making the aspect ratio same before re-sizing
if rows > cols:
factor = 20.0/rows
rows = 20
cols = int(round(cols*factor))
        # first cols, then rows
gray = cv2.resize(gray, (cols,rows),interpolation=interpolation)
else:
factor = 20.0/cols
cols = 20
rows = int(round(rows*factor))
        # first cols, then rows
gray = cv2.resize(gray, (cols, rows),interpolation=interpolation)
# Padding to a 28 * 28 image
colsPadding = (int(math.ceil((28-cols)/2.0)),int(math.floor((28-cols)/2.0)))
rowsPadding = (int(math.ceil((28-rows)/2.0)),int(math.floor((28-rows)/2.0)))
gray = np.lib.pad(gray,(rowsPadding,colsPadding),'constant')
# Get the best shifts
shiftx,shifty = getBestShift(gray)
shifted = shift(gray,shiftx,shifty)
grayS = shifted
grayS = grayS.reshape(1,1,28,28)
#Normalize the image
grayS = grayS/255
return grayS
def extract_box(img, show=True):
'''
Function to extract the boxes in the ruled worksheet
Input : Image with rectangle, show figures
Output : Extract workspaces locations
'''
image_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Otsu thresholding
thresh, binary_image = cv2.threshold(image_gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Defining a kernel length
kernel_length = np.array(binary_image).shape[1]//80
verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# Morphological operation to detect vertical lines from an image
img_temp1 = cv2.erode(binary_image, verticle_kernel, iterations=3)
verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
# Morphological operation to detect horizontal lines from an image
img_temp2 = cv2.erode(binary_image, hori_kernel, iterations=3)
horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=4)
#Join horizontal and vertical images
alpha = 0.5
beta = 1.0 - alpha
img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
(thresh, img_final_bin) = cv2.threshold(img_final_bin, 0,255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#Find and sort the contours
if(cv2.__version__ == '3.3.1'):
xyz,contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
else:
contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
(contours, boundingBoxes) = sort_contours(contours, method="top-to-bottom")
area = []
for contour in contours:
area.append(cv2.contourArea(contour))
s = np.argsort(area) #sorted area
workspace_contours = []
#Find the correct boxes where area is between 40% and 50% of the largest rectangle
for i,contour in enumerate(contours):
if cv2.contourArea(contour) >= area[s[-1]]*0.40 and cv2.contourArea(contour) < area[s[-1]]*0.50:
workspace_contours.append(contour)
    # A list to contain rectangle locations
rectangle_locs =[]
#finding out locations of rectangle
for cnt in workspace_contours:
x,y,w,h = cv2.boundingRect(cnt)
rectangle_locs.append([x,y,w,h])
if (show):
fig = plt.figure(figsize=(7,9))
fig.suptitle('Extracted Workspaces')
i=1
l = len(rectangle_locs)
for rect in rectangle_locs:
ax = fig.add_subplot(l,1,i)
ax.imshow(img[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]])
i = i+1
plt.show()
return rectangle_locs
'''
Line Detection
'''
def find_good_contours_thres(conts, alpha = 0.002):
'''
    Function to find the area threshold for keeping good contours, as a fraction (alpha) of the maximum squared contour area
Input: Contours, threshold for removing noises
Output: Contour area threshold
For image dim 3307*4676
alpha(text_segment) = 0.01
alpha(extract_line) = 0.002
'''
#Calculating areas of contours and appending them to a list
areas = []
for c in conts:
areas.append([cv2.contourArea(c)**2])
#alpha is controlling paramter
thres = alpha * max(areas)[0]
return thres
def extract_line(image, beta=0.7, alpha=0.002, show = True):
'''
Function to extracts the line from the image
Assumption : Sufficient gap b/w lines
argument:
img (array): image array
beta (0-1) : Parameter to differentiate line
alpha (0-1) : Parameter to select good contours
show(bool) : to show figures or not
output:
uppers[diff_index] : Upper points (x,y)
lowers[diff_index] : lower points(x,y)
'''
img = image.copy()
H,W = img.shape[:2]
h5 = int(.02 * H)
w5 = int(.02 * W)
img[:h5,:] = [255,255,255]
img[-h5:,:] = [255,255,255]
img[:,:w5] = [255,255,255]
img[:,-w5:] = [255,255,255]
#Converting image to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Binary thresholding and inverting at 127
th, threshed = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
#Selecting elliptical element for dilation
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
dilation = cv2.dilate(threshed,kernel,iterations = 1)
#Saving a copy of dilated image for taking bitwise_and operation
temp = dilation.copy()
# Find the contours
if(cv2.__version__ == '3.3.1'):
xyz,contours,hierarchy = cv2.findContours(dilation,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
else:
contours,hierarchy = cv2.findContours(dilation,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cont_thresh = find_good_contours_thres(contours, alpha=alpha)
#Creating a mask of only ones
mask = np.ones(dilation.shape[:2], dtype="uint8") * 255
#Drawing those contours which are noises and then taking bitwise and
for c in contours:
if( cv2.contourArea(c)**2 < cont_thresh):
cv2.drawContours(mask, [c], -1, 0, -1)
cleaned_img = cv2.bitwise_and(temp, temp, mask=mask)
    # Dilating the cleaned image for better line detection in cases where
    # exponents sit slightly above the line
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
dil_cleaned_img = cv2.dilate(cleaned_img,kernel,iterations = 10)
#Getting back the cleaned original image without noise
cleaned_orig = cv2.erode(cleaned_img, kernel, iterations=1)
##find and draw the upper and lower boundary of each lines
hist = cv2.reduce(dil_cleaned_img,1, cv2.REDUCE_AVG).reshape(-1)
th = 1
H,W = img.shape[:2]
uppers = np.array([y for y in range(H-1) if hist[y]<=th and hist[y+1]>th])
lowers = np.array([y for y in range(H-1) if hist[y]>th and hist[y+1]<=th])
diff_1 = np.array([j-i for i,j in zip(uppers,lowers)])
diff_index_1 = np.array([True if j > beta*(np.mean(diff_1)-np.std(diff_1)) else False for j in diff_1 ])
uppers = uppers[diff_index_1]
lowers = lowers[diff_index_1]
    # Extending the upper and lower indexes to avoid cutting off characters of a line
    # Uppers are extended further (by about a third of the line height) since exponents may lie above
uppers[1:] = [i-int(j)/3 for i,j in zip(uppers[1:], diff_1[1:])]
lowers[:-1] = [i+int(j)/4 for i,j in zip(lowers[:-1], diff_1[:-1])]
diff_2 = np.array([j-i for i,j in zip(uppers,lowers)])
diff_index_2 = np.array([True]*len(uppers))
    # Merging stray exponent rows into the lines they belong to. This happens when
    # an exponent and its line are separated by some vertical distance
for i,diff in enumerate(diff_2):
if(i>0):
if( (diff_2[i-1] < (diff/2)) and (( lowers[i-1]-uppers[i]) > ((lowers[i-1]-uppers[i-1])/5)) ):
uppers[i] = uppers[i-1]
diff_2[i] = diff_2[i]+diff_2[i-1]
diff_index_2[i-1] = False
print('Merging')
diff_index = diff_index_2
cleaned_orig_rec = cv2.cvtColor(cleaned_orig, cv2.COLOR_GRAY2BGR)
#For changing color of intermediate lines, keeping count
col_ct = 0
for left,right in zip(uppers[diff_index], lowers[diff_index]):
#print(left,right)
col1 = (153,255,255)
col2 = (255,255,153)
if(col_ct % 2 == 0):
col= col1
else:
col=col2
cv2.rectangle(cleaned_orig_rec ,(0+10,left),(W-15,right),col,4)
col_ct += 1
if(show == True):
fig0 = plt.figure(figsize=(15,5))
ax1 = fig0.add_subplot(1,3,1)
ax1.set_title('Original Image')
ax1.imshow(img)
ax1.axis('off')
ax2 = fig0.add_subplot(1,3,2)
ax2.set_title('Cleaned Image')
ax2.imshow(cv2.cvtColor(cleaned_img, cv2.COLOR_GRAY2RGB))
ax2.axis('off')
ax3 = fig0.add_subplot(1,3,3)
ax3.set_title('Noises')
ax3.imshow(cv2.cvtColor(mask, cv2.COLOR_BGR2RGB))
ax3.axis('off')
fig0.suptitle('Denoising')
plt.show()
fig1 = plt.figure(figsize=(15,5))
fig1.suptitle('Line Detection')
ax1 = fig1.add_subplot(1,2,1)
ax1.axis("off")
ax1.imshow(cv2.cvtColor(cleaned_orig,cv2.COLOR_BGR2RGB))
ax2 = fig1.add_subplot(1,2,2)
ax2.axis("off")
ax2.imshow(cv2.cvtColor(cleaned_orig_rec, cv2.COLOR_BGR2RGB))
plt.show()
return cleaned_orig, uppers[diff_index], lowers[diff_index]
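def _demo_extract_line(box_image):
    '''
    Minimal usage sketch (an illustration, mirroring how checker() below consumes
    extract_line): each (y1, y2) pair bounds one handwritten line of the cleaned box.
    box_image is assumed to be a BGR crop of a single workspace.
    '''
    cleaned_box, uppers, lowers = extract_line(box_image, show=False)
    for y1, y2 in zip(uppers, lowers):
        line_img = cleaned_box[y1:y2, :]  # one detected line, ready for text_segment()
        print('Detected line from y=%d to y=%d, shape %s' % (y1, y2, str(line_img.shape)))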
def evaluate(df,A,B,X,Y):
    '''Function to evaluate a mathematical equation and give a boolean output
    Input: Dataframe of predicted characters (columns 'pred', 'exp', 'pred_proba')
            Values A, B, X, Y of the expected equation A*X*X + B*Y
    Output:
            Boolean T/F, or 5 when evaluation fails or the mean prediction confidence is below 0.75
    '''
#Evaluating Expression
actual = A*X*X+(B*Y)
    try:  # If BODMAS is respected and the equation is mathematically valid
pred = df["exp"].apply(lambda d: "**" if d==1 else "")
pred = "".join(list(pred+df["pred"]))
#looking for non digits in the start of the string for
#ignoring equal to's
matchesN = re.findall('^\-\-', pred)
if(len(matchesN) > 0):
for s in matchesN:
pred = pred.replace(s,'')
#looking for broken 5's
matches5 = re.findall(r'5\*\*-\D*', pred)
if(len(matches5) > 0):
for s in matches5:
sn = s.split('5**-')
snew = sn[0]+'5'+sn[1]
pred = pred.replace(s,snew)
        # The following handles brackets written without an explicit multiplication
        # sign in the answer scripts (e.g. '2(3' is rewritten as '2*(3')
matchesB_left = re.findall(r'\d\(\d', pred)
matchesB_right = re.findall(r'\d\)\d', pred)
if(len(matchesB_left) > 0 or len(matchesB_right) > 0):
for s in matchesB_left:
sn = s.split('(')
snew = sn[0]+'*('+sn[1]
pred = pred.replace(s,snew)
for s in matchesB_right:
sn = s.split(')')
snew = sn[0]+')*'+sn[1]
pred = pred.replace(s,snew)
ans = eval_expr(pred)
        print(pred)
# if(ans == actual):
# val='Correct'
# else:
# val='Wrong'
# print(ans, actual, val)
if(df['pred_proba'].mean() < 0.75):
return 5
except Exception as e:
print(pred,'-',e)
return 5
return actual==ans
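# Illustration (hypothetical values) of the DataFrame layout evaluate() expects:
# one row per predicted character, where 'exp' marks exponents that get joined as '**'.
#
#   df_demo = pd.DataFrame({'pred': ['3', '2', '+', '4'],
#                           'exp': [0, 1, 0, 0],
#                           'pred_proba': [0.99, 0.98, 0.97, 0.99]})
#   evaluate(df_demo, A=1, B=1, X=3, Y=4)   # parses '3**2+4' and compares with A*X*X + B*Y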
def text_segment(Y1,Y2,X1,X2,box_num,line_name, dict_clean = dict_clean_img,\
acc_thresh = 0.60, show = True):
'''
text_segment : Function to segment the characters
Input:
Box coordinates -X1,Y1,X2,Y2
box_num - name of box
line_name - name of line
model - Deep Learning model to be used for prediction
dict_clean - dictionary of clean box images
Output :
box_num - name of box
line_name -name of line
df_char - Dataframe of characters of that particular line
'''
img = dict_clean[box_num][Y1:Y2,X1:X2].copy()
L_H = Y2-Y1
## apply some dilation and erosion to join the gaps
#Selecting elliptical element for dilation
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
dilation = cv2.dilate(img,kernel,iterations = 2)
erosion = cv2.erode(dilation,kernel,iterations = 1)
# Find the contours
if(cv2.__version__ == '3.3.1'):
xyz,contours,hierarchy = cv2.findContours(erosion,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
else:
contours,hierarchy = cv2.findContours(erosion,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
ct_th = find_good_contours_thres(contours, alpha=0.005)
cnts = []
for c in contours:
if( cv2.contourArea(c)**2 > ct_th):
cnts.append(c)
contours_sorted,bounding_boxes = sort_contours(cnts,method="left-to-right")
char_locs = []
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
i = 0
char_type =[]
while i in range(0, len(contours_sorted)):
x,y,w,h = bounding_boxes[i]
exp = 0
if i+1 != len(contours_sorted):
x1,y1,w1,h1 = bounding_boxes[i+1]
if abs(x-x1) < 10 and (h1+h) < 70:
#print(h+h1)
minX = min(x,x1)
minY = min(y,y1)
maxX = max(x+w, x1+w1)
maxY = max(y+h, y1+h1)
x,y,x11,y11 = minX, minY, maxX, maxY
x,y,w,h = x,y,x11-x,y11-y
i = i+2
continue
#char_locs.append([x,y,x+w,y+h])
if(h<0.10*L_H and w<0.10*L_H):
#print('Yes')
i=i+1
continue
char_locs.append([x-2,y+Y1-2,x+w+1,y+h+Y1+1,w*h]) #Normalised location of char w.r.t box image
cv2.rectangle(img,(x,y),(x+w,y+h),(153,180,255),2)
if i!=0:
if y+h < (L_H*(1/2)) and y < bounding_boxes[i-1][1] and h < bounding_boxes[i-1][3]:
exp = 1
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
i = i+1
char_type.append(exp)
if(show == True):
plt.figure(figsize=(15,8))
plt.axis("on")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
df_char = pd.DataFrame(char_locs)
df_char.columns=['X1','Y1','X2','Y2','area']
df_char['exp'] = char_type
df_char['pred'] = df_char.apply(lambda c: predict(dict_clean[box_num],c['X1'],\
c['Y1'],c['X2'], c['Y2'], acc_thresh=acc_thresh), axis=1 )
df_char['pred_proba'] = df_char.apply(lambda c: predict(dict_clean[box_num],c['X1'],\
c['Y1'],c['X2'], c['Y2'], proba=True, acc_thresh=acc_thresh), axis=1 )
df_char['line_name'] = line_name
df_char['box_num'] = box_num
return [box_num,line_name,df_char]
#%%
def checker(image_path,A=-1,B=-1,X=-1,Y=-1, acc_thresh=0.60):
'''
Main driver function to manage calling and executing algorithm
argument:
image_path (string): image path
A, B, X, Y (int) : coefficients
'''
#reading image
img_i = cv2.imread(image_path)
if(type(img_i) != np.ndarray):
return 'Invalid Image passed'
#Aspect Ratio calculation
asp_h = img_i.shape[0]
asp_w = img_i.shape[1]
asp_ratio = round(asp_h/asp_w , 1)
if(asp_ratio != 1.4):
print('Poor Image Aspect Ratio : Results Will be affected')
if (asp_h > 4676):#Oversized image
img = image_resize(img_i, height = 4676, width = 3307, inter = cv2.INTER_AREA)
    elif(asp_h < 4676): # Undersized image
img = image_resize(img_i, height = 4676, width = 3307, inter = cv2.INTER_LINEAR)
print('Image less than 300 dpi might have reduced accuracy')
else:
img = img_i
#Workspaces Detection
workspaces = extract_box(img)
if(len(workspaces) != 3):
print('Invalid worksheet image passed, please scan properly')
return -1
#Defining dataframe for storing infos about every line detected
df_lines = pd.DataFrame()
for r,rect in enumerate(workspaces):
#Cropping boxes for sending to line detection module
box = img[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]
H,W = box.shape[:2]
#Extracting lines present in the boxes
cleaned_orig,y1s,y2s = extract_line(box, show=False)
x1s = [0]*len(y1s)
x2s = [W]*len(y1s)
# if(len(y1s)-len(y2s) == 0):
# print('Lines in workspace-%d : %d' %(r, len(y1s)))
df = pd.DataFrame([y1s,y2s,x1s,x2s]).transpose()
df.columns = ['y1','y2','x1','x2']
df['box_num'] = r
df_lines= | pd.concat([df_lines, df]) | pandas.concat |
"""
Medical lexicon NLP extraction pipeline
File contains: Compares the validation set with the NLP pipeline's labeling and outputs some relevant statistics afterwards.
-- (c) <NAME> 2019 - Team D in the HST 953 class
"""
from na_pipeline_tool.utils import logger
from na_pipeline_tool.utils import config
from na_pipeline_tool.utils import helper_classes
from na_pipeline_tool import utils
from na_pipeline_tool.utils import progressbar
import re
import pandas as pd
from joblib import Parallel, delayed
import sys, os
import collections
import numpy as np
import sklearn.metrics
class ValidationModule(helper_classes.Module):
def __init__(self):
super().__init__()
self._validation_set = config.get_pipeline_config_item(self.module_name(), 'validation_set_file', None)
self._df_notes_labeled_path = config.get_pipeline_config_item(self.module_name(), 'input_note_file', None)
self._loaded_df = None
self._compare_df = None
self._orig_df = None
self._loaded_validation = None
self._loaded_validation_labels = None
self._loaded_validation_label_map = None
logger.log_info('Loading validation note labeling file')
self._loading_validation_labeling_file()
logger.log_info('DONE: Loading validation note labeling file')
logger.log_info('Loading NLP pipeline processed note files')
self._loading_note_files()
logger.log_info('DONE: NLP pipeline processed note files')
logger.log_info('Computing and outputting statistics')
self._do_statistics()
def _loading_note_files(self):
if not self._df_notes_labeled_path:
raise RuntimeError('Please specify a valid note input file.')
def load_file(path):
filename = utils.default_dataframe_name(path)
assert os.path.isfile(filename), 'Could not find note parquet file: {}'.format(filename)
df = pd.read_parquet(filename)
df.columns = [_.upper() for _ in df.columns]
            assert 'ROW_ID' in list(df.columns), 'Notes file needs to have columns: ROW_ID, PREDICTED_CATEGORIES'
assert 'PREDICTED_CATEGORIES' in list(df.columns), "Processed note file needs to have the PREDICTED_CATEGORIES column generated by e.g. the negation module."
df['PREDICTED_CATEGORIES'] = df.PREDICTED_CATEGORIES.str.upper()
df['PREDICTED_CATEGORIES'] = df.PREDICTED_CATEGORIES.str.replace(' ', '_')
df['PREDICTED_CATEGORIES'] = df.PREDICTED_CATEGORIES.str.split('|')
if 'FOUND_EVIDENCE' in list(df.columns):
df['FOUND_EVIDENCE'] = df['FOUND_EVIDENCE'].astype(bool)
df = df[df['FOUND_EVIDENCE']]
return df
self._loaded_df = load_file(self._df_notes_labeled_path)
unique_labels = []
for _ in [*self._loaded_df.PREDICTED_CATEGORIES, self._loaded_validation_labels]:
unique_labels.extend(_)
unique_labels = set(unique_labels)
lbl_id = 2
self._loaded_validation_label_map = {"NONE" : 1}
for _lbl in unique_labels:
if _lbl == "NONE":
continue
self._loaded_validation_label_map[_lbl] = lbl_id
lbl_id += 1
self._loaded_df['PREDICTED_CATEGORIES'] = self._loaded_df.PREDICTED_CATEGORIES.apply(lambda x: [self._loaded_validation_label_map[_] for _ in x])
self._loaded_validation['NOTE_TYPES'] = self._loaded_validation.NOTE_TYPES.apply(lambda x: [self._loaded_validation_label_map[_] for _ in x])
def _loading_validation_labeling_file(self):
assert self._validation_set, 'Please specify a validation labeling file.'
try:
with open(self._validation_set, 'r') as file:
self._loaded_validation = file.readlines()
self._loaded_validation = self._loaded_validation[1:]
self._loaded_validation = [_.strip() for _ in self._loaded_validation]
self._loaded_validation = [_.split(',') for _ in self._loaded_validation]
self._loaded_validation = [[int(_[0]), [_.upper().replace(' ', '_') for _ in str(_[1]).split('|')], (int(_[2]) > 0)] for _ in self._loaded_validation]
self._loaded_validation = pd.DataFrame(self._loaded_validation, columns=['ROW_ID', 'NOTE_TYPES', 'VALID_INCLUDED'])
self._loaded_validation.loc[~self._loaded_validation['VALID_INCLUDED'], 'NOTE_TYPES'] = pd.Series([['NONE']]*self._loaded_validation.shape[0])
except:
raise RuntimeError('Error while processing validation labeling file. Check file structure.')
self._loaded_validation_labels = []
for _i, _ in self._loaded_validation.iterrows():
self._loaded_validation_labels.extend(_['NOTE_TYPES'])
self._loaded_validation_labels = set(self._loaded_validation_labels)
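    # Expected layout of the validation labeling file, as implied by the parser above
    # (the header line is skipped; NOTE_TYPES is '|'-separated; the last column is a 0/1 flag):
    #
    #   ROW_ID,NOTE_TYPES,VALID_INCLUDED
    #   1234,PNEUMONIA|SEPSIS,1
    #   5678,NONE,0
    #
    # The row ids and category names above are hypothetical examples, not values from the real data.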
def dump_examples_for_comparison(self):
if not self._compare_df:
logger.log_warn('Could not find comparison df - Skipping dumping of exemplary notes.')
return
self._get_examples_for_categories = [_.upper() for _ in self.get_examples_for_categories]
if not self._get_examples_for_categories:
logger.log_warn('No categories specified for dumping exemplary sentences.')
return
        # The original comparison checked the list against itself, which can never flag anything;
        # checking against the known label map (an assumption about the intent) instead.
        unknown_categories = [_ for _ in self._get_examples_for_categories
                              if _ not in [*self._loaded_validation_label_map.keys(), 'NO_FINDING']]
if unknown_categories:
logger.log_warn('The following categories are not present in the provided dataframes: {}'.format(unknown_categories))
return
example_list = []
# for _cat in self._get_examples_for_categories:
# # Get example sentences
def _do_statistics(self):
validset = self._loaded_validation.sort_values('ROW_ID').reset_index(drop=True)[['ROW_ID', 'NOTE_TYPES']].copy()
validset = validset.drop_duplicates(subset=['ROW_ID'])
predicted = self._loaded_df[['ROW_ID', 'PREDICTED_CATEGORIES']].copy()
predicted = predicted.rename(columns={'PREDICTED_CATEGORIES' : 'PREDICTED_CAT'})
predicted = predicted.drop_duplicates(subset=['ROW_ID'])
validset = validset.merge(predicted, how='left', on='ROW_ID')
validset.loc[validset['PREDICTED_CAT'].isnull(), 'PREDICTED_CAT'] = | pd.Series([[1]]*validset.shape[0]) | pandas.Series |
import numpy as np
import pandas as pd
import sys
import os
import pandas.core.indexes
sys.modules['pandas.indexes'] = pandas.core.indexes
import time
import yaml
import json
import matplotlib.pyplot as plt
import keras
import tensorflow as tf
from keras.models import Sequential, load_model, Model
from keras.layers import Dense, Dropout, Flatten, Conv3D, MaxPooling3D, BatchNormalization, Activation, Input, concatenate
from keras.callbacks import EarlyStopping
from keras.backend.tensorflow_backend import set_session
from keras.utils import multi_gpu_model
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid
from helper import dataset, model
from imaging_predictive_models import imaging_dataset
from clinical_predictive_models import clinical_dataset, MLP
from multimodal_prediction_helper import multimodal_dataset
from keras_helper import EpochEvaluation
#### ENVIRONMENT AND SESSION SET UP ####################################################################
# set the environment variable
os.environ["KERAS_BACKEND"] = "tensorflow"
# Silence INFO logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# create a configuration protocol
config = tf.ConfigProto()
# set the allow_growth option to true in the protocol
config.gpu_options.allow_growth = True
# define GPU to use
config.gpu_options.visible_device_list = "0,1"
# start a sesstion that uses the configuration protocol
set_session(tf.Session(config=config))
#### READ CONFIGURATION FILE ###########################################################################
def join(loader,node):
seq = loader.construct_sequence(node)
return ''.join(str(i) for i in seq)
yaml.add_constructor('!join',join)
cfg = yaml.load(open('config.yml', 'r'))
#### ASSIGN PATHS AND VARIABLES #########################################################################
dataset_name = cfg['dataset name']
img_splits_path = cfg['imaging dataset']['splits path']
img_feat_splits_path = 'data/' + cfg['imaging dataset']['feature splits path']
img_models_path = cfg['imaging dataset']['models path']
img_params_folder = '../TOF-based/modeling_results/1kplus_multimodal/params/'
img_scores_folder = '../TOF-based/modeling_results/1kplus_multimodal/performance_scores/'
clin_splits_path = cfg['clinical dataset']['splits path']
clin_feat_splits_path = 'data/'+ cfg['clinical dataset']['feature splits path']
clin_models_path = cfg['clinical dataset']['models path']
clin_params_folder = '../clinical parameter-based/modeling_results/1kplus_multimodal/params/'
clin_scores_folder = '../clinical parameter-based/modeling_results/1kplus_multimodal/performance_scores/'
num_splits = cfg['number of runs']
#### LOAD BOTH CLINICAL AND IMAGING DATA #################################################################
img_data = imaging_dataset(dataset_name)
img_sets = img_data.assign_train_val_test_sets(img_splits_path)
clin_data = clinical_dataset(dataset_name)
clin_sets = clin_data.assign_train_val_test_sets(clin_splits_path)
features = multimodal_dataset(dataset_name)
features.load_feature_sets(img_feat_splits_path, clin_feat_splits_path)
def train_and_evaluate_CNN(training_data, test_data, params, num_training_runs = 100):
X_tr, y_tr = training_data
X_te, y_te = test_data
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
model = Sequential()
model.add(Conv3D(params['num_filters'][0], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padding="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg']),input_shape=(156,192,64,1)))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size= params['arc_params']['pool_size']))
model.add(Conv3D(params['num_filters'][1], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padding="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg']) ))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
model.add(Conv3D(params['num_filters'][2], params['arc_params']['filter_size'], strides = params['arc_params']['filter_stride'],
padding="same",kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
model.add(Activation('relu'))
model.add(MaxPooling3D(pool_size=params['arc_params']['pool_size']))
model.add(Flatten())
model.add(Dense(params['num_neurons_in_powers']*params['num_filters'][2], activation='relu',kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
model.add(Dropout(params['dropout']))
model.add(Dense(2 , activation='softmax',kernel_regularizer= keras.regularizers.l2(params['l2_reg'])))
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model.compile(loss='binary_crossentropy',optimizer=optimizer)
parallel_model = multi_gpu_model(model, 2)
parallel_model.compile(loss='binary_crossentropy',optimizer=optimizer)
        e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.02, patience = 2, mode='auto')
callbacks = [e_stop]
start = time.time()
history = parallel_model.fit(X_tr, y_tr, callbacks = callbacks, validation_data = (X_te,y_te),
batch_size = params['batch_size'], epochs=20,verbose = 0)
end = time.time()
model.set_weights(parallel_model.get_weights())
probs_tr = model.predict(X_tr, batch_size = 8)
probs_te = model.predict(X_te, batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
print('Training time for run %i was around %i minutes'%(i, np.floor((end-start)/60)))
keras.backend.clear_session()
return AUC_trs, AUC_tes
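# The tuning parameters are loaded from JSON files further below; the dictionary here is a
# hypothetical example that only illustrates the keys train_and_evaluate_CNN() reads
# (the values are placeholders, not the tuned settings).
example_cnn_params = {
    'num_filters': [16, 32, 64],
    'arc_params': {'filter_size': [3, 3, 3], 'filter_stride': [1, 1, 1], 'pool_size': [3, 3, 3]},
    'l2_reg': 1e-4,
    'num_neurons_in_powers': 2,
    'dropout': 0.3,
    'learning_rate': 1e-4,
    'batch_size': 8,
}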
def train_and_evaluate_MLP(training_data, test_data, params, num_training_runs = 100):
X_tr, y_tr = training_data
X_te, y_te = test_data
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.01, patience = 5, mode='min')
callbacks = [e_stop]
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model = Sequential()
model.add(Dense(params['num_neurons'],input_dim = 7, kernel_initializer = 'glorot_uniform', activation = 'relu', kernel_regularizer = keras.regularizers.l2(params['l2_ratio'])))
model.add(Dropout(params['dropout_rate']))
model.add(Dense(2, kernel_initializer = 'glorot_uniform', activation = 'softmax', kernel_regularizer = keras.regularizers.l2(params['l2_ratio'])))
model.compile(loss = 'binary_crossentropy', optimizer = optimizer)
history = model.fit(X_tr, y_tr, callbacks= callbacks, validation_data = (X_te, y_te), epochs = 100, batch_size = params['batch_size'], verbose = 0)
probs_tr = model.predict(X_tr, batch_size = 8)
probs_te = model.predict(X_te, batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
keras.backend.clear_session()
return AUC_trs, AUC_tes
def train_and_evaluate_end_to_end(img_X_tr, clin_X_tr, y_tr, img_X_te, clin_X_te, y_te, params,num_training_runs = 100):
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
img_input = Input(shape= (156,192,64,1), name='image_input')
clin_input = Input(shape= (clin_X_tr.shape[1],), name='clinical_input')
x1 = Conv3D(params['num_filters'][0], (3,3,3), strides = (1,1,1),padding="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(img_input)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Conv3D(params['num_filters'][1], (3,3,3), strides = (1,1,1),padding="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Conv3D(params['num_filters'][2], (3,3,3), strides = (1,1,1),padding="same",
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Activation('relu')(x1)
x1 = MaxPooling3D(pool_size=(3,3,3))(x1)
x1 = Flatten()(x1)
x1 = Dense(params['num_filters'][2]*2, activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x1 = Dropout(params['dropout_rate'])(x1)
x1 = Dense(params['num_neurons_embedding'][1], activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x1)
x2 = Dense(params['num_neurons_MLP'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(clin_input)
x2 = Dropout(params['dropout_rate'])(x2)
x2 = Dense(params['num_neurons_embedding'][0], activation='relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x2)
x = concatenate([x1, x2])
x = Dense(params['num_neurons_final'], activation = 'relu',
kernel_regularizer= keras.regularizers.l1(params['l2_ratio']))(x)
x= Dropout(params['dropout_rate'])(x)
output = Dense(2,activation= 'softmax', kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
model = Model(inputs=[img_input, clin_input], outputs=[output])
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model.compile(loss='binary_crossentropy', optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.02, patience = 2, mode='auto')
callbacks = [e_stop]
start= time.time()
history = model.fit(
{'image_input' : img_X_tr,
'clinical_input' : clin_X_tr},#inputs
y_tr, #output
callbacks = callbacks,
validation_data= ([img_X_te, clin_X_te],y_te),
epochs=20,
batch_size= params['batch_size'],
verbose=0)
end= time.time()
probs_tr = model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_te = model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
print('Training time for run %i was around %i minutes'%(i, np.floor((end-start)/60)))
keras.backend.clear_session()
return AUC_trs, AUC_tes
def train_and_evaluate_feat_extract(img_X_tr, clin_X_tr, y_tr, img_X_te, clin_X_te, y_te, params,num_training_runs = 100):
AUC_trs = []
AUC_tes = []
for i in range(num_training_runs):
img_input = Input(shape= (img_X_tr.shape[1],), name='image_input')
clin_input = Input(shape= (clin_X_tr.shape[1],), name='clinical_input')
dense1 = Dense(params['num_neurons_embedding'][0], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(clin_input)
dense2 = Dense(params['num_neurons_embedding'][1], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(img_input)
x = concatenate([dense1, dense2])
x = Dense(params['num_neurons_final'], activation = 'relu',
kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
x= Dropout(params['dropout_rate'])(x)
output = Dense(2, activation= 'softmax', kernel_regularizer= keras.regularizers.l2(params['l2_ratio']))(x)
optimizer = keras.optimizers.Adam(lr = params['learning_rate'])
model = Model(inputs=[img_input, clin_input], outputs=[output])
model.compile(loss='binary_crossentropy', optimizer = optimizer)
e_stop = EarlyStopping(monitor = 'val_loss', min_delta = 0.01, patience = 5, mode='auto')
callbacks = [e_stop]
history = model.fit({'image_input' : img_X_tr,
'clinical_input' : clin_X_tr},
y_tr,
callbacks = callbacks,
validation_data= ([img_X_te, clin_X_te],y_te),
epochs=100,
batch_size= params['batch_size'],
verbose=0)
probs_tr = model.predict([img_X_tr,clin_X_tr],batch_size = 8)
probs_te = model.predict([img_X_te,clin_X_te],batch_size = 8)
score_tr = roc_auc_score(y_tr, probs_tr)
score_te = roc_auc_score(y_te, probs_te)
AUC_trs.append(score_tr)
AUC_tes.append(score_te)
keras.backend.clear_session()
return AUC_trs, AUC_tes
# fix seed
np.random.seed(1)
tf.set_random_seed(2)
import random as rn
rn.seed(3)
options = [ 'CNN', 'end-to-end']
if 'MLP' in options:
for i in range(num_splits):
X_tr = clin_sets[i]['train_data']
y_tr = clin_sets[i]['train_labels']
X_val = clin_sets[i]['val_data']
y_val = clin_sets[i]['val_labels']
X_te = clin_sets[i]['test_data']
y_te = clin_sets[i]['test_labels']
X_train = np.concatenate((X_tr,X_val))
y_train = np.concatenate((y_tr,y_val))
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
y_train = pd.get_dummies(y_train.reshape(250,))
with open(clin_params_folder+ 'best_MLP_multimodal_tuning_parameters_split_'+str(i+1)+'.json') as json_file:
tuning_params = json.load(json_file)
print(tuning_params)
AUC_trs, AUC_tes = train_and_evaluate_MLP((X_train,y_train),(X_te,y_te),tuning_params,num_training_runs=100)
np.savetxt('../clinical parameter-based/modeling_results/1kplus_multimodal/performance_scores/outer_loop_AUC_performance_over_100_runs_model_'+str(i+1)+'.csv', [AUC_trs, AUC_tes], delimiter=",")
if 'CNN' in options:
for i in range(num_splits):
X_tr = img_sets[i]['train_data']
y_tr = img_sets[i]['train_labels']
X_val = img_sets[i]['val_data']
y_val = img_sets[i]['val_labels']
X_te = img_sets[i]['test_data']
y_te = img_sets[i]['test_labels']
X_train = np.concatenate((X_tr,X_val))
y_train = np.concatenate((y_tr,y_val))
y_tr = pd.get_dummies(y_tr)
y_val = pd.get_dummies(y_val)
y_te = pd.get_dummies(y_te)
y_train = pd.get_dummies(y_train)
with open(img_params_folder+ 'best_tuning_params_split_'+str(i+1)+'.json') as json_file:
tuning_params = json.load(json_file)
print(tuning_params)
AUC_trs, AUC_tes = train_and_evaluate_CNN((X_train,y_train),(X_te,y_te),tuning_params,num_training_runs=100)
np.savetxt('../TOF-based/modeling_results/1kplus_multimodal/performance_scores/outer_loop_AUC_performance_over_100_runs_model_'+str(i+1)+'.csv', [AUC_trs, AUC_tes], delimiter=",")
if 'feature' in options:
for i in range(num_splits):
img_X_tr = features.img_sets[i]['train_data']
img_X_val = features.img_sets[i]['val_data']
img_X_train = np.concatenate((img_X_tr,img_X_val))
img_X_te = features.img_sets[i]['test_data']
clin_X_tr = features.clin_sets[i]['train_data']
clin_X_val = features.clin_sets[i]['val_data']
clin_X_train = np.concatenate((clin_X_tr,clin_X_val))
clin_X_te = features.clin_sets[i]['test_data']
y_tr = features.img_sets[i]['train_labels']
y_val = features.img_sets[i]['val_labels']
y_train = np.concatenate((y_tr,y_val))
y_te = features.img_sets[i]['test_labels']
y_tr = pd.get_dummies(y_tr)
y_val = | pd.get_dummies(y_val) | pandas.get_dummies |
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655–690.
import pandas
import numpy as np
from numpy import inf
import pandas as pd
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blindfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, maxit, HOC=HOC)
data2_ = model.data
    # observations/distance must not be an integer
distance = 7
Q2 = pd.DataFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = pd.DataFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = pd.DataFrame(0, index=data2_.columns.values,
columns=range(distance))
mean = | pd.DataFrame.mean(data2_) | pandas.DataFrame.mean |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from ...core import OutputType, recursive_tile
from ...serialization.serializables import StringField, BoolField, AnyField, ListField
from ..core import SERIES_TYPE
from ..datasource.dataframe import from_pandas as from_pandas_df
from ..datasource.series import from_pandas as from_pandas_series
from ..initializer import Series as asseries
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..reduction.unique import unique
from ..utils import gen_unknown_index_value
_encoding_dtype_kind = ['O', 'S', 'U']
class DataFrameGetDummies(DataFrameOperand, DataFrameOperandMixin):
prefix = AnyField('prefix')
prefix_sep = StringField('prefix_sep')
dummy_na = BoolField('dummy_na')
columns = ListField('columns')
sparse = BoolField('sparse')
drop_first = BoolField('drop_first')
dtype = AnyField('dtype')
def __init__(self, prefix=None, prefix_sep=None, dummy_na=None,
columns=None, sparse=None, drop_first=None, dtype=None, **kws):
super().__init__(prefix=prefix, prefix_sep=prefix_sep, dummy_na=dummy_na,
columns=columns, sparse=sparse, drop_first=drop_first, dtype=dtype, **kws)
self.output_types = [OutputType.dataframe]
@classmethod
def tile(cls, op):
inp = op.inputs[0]
out = op.outputs[0]
if len(inp.chunks) == 1:
chunk_op = op.copy().reset_key()
chunk_param = out.params
chunk_param['index'] = (0, 0)
chunk = chunk_op.new_chunk(inp.chunks, kws=[chunk_param])
new_op = op.copy().reset_key()
param = out.params
param['chunks'] = [chunk]
param['nsplits'] = ((np.nan,), (np.nan,))
return new_op.new_dataframe(op.inputs, kws=[param])
elif isinstance(inp, SERIES_TYPE):
unique_inp = yield from recursive_tile(unique(inp))
chunks = []
for c in inp.chunks:
chunk_op = op.copy().reset_key()
chunk_param = out.params
chunk_param['index_value'] = gen_unknown_index_value(c.index_value)
chunk_param['index'] = (c.index[0], 0)
chunk = chunk_op.new_chunk([c] + unique_inp.chunks, kws=[chunk_param])
chunks.append(chunk)
new_op = op.copy().reset_key()
param = out.params
param['chunks'] = chunks
param['nsplits'] = (tuple([np.nan] * inp.chunk_shape[0]), (np.nan,))
return new_op.new_dataframe(op.inputs, kws=[param])
else:
if op.columns:
encoding_columns = op.columns
else:
encoding_columns = []
for idx, dtype in enumerate(inp.dtypes.values):
if dtype.kind in _encoding_dtype_kind:
column = inp.dtypes.index[idx]
encoding_columns.append(column)
            # reindex so that the encoded columns come last, to stay consistent with pandas.get_dummies
total_columns = list(inp.columns.to_pandas().array)
for col in encoding_columns:
total_columns.remove(col)
total_columns.extend(encoding_columns)
inp = yield from recursive_tile(inp[total_columns])
unique_chunks = dict()
for col in encoding_columns:
unique_chunks[col] = yield from recursive_tile(unique(inp[col]))
chunks = []
prefix = op.prefix
column_to_prefix = dict()
for c in inp.chunks:
chunk_op = op.copy().reset_key()
chunk_op.columns = []
if isinstance(chunk_op.prefix, list):
chunk_op.prefix = []
chunk_param = c.params
chunk_param['shape'] = (np.nan, np.nan)
chunk_columns = c.dtypes.index
inp_chunk = [c]
for chunk_column in chunk_columns:
if chunk_column in encoding_columns:
chunk_op.columns.append(chunk_column)
inp_chunk.extend(unique_chunks[chunk_column].chunks)
if isinstance(prefix, list):
if chunk_column in column_to_prefix.keys():
chunk_op.prefix.append(column_to_prefix[chunk_column])
else:
column_to_prefix[chunk_column] = prefix[0]
chunk_op.prefix.append(prefix[0])
prefix = prefix[1:]
chunk = chunk_op.new_chunk(inp_chunk, kws=[chunk_param])
chunks.append(chunk)
new_op = op.copy()
kw = out.params.copy()
kw['chunks'] = chunks
kw['nsplits'] = (tuple([np.nan] * inp.chunk_shape[0]), tuple([np.nan] * inp.chunk_shape[1]))
return new_op.new_dataframe(op.inputs, kws=[kw])
@classmethod
def execute(cls, ctx, op):
inp = ctx[op.inputs[0].key]
result_length = inp.shape[0]
unique_inputs = []
for unique_input in op.inputs[1:]:
unique_inputs.append(ctx[unique_input.key].tolist())
if unique_inputs:
if isinstance(inp, pd.Series):
extra_series = pd.Series(unique_inputs[0])
inp = pd.concat([inp, extra_series])
else:
# make all unique_input's length the same, then get a dataframe
max_length = len(max(unique_inputs, key=len))
unique_inputs = [unique_list + [unique_list[0]] * (max_length - len(unique_list)) for unique_list in
unique_inputs]
extra_dataframe = pd.DataFrame(dict(zip(op.columns, unique_inputs)))
                # add the columns that do not need encoding, so extra_dataframe and inp can be concatenated
total_columns = list(inp.columns.array)
for col in op.columns:
total_columns.remove(col)
remain_columns = total_columns
not_encode_columns = []
if len(remain_columns) > 0:
for col in remain_columns:
not_encode_columns.append([inp[col].iloc[0]] * max_length)
not_encode_dataframe = pd.DataFrame(dict(zip(remain_columns, not_encode_columns)))
extra_dataframe = pd.concat([not_encode_dataframe, extra_dataframe], axis=1)
inp = | pd.concat([inp, extra_dataframe], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 31 02:10:49 2021
@author: mofarrag
"""
import numpy as np
import pandas as pd
import datetime as dt
import os
import gdal
from types import ModuleType
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from Hapi.raster import Raster
from Hapi.giscatchment import GISCatchment as GC
import Hapi.performancecriteria as PC
from Hapi.visualizer import Visualize as Vis
class Catchment():
"""
================================
Catchment
================================
    The Catchment class includes methods to read the meteorological and spatial inputs
    of the distributed hydrological model. The Catchment class also reads the gauge data.
    It is a super class used by the Run subclass, so you need to build the Catchment
    object and hand it as an input to the Run class to run the model.
methods:
1-ReadRainfall
2-ReadTemperature
3-ReadET
4-ReadFlowAcc
5-ReadFlowDir
6-ReadFlowPathLength
7-ReadParameters
8-ReadLumpedModel
9-ReadLumpedInputs
10-ReadGaugeTable
11-ReadDischargeGauges
12-ReadParametersBounds
13-ExtractDischarge
14-PlotHydrograph
15-PlotDistributedQ
16-SaveResults
"""
def __init__(self, name, StartDate, EndDate, fmt="%Y-%m-%d", SpatialResolution = 'Lumped',
TemporalResolution = "Daily"):
"""
=============================================================================
Catchment(name, StartDate, EndDate, fmt="%Y-%m-%d", SpatialResolution = 'Lumped',
TemporalResolution = "Daily")
=============================================================================
Parameters
----------
name : [str]
Name of the Catchment.
StartDate : [str]
starting date.
EndDate : [str]
end date.
fmt : [str], optional
format of the given date. The default is "%Y-%m-%d".
SpatialResolution : TYPE, optional
Lumped or 'Distributed' . The default is 'Lumped'.
TemporalResolution : TYPE, optional
"Hourly" or "Daily". The default is "Daily".
Returns
-------
None.
"""
self.name = name
self.StartDate = dt.datetime.strptime(StartDate,fmt)
self.EndDate = dt.datetime.strptime(EndDate,fmt)
self.SpatialResolution = SpatialResolution
self.TemporalResolution = TemporalResolution
if TemporalResolution == "Daily":
self.Timef = 24
self.Index = pd.date_range(self.StartDate, self.EndDate, freq = "D" )
elif TemporalResolution == "Hourly":
self.Timef = 1
self.Index = pd.date_range(self.StartDate, self.EndDate, freq = "H" )
else:
            #TODO calculate the temporal resolution factor
# q mm , area sq km (1000**2)/1000/f/60/60 = 1/(3.6*f)
# if daily tfac=24 if hourly tfac=1 if 15 min tfac=0.25
self.Tfactor = 24
pass
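    # Minimal usage sketch (all paths below are hypothetical placeholders):
    #
    #   cat = Catchment("MyCatchment", "2009-01-01", "2011-12-31",
    #                   SpatialResolution="Distributed", TemporalResolution="Daily")
    #   cat.ReadRainfall("inputs/prec/")      # folder of rainfall rasters
    #   cat.ReadTemperature("inputs/temp/")   # folder of temperature rasters
    #   cat.ReadET("inputs/evap/")            # folder of potential ET rasters
    #   cat.ReadFlowAcc("inputs/acc.tif")     # flow accumulation raster (with extension)
    #   cat.ReadFlowDir("inputs/fd.tif")      # flow direction raster (with extension)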
def ReadRainfall(self,Path):
"""
=========================================================
ReadRainfall(Path)
=========================================================
Parameters
----------
Path : [String]
path to the Folder contains precipitation rasters.
Returns
-------
Prec : [array attribute]
array containing the spatial rainfall values
"""
if not hasattr(self, "Prec"):
# data type
assert type(Path) == str, "PrecPath input should be string type"
            # check whether the path exists or not
            assert os.path.exists(Path), Path + " you have provided does not exist"
            # check whether the folder has the rasters or not
assert len(os.listdir(Path)) > 0, Path+" folder you have provided is empty"
# read data
self.Prec = Raster.ReadRastersFolder(Path)
self.TS = self.Prec.shape[2] + 1 # no of time steps =length of time series +1
assert type(self.Prec) == np.ndarray, "array should be of type numpy array"
print("Rainfall data are read successfully")
def ReadTemperature(self,Path, ll_temp=None):
"""
=========================================================
ReadTemperature(Path)
=========================================================
Parameters
----------
Path : [String]
path to the Folder contains temperature rasters.
Returns
-------
Temp : [array attribute]
array containing the spatial temperature values
"""
if not hasattr(self, 'Temp'):
# data type
assert type(Path) == str, "PrecPath input should be string type"
            # check whether the path exists or not
            assert os.path.exists(Path), Path + " you have provided does not exist"
            # check whether the folder has the rasters or not
assert len(os.listdir(Path)) > 0, Path+" folder you have provided is empty"
# read data
self.Temp = Raster.ReadRastersFolder(Path)
assert type(self.Temp) == np.ndarray, "array should be of type numpy array"
if ll_temp is None:
self.ll_temp = np.zeros_like(self.Temp,dtype=np.float32)
avg = self.Temp.mean(axis=2)
for i in range(self.Temp.shape[0]):
for j in range(self.Temp.shape[1]):
self.ll_temp[i,j,:] = avg[i,j]
print("Temperature data are read successfully")
def ReadET(self,Path):
"""
=========================================================
ReadET(Path)
=========================================================
Parameters
----------
Path : [String]
path to the Folder contains Evapotranspiration rasters.
Returns
-------
ET : [array attribute]
array containing the spatial Evapotranspiration values
"""
if not hasattr(self, 'ET'):
# data type
assert type(Path) == str, "PrecPath input should be string type"
            # check whether the path exists or not
            assert os.path.exists(Path), Path + " you have provided does not exist"
            # check whether the folder has the rasters or not
assert len(os.listdir(Path)) > 0, Path+" folder you have provided is empty"
# read data
self.ET = Raster.ReadRastersFolder(Path)
assert type(self.ET) == np.ndarray, "array should be of type numpy array"
print("Potential Evapotranspiration data are read successfully")
def ReadFlowAcc(self, Path):
"""
=========================================================
ReadFlowAcc(Path)
=========================================================
Parameters
----------
Path : [String]
path to the Flow Accumulation raster of the catchment
(it should include the raster name and extension).
Returns
-------
FlowAcc : [array attribute]
array containing the flow accumulation values
rows: [integer]
number of rows in the flow acc array
cols:[integer]
number of columns in the flow acc array
NoDataValue:[numeric]
the NoDataValue
no_elem : [integer]
number of cells in the domain
"""
# data type
assert type(Path) == str, "PrecPath input should be string type"
# check wether the path exists or not
assert os.path.exists(Path), Path + " you have provided does not exist"
# check the extension of the accumulation file
assert Path[-4:] == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
# check wether the path exists or not
assert os.path.exists(Path), Path + " you have provided does not exist"
FlowAcc = gdal.Open(Path)
[self.rows,self.cols] = FlowAcc.ReadAsArray().shape
# check flow accumulation input raster
self.NoDataValue = np.float32(FlowAcc.GetRasterBand(1).GetNoDataValue())
self.FlowAccArr = FlowAcc.ReadAsArray()
self.no_elem = np.size(self.FlowAccArr[:,:])-np.count_nonzero((self.FlowAccArr[self.FlowAccArr==self.NoDataValue]))
self.acc_val = [int(self.FlowAccArr[i,j]) for i in range(self.rows) for j in range(self.cols) if self.FlowAccArr[i,j] != self.NoDataValue]
self.acc_val = list(set(self.acc_val))
self.acc_val.sort()
acc_val_mx = max(self.acc_val)
if not (acc_val_mx == self.no_elem or acc_val_mx == self.no_elem -1):
message = """ flow accumulation raster values are not correct max value should equal number of cells or number of cells -1 """
message = message + " Max Value in the Flow Acc raster is " + str(acc_val_mx)
message = message + " while No of cells are " + str(self.no_elem)
print(message)
# assert acc_val_mx == self.no_elem or acc_val_mx == self.no_elem -1,
# location of the outlet
# outlet is the cell that has the max flow_acc
self.Outlet = np.where(self.FlowAccArr == np.nanmax(self.FlowAccArr[self.FlowAccArr != self.NoDataValue ]))
# calculate area covered by cells
geo_trans = FlowAcc.GetGeoTransform() # get the coordinates of the top left corner and cell size [x,dx,y,dy]
dx = np.abs(geo_trans[1])/1000.0 # dx in Km
dy = np.abs(geo_trans[-1])/1000.0 # dy in Km
# area of the cell
self.px_area = dx*dy
# no_cells=np.size(raster[:,:])-np.count_nonzero(raster[raster==no_val])
self.px_tot_area = self.no_elem*self.px_area # total area of pixels
print("Flow Accmulation input is read successfully")
def ReadFlowDir(self, Path):
"""
================================================================
ReadFlowDir(Path)
================================================================
ReadFlowDir method reads the flow direction raster
Parameters
----------
Path : [str]
Path to the flow direction raster.
Returns
-------
FlowDirArr : [array].
array of the flow direction raster
FDT : [dictionary]
flow direction table
"""
# data type
assert type(Path) == str, "PrecPath input should be string type"
# check wether the path exists or not
assert os.path.exists(Path), Path + " you have provided does not exist"
# check the extension of the accumulation file
assert Path[-4:] == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
# check wether the path exists or not
assert os.path.exists(Path), Path + " you have provided does not exist"
FlowDir = gdal.Open(Path)
[rows,cols] = FlowDir.ReadAsArray().shape
# check flow direction input raster
fd_noval = np.float32(FlowDir.GetRasterBand(1).GetNoDataValue())
self.FlowDirArr = FlowDir.ReadAsArray()
fd_val = [int(self.FlowDirArr[i,j]) for i in range(rows) for j in range(cols) if self.FlowDirArr[i,j] != fd_noval]
fd_val = list(set(fd_val))
fd_should = [1,2,4,8,16,32,64,128]
assert all(fd_val[i] in fd_should for i in range(len(fd_val))), "flow direction raster should contain values 1,2,4,8,16,32,64,128 only "
# create the flow direction table
self.FDT = GC.FlowDirecTable(FlowDir)
print("Flow Direction input is read successfully")
def ReadFlowPathLength(self, Path):
"""
==============================================================
ReadFlowPathLength(Path)
==============================================================
ReadFlowPathLength method reads the flow path length
Parameters
----------
Path : [str]
Path to the file.
Returns
-------
FPLArr : [array]
flow path length array
rows : [integer]
number of rows in the flow acc array
cols : [integer]
number of columns in the flow acc array
NoDataValue : [numeric]
the NoDataValue
no_elem : [integer]
number of cells in the domain
"""
# data type
assert type(Path) == str, "PrecPath input should be string type"
# input values
FPL_ext = Path[-4:]
assert FPL_ext == ".tif", "please add the extension at the end of the Flow accumulation raster path input"
# check wether the path exists or not
assert os.path.exists(Path), Path + " you have provided does not exist"
FPL = gdal.Open(Path)
[self.rows,self.cols] = FPL.ReadAsArray().shape
# check flow accumulation input raster
self.NoDataValue = np.float32(FPL.GetRasterBand(1).GetNoDataValue())
self.FPLArr = FPL.ReadAsArray()
self.no_elem = np.size(self.FPLArr[:,:])-np.count_nonzero((self.FPLArr[self.FPLArr==self.NoDataValue]))
print("Flow Path length input is read successfully")
def ReadParameters(self, Path, Snow=0, Maxbas=False):
"""
============================================================================
ReadParameters(Path, Snow, Maxbas=False)
============================================================================
ReadParameters method reads the parameters' raster
Parameters
----------
Path : [str]
path to the folder where the raster exist.
Snow : [integer]
0 if you don't want to run the snow-related processes and 1 if there is snow.
In case of 1 (simulate snow processes) the parameters related to snow simulation
have to be provided. The default is 0.
Maxbas : [bool], optional
True if the routing is Maxbas. The default is False.
Returns
-------
Parameters : [array].
3d array containing the parameters
Snow : [integer]
0/1
Maxbas : [bool]
True/False
"""
if self.SpatialResolution == 'Distributed':
# data type
assert type(Path) == str, "PrecPath input should be string type"
# check wether the path exists or not
assert os.path.exists(Path), Path + " you have provided does not exist"
# check wether the folder has the rasters or not
assert len(os.listdir(Path)) > 0, Path+" folder you have provided is empty"
# parameters
self.Parameters = Raster.ReadRastersFolder(Path)
else:
self.Parameters = pd.read_csv(Path, index_col = 0, header = None)[1].tolist()
assert Snow == 0 or Snow == 1, "Snow input defines whether to consider the snow subroutine or not; it has to be 0 or 1"
self.Snow = Snow
self.Maxbas = Maxbas
if self.SpatialResolution == 'Distributed':
if Snow == 1 and Maxbas:
assert self.Parameters.shape[2] == 16, "current version of HBV (with snow, with Maxbas) takes 16 parameters, you have entered "+str(self.Parameters.shape[2])
elif Snow == 0 and Maxbas:
assert self.Parameters.shape[2] == 11, "current version of HBV (without snow, with Maxbas) takes 11 parameters, you have entered "+str(self.Parameters.shape[2])
elif Snow == 1 and not Maxbas:
assert self.Parameters.shape[2] == 17, "current version of HBV (with snow) takes 17 parameters, you have entered "+str(self.Parameters.shape[2])
elif Snow == 0 and not Maxbas:
assert self.Parameters.shape[2] == 12, "current version of HBV (without snow) takes 12 parameters, you have entered "+str(self.Parameters.shape[2])
else:
if Snow == 1 and Maxbas:
assert len(self.Parameters) == 16, "current version of HBV (with snow, with Maxbas) takes 16 parameters, you have entered "+str(len(self.Parameters))
elif Snow == 0 and Maxbas:
assert len(self.Parameters) == 11, "current version of HBV (without snow, with Maxbas) takes 11 parameters, you have entered "+str(len(self.Parameters))
elif Snow == 1 and not Maxbas:
assert len(self.Parameters) == 17, "current version of HBV (with snow) takes 17 parameters, you have entered "+str(len(self.Parameters))
elif Snow == 0 and not Maxbas:
assert len(self.Parameters) == 12, "current version of HBV (without snow) takes 12 parameters, you have entered "+str(len(self.Parameters))
print("Parameters are read successfully")
def ReadLumpedModel(self, LumpedModel, CatArea, InitialCond, q_init=None):
"""
=============================================================================
ReadLumpedModel(LumpedModel, CatArea, InitialCond, q_init=None)
=============================================================================
Parameters
----------
LumpedModel : [module]
HBV module.
CatArea : [numeric]
Catchment area (km2).
InitialCond : [list]
list of the initial conditions [SnowPack, SoilMoisture, Upper Zone,
Lower Zone, Water Content].
q_init : [numeric], optional
initial discharge. The default is None.
Returns
-------
LumpedModel : [module].
the lumped conceptual model.
q_init : [numeric]
initial discharge.
InitialCond : [list]
initial conditions.
"""
assert isinstance(LumpedModel,ModuleType) , "ConceptualModel should be a module or a python file that contains functions"
self.LumpedModel = LumpedModel
self.CatArea = CatArea
assert len(InitialCond) == 5, "state variables are 5 and the given initial values are "+str(len(InitialCond))
self.InitialCond = InitialCond
if q_init is not None:
assert type(q_init) == float, "q_init should be of type float"
self.q_init = q_init
if self.InitialCond is not None:
assert type(self.InitialCond)==list, "init_st should be of type list"
print("Lumped model is read successfully")
def ReadLumpedInputs(self, Path, ll_temp=None):
"""
================================================================
ReadLumpedInputs(Path, ll_temp=None)
================================================================
ReadLumpedInputs method reads the meteorological data of the lumped model
[precipitation, Evapotranspiration, temperature, long term average temperature]
Parameters
----------
Path : [string]
Path to the input file, data has to be in the order of
[date, precipitation, ET, Temp].
ll_temp : [array], optional
average long term temperature, if None it is calculated inside the
code. The default is None.
Returns
-------
data : [array].
meteorological data.
ll_temp : [array]
average long term temperature.
"""
self.data = pd.read_csv(Path,header=0 ,delimiter=',',#"\t", #skiprows=11,
index_col=0)
self.data = self.data.values
if ll_temp is None :
self.ll_temp = np.zeros(shape=(len(self.data)),dtype=np.float32)
self.ll_temp = self.data[:,2].mean()
assert np.shape(self.data)[1] == 3 or np.shape(self.data)[1] == 4, "meteorological data should have 3 columns (prec, ET, temp) or 4 columns (prec, ET, temp, tm)"
print("Lumped Model inputs are read successfully")
def ReadGaugeTable(self, Path, FlowaccPath=''):
"""
==========================================================================
ReadGaugeTable(self, Path, FlowaccPath='')
==========================================================================
ReadGaugeTable reads the table where the data about the gauges are listed
[x coordinate, y coordinate, 'area ratio', 'weight']; the coordinates are
mandatory in order to locate the gauges and extract the discharge at the
corresponding cells.
Parameters
----------
Path : [str]
Path to the gauge file.
FlowaccPath : [str], optional
Path to the Flow acc raster. The default is ''.
Returns
-------
GaugesTable : [dataframe]
the table of the gauges.
"""
# read the gauge table
self.GaugesTable = pd.read_csv(Path)
col_list = self.GaugesTable.columns.tolist()
if FlowaccPath != '' and 'cell_row' not in col_list:
# if hasattr(self, 'FlowAcc'):
FlowAcc = gdal.Open(FlowaccPath)
# calculate the nearest cell to each station
self.GaugesTable.loc[:,["cell_row","cell_col"]] = GC.NearestCell(FlowAcc,self.GaugesTable[['id','x','y','weight']][:])
print("Gauge Table is read successfully")
def ReadDischargeGauges(self, Path, delimiter=",", column='id',fmt="%Y-%m-%d",
Split=False, Date1='', Date2=''):
"""
=========================================================================
ReadDischargeGauges(Path, delimiter=",", column='id',fmt="%Y-%m-%d",
Split=False, Date1='', Date2='')
==========================================================================
ReadDischargeGauges method reads the gauge discharge data; the discharge of
each gauge has to be stored separately in a file, and the name of the file
has to be stored in the gauges table you entered using the method "ReadGaugeTable"
under the column "id". The file should contain the date in the first column.
Parameters
----------
Path : [str]
Path to the gauge discharge data.
delimiter : [str], optional
the delimiter between the date and the discharge column. The default is ",".
column : [str], optional
the name of the column where you stored the files names. The default is 'id'.
fmt : [str], optional
date format. The default is "%Y-%m-%d".
Split : bool, optional
True if you want to split the data between two dates. The default is False.
Date1 : [str], optional
start date. The default is ''.
Date2 : [str], optional
end date. The default is ''.
Returns
-------
QGauges : [dataframe].
dataframe containing the discharge data
"""
if self.TemporalResolution == "Daily":
ind = | pd.date_range(self.StartDate, self.EndDate, freq="D") | pandas.date_range |
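# --- Usage sketch (illustrative only) -----------------------------------------
# The class name `Catchment`, the constructor signature, and the paths below are
# assumptions; adapt them to the actual class defined above and to your own data.
#
# cat = Catchment("MyCatchment", "2009-01-01", "2011-12-31",
#                 SpatialResolution="Distributed", TemporalResolution="Daily")
# cat.ReadRainfall("data/prec/")
# cat.ReadTemperature("data/temp/")
# cat.ReadET("data/et/")
# cat.ReadFlowAcc("data/acc.tif")
# cat.ReadFlowDir("data/fd.tif")
# cat.ReadParameters("data/parameters/", Snow=0, Maxbas=False)
# cat.ReadGaugeTable("data/gauges.csv", FlowaccPath="data/acc.tif")
# cat.ReadDischargeGauges("data/discharge/", column="id")
# -------------------------------------------------------------------------------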
import io
import requests
import pandas as pd
def request_meteo(year, month, stationID):
"""
Function that calls Meteo Canada's API to extract a weather station's hourly weather data at a given year and
month.
:param year: (int) year
:param month: (int) month
:param stationID: (int) id of the weather station that you wish to query
:return: (Pandas DataFrame) data frame that contains the hourly weather data at a given year and month
"""
# TODO: docstring
query = '?format=csv&stationID={}&Year={}&Month={}&timeframe=1'.format(stationID, year, month)
response = requests.get('http://climat.meteo.gc.ca/climate_data/bulk_data_e.html{}'.format(query))
textio = io.StringIO(response.content.decode('utf-8'))
''' Modified by <NAME>
Removed the header=14 parameter because it was not loading the column names from the csv file collected from the API.
colnames=["Year", "Month", "Day", "Time", "Data Quality", "Temp Flag", "Weather","Dew Point Temp Flag", "Rel Hum Flag", "Wind Dir Flag", "Wind Spd Flag","Stn Press Flag", "Hmdx Flag", "Wind Chill Flag", "Visibility Flag"]
table = pd.read_csv(textio, names=colnames, header=None, decimal=',')
table = pd.read_csv(textio, header=14, decimal=',')
'''
table = pd.read_csv(textio, decimal=',')
return table
def get_meteo(year_begin, year_end):
"""
Function that returns the weather for all dates between two years. Note that weather data is produced hourly.
:param year_begin: (int) first year
:param year_end: (int) last year
:return: (Pandas DataFrame) data frame that contains the hourly weather data between year_begin and year_end
"""
st_hubert = 48374
# Call climate data API
df_meteo = pd.concat([request_meteo(y, m, stationID=st_hubert)
for y in range(year_begin, year_end + 1)
for m in range(1, 13)])
return df_meteo
def get_meteo_at_date(time_stamp):
"""
Function that only returns the weather for a given date. The API does not appear to support requesting a single day,
so we request the whole month and afterwards filter out the other dates.
:param time_stamp: (DateTime object) date on which weather is needed
:return: (Pandas DataFrame) weather data on a given date
"""
st_hubert = 48374
# Call climate data API
df_meteo = request_meteo(time_stamp.year, time_stamp.month, stationID=st_hubert)
print(df_meteo)
return df_meteo.loc[ | pd.to_datetime(df_meteo["Date/Time"]) | pandas.to_datetime |
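# --- Example usage (sketch; requires network access to the climate-data API) ---
# import datetime as dt
#
# hourly_2019 = get_meteo(2019, 2019)                    # 12 monthly requests, concatenated
# july_first = get_meteo_at_date(dt.datetime(2019, 7, 1))  # one month fetched, then filtered
# -------------------------------------------------------------------------------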
import collections
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
isna,
)
import pandas._testing as tm
class TestCategoricalMissing:
def test_isna(self):
exp = np.array([False, False, True])
cat = Categorical(["a", "b", np.nan])
res = cat.isna()
tm.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = list(range(10))
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
tm.assert_numpy_array_equal(isna(cat), labels == -1)
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
def test_set_dtype_nans(self):
c = Categorical(["a", "b", np.nan])
result = c._set_dtype(CategoricalDtype(["a", "c"]))
tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))
def test_set_item_nan(self):
cat = Categorical([1, 2, 3])
cat[1] = np.nan
exp = Categorical([1, np.nan, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
"fillna_kwargs, msg",
[
(
{"value": 1, "method": "ffill"},
"Cannot specify both 'value' and 'method'.",
),
({}, "Must specify a fill 'value' or 'method'."),
({"method": "bad"}, "Invalid fill method. Expecting .* bad"),
(
{"value": Series([1, 2, 3, 4, "a"])},
"Cannot setitem on a Categorical with a new category",
),
],
)
def test_fillna_raises(self, fillna_kwargs, msg):
# https://github.com/pandas-dev/pandas/issues/19682
# https://github.com/pandas-dev/pandas/issues/13628
cat = Categorical([1, 2, 3, None, None])
with pytest.raises(ValueError, match=msg):
cat.fillna(**fillna_kwargs)
@pytest.mark.parametrize("named", [True, False])
def test_fillna_iterable_category(self, named):
# https://github.com/pandas-dev/pandas/issues/21097
if named:
Point = collections.namedtuple("Point", "x y")
else:
Point = lambda *args: args # tuple
cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object))
result = cat.fillna(Point(0, 0))
expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)])
tm.assert_categorical_equal(result, expected)
def test_fillna_array(self):
# accept Categorical or ndarray value if it holds appropriate values
cat = Categorical(["A", "B", "C", None, None])
other = cat.fillna("C")
result = cat.fillna(other)
tm.assert_categorical_equal(result, other)
assert isna(cat[-1]) # didn't modify original in place
other = np.array(["A", "B", "C", "B", "A"])
result = cat.fillna(other)
expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype)
tm.assert_categorical_equal(result, expected)
assert isna(cat[-1]) # didn't modify original in place
@pytest.mark.parametrize(
"values, expected",
[
([1, 2, 3], np.array([False, False, False])),
([1, 2, np.nan], np.array([False, False, True])),
([1, 2, np.inf], np.array([False, False, True])),
([1, 2, pd.NA], np.array([False, False, True])),
],
)
def test_use_inf_as_na(self, values, expected):
# https://github.com/pandas-dev/pandas/issues/33594
with pd.option_context("mode.use_inf_as_na", True):
cat = Categorical(values)
result = cat.isna()
tm.assert_numpy_array_equal(result, expected)
result = Series(cat).isna()
expected = Series(expected)
tm.assert_series_equal(result, expected)
result = DataFrame(cat).isna()
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"values, expected",
[
([1, 2, 3], np.array([False, False, False])),
([1, 2, np.nan], np.array([False, False, True])),
([1, 2, np.inf], np.array([False, False, True])),
([1, 2, pd.NA], np.array([False, False, True])),
],
)
def test_use_inf_as_na_outside_context(self, values, expected):
# https://github.com/pandas-dev/pandas/issues/33594
# Using isna directly for Categorical will fail in general here
cat = Categorical(values)
with pd.option_context("mode.use_inf_as_na", True):
result = pd.isna(cat)
tm.assert_numpy_array_equal(result, expected)
result = pd.isna(Series(cat))
expected = Series(expected)
tm.assert_series_equal(result, expected)
result = pd.isna(DataFrame(cat))
expected = DataFrame(expected)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
from isitfit.utils import logger
from .tagsSuggestBasic import TagsSuggestBasic
from ..utils import MAX_ROWS
import os
import json
from ..apiMan import ApiMan
class TagsSuggestAdvanced(TagsSuggestBasic):
def __init__(self, ctx):
logger.debug("TagsSuggestAdvanced::constructor")
# api manager
self.api_man = ApiMan(tryAgainIn=2, ctx=ctx)
# proceed with parent constructor
return super().__init__(ctx)
def prepare(self):
logger.debug("TagsSuggestAdvanced::prepare")
self.api_man.register()
def suggest(self):
logger.info("Uploading ec2 names to s3")
logger.debug("TagsSuggestAdvanced::suggest")
# if status is not ok yet, ping again
if self.api_man.r_register['isitfitapi_status']['code']=='Registration in progress':
self.api_man.register()
# boto3 s3 client
s3_client = self.api_man.boto3_session.client('s3' )
import tempfile
from isitfit.dotMan import DotMan
with tempfile.NamedTemporaryFile(suffix='.csv', prefix='isitfit-ec2names-', delete=True, dir=DotMan().tempdir()) as fh:
logger.debug("Will use temporary file %s"%fh.name)
self.tags_df.to_csv(fh.name, index=False)
self.s3_key_suffix = 'tags_request.csv'
s3_path = os.path.join(self.api_man.r_sts['Account'], self.api_man.r_sts['UserId'], self.s3_key_suffix)
logger.debug("s3 PUT bucket=%s path=%s"%(self.api_man.r_body['s3_bucketName'], s3_path))
s3_client.put_object(Bucket=self.api_man.r_body['s3_bucketName'], Key=s3_path, Body=fh)
# POST /tags/suggest
r2, dt_now = self._tags_suggest()
# now listen on sqs
any_found = False
for m in self.api_man.listen_sqs('tags suggest', dt_now):
# if done
if m is None: break
# process messages
any_found = True
logger.info("Server message: %s"%m.body_decoded['status'])
if m.body_decoded['status'] != 'calculation complete':
continue
if m.body_decoded['status'] == 'calculation complete':
# upon calculation complete message
if 's3_key_suffix' not in m.body_decoded:
logger.debug("(Missing s3_key_suffix key from body. Aborting)")
return
self.csv_fn = None
from isitfit.dotMan import DotMan
with tempfile.NamedTemporaryFile(suffix='.csv', prefix='isitfit-tags-suggestAdvanced-', delete=False, dir=DotMan().tempdir()) as fh:
self.csv_fn = fh.name
s3_path = os.path.join(self.api_man.r_body['s3_keyPrefix'], m.body_decoded['s3_key_suffix'])
logger.info("Downloading tag suggestions from isitfit server")
logger.debug("Getting s3 file %s"%s3_path)
logger.debug("Saving it into %s"%fh.name)
response = s3_client.get_object(Bucket=self.api_man.r_body['s3_bucketName'], Key=s3_path)
fh.write(response['Body'].read())
logger.debug("TagsSuggestAdvanced:suggest .. read_csv")
import pandas as pd
self.suggested_df = | pd.read_csv(self.csv_fn, nrows=MAX_ROWS) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon May 14 17:29:16 2018
@author: jdkern
"""
from __future__ import division
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def exchange(year):
df_Path66 = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/syn_Path66.csv',header=0,index_col=0)
df_Path3 = | pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/syn_Path3.csv',header=0,index_col=0) | pandas.read_csv |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
# dtype should properly raises KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
# if we have an index type and set it with something that looks
# to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
def test_loc_setitem_fullindex_views(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right_loc, right_iloc):
# label, index, slice
lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right_loc)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right_iloc)
left = df.copy()
left.iloc[slice_one, slice_two] = rhs
tm.assert_frame_equal(left, right_iloc)
xs = np.arange(20).reshape(5, 4)
cols = ["jim", "joe", "jolie", "joline"]
df = DataFrame(xs, columns=cols, index=list("abcde"), dtype="int64")
# right hand side; permute the indices and multiply by -2
rhs = -2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right_iloc = df.copy()
right_iloc["joe"] = [1, 14, 10, 6, 17]
right_iloc["jolie"] = [2, 13, 9, 5, 18]
right_iloc.iloc[1:4, 1:3] *= -2
right_loc = df.copy()
right_loc.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right_loc, right_iloc)
# make frames multi-type & re-run tests
for frame in [df, rhs, right_loc, right_iloc]:
frame["joe"] = frame["joe"].astype("float64")
frame["jolie"] = frame["jolie"].map("@{}".format)
right_iloc["joe"] = [1.0, "@-28", "@-20", "@-12", 17.0]
right_iloc["jolie"] = ["@2", -26.0, -18.0, -10.0, "@18"]
run_tests(df, rhs, right_loc, right_iloc)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
for idx in [ | _mklbl("A", 20) | pandas.tests.indexing.common._mklbl |
import numpy as np
import pandas as pd
import seaborn as sns
import xarray as xa
import tensorly as tl
from .common import subplotLabel, getSetup
from gmm.tensor import minimize_func, gen_points_GMM
def makeFigure():
"""Get a list of the axis objects and create a figure."""
# Get list of axis objects
ax, f = getSetup((12, 6), (3, 4))
# Add subplot labels
subplotLabel(ax)
blob_DF = make_synth_pic(magnitude=1000)
plot_synth_pic(blob_DF, t=0, ax=ax[0])
plot_synth_pic(blob_DF, t=6, ax=ax[1])
plot_synth_pic(blob_DF, t=12, ax=ax[2])
plot_synth_pic(blob_DF, t=19, ax=ax[3])
rank = 5
n_cluster = 4
blob_xarray = make_blob_tensor(blob_DF)
maximizedNK, optCP, optPTfactors, _, _, preNormOptCP = minimize_func(blob_xarray, rank=rank, n_cluster=n_cluster)
for i in np.arange(0, 4):
print(i)
points = gen_points_GMM(maximizedNK, preNormOptCP, optPTfactors, i * 6, n_cluster)
points_DF = | pd.DataFrame({"Cluster": points[1], "X": points[0][:, 0], "Y": points[0][:, 1]}) | pandas.DataFrame |
import threading
from sklearn import preprocessing
import settings
import pandas as pd
from bson import ObjectId
import json
import datetime
from constants import MAX_FILE_SIZE
from db.encoding import EncodingHelper
class MongoDataStream(object):
def __init__(self, collection, start_date, end_date, chunk_size=10000, max_items=None):
self.db = settings.get_db()
self.source = self.db[collection]
# total number of batches of data in the db/collection
if not max_items:
self.len = self.source.find({'Document.g_timestamp': {'$gte': start_date, '$lt': end_date}}).count()
else:
self.len = max_items
self.slices = int(self.len / chunk_size)
self.data = [] # always keep 2 slices 1 to read from and 1 as a buffer
self.lock = threading.Lock()
self.cond = threading.Condition()
self.available = True
self.start = start_date
self.end = end_date
self.offset = 0
self.chunk_size = chunk_size
self.data = [self.__fetch__() for _ in xrange(2)]
self.order = []
def _get_next_(self, offset):
return self.order[offset:offset + self.chunk_size]
def reset_stream(self):
with self.lock:
self.offset = 0
self.slices = int(self.len / self.chunk_size)
def __fetch__(self):
with self.lock:
offset = self.offset
ids = self._get_next_(offset)
data = self.source.find({'Document.g_timestamp': {'$gte': self.start, '$lt': self.end},
'Document.uuid': {"$in": ids}})
if self.slices == 0: return
with self.lock:
self.data.append(data)
self.slices -= 1
self.offset += self.chunk_size
self.available = True
with self.cond:
self.cond.notifyAll()
def __pre_load__(self):
t = threading.Thread(target=self.__fetch__)
t.daemon = True
t.start()
def get_doc_ids(self):
"""Retrieves the ids of all users in the db with their status(paid/unpaid)"""
ids = self.source.find({"UserId": "123123123"}, {"_id": 0, "Document.uuid": 1, "Document.is_paying": 1})
payment_stats = self.source.find() # insert the query here
return ids, payment_stats
def read(self):
while len(self.data) > 0:
with self.lock:
t_avl = self.available or self.slices == 0
while not t_avl:
with self.cond:
self.cond.wait(1)
with self.lock:
t_avl = self.available or self.slices == 0
with self.lock:
d = self.data.pop()
self.available = False
yield d
self.__pre_load__()
return
class MongoDataStreamReader(object):
def __init__(self, stream, features, normalize=False):
self.stream = stream
self.features = features
self.normalize = normalize
def set_order(self, ids):
self.stream.order = ids
def reset_cursor(self):
self.stream.reset_stream()
def get_training_set(self):
return self.stream.get_doc_ids()
def set_normalize(self, value):
self.normalize = value
def read(self):
data = self.stream.read()
for d in data:
doc = []
if d is None:
return
for dd in d:
tmp = dd['Document']
for f in self.features:
doc.append(tmp[f])
if self.normalize:
yield preprocessing.normalize(doc)
else:
yield doc
class MongoReader(object):
def __init__(self):
self.db = settings.get_db()
self.encoding_helper = EncodingHelper()
def read(self, collection, start_date, end_date,
aggregate_steps=None,
filter_nulls=True,
exclude_id=False,
fields=None,
sample=True,
limit=True):
# TODO: simplify this to 1 query
# Let's assume that all the docs look alike; filter so that all fields MUST have a value
fields = fields if fields is not None else self.db[collection].find_one()
doc_filter = {}
if fields is not None:
for f in fields:
key = f['name']
doc_filter[key] = {
'$nin': [None, 'NULL', 'null', ''],
'$exists': True
}
pipeline = []
if filter_nulls:
pipeline.append({'$match': doc_filter})
if exclude_id:
pipeline.append({"$project": {"_id": 0}})
if aggregate_steps is not None:
pipeline = pipeline + aggregate_steps
output = self.db[collection].aggregate(pipeline)
return output
def read_as_pandas_frame(self, collection, start_date, end_date,
aggregate_steps=None,
filter_nulls=True,
exclude_id=False,
fields=None,
sample=False,
limit=True):
data = self.read(collection, start_date, end_date, aggregate_steps=aggregate_steps, filter_nulls=filter_nulls,
exclude_id=exclude_id,
fields=fields, sample=sample, limit=limit)
data = list(data)
df = pd.DataFrame(data)
if fields is not None:
for f in fields:
if 'encoding' not in f:
continue
df = self.encoding_helper.encode_frame_field(df, f)
return df
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.db.logout()
class MongoBufferedReader(object):
def __init__(self):
self.db = settings.get_db()
self.encoding_helper = EncodingHelper()
def sample(self, collection, columns, count=100):
columns = columns if columns is not None else [x for x in self.db[collection].find_one()]
doc_filter = {}
if columns is not None:
for fname in columns:
key = fname
doc_filter[key] = {
'$nin': [None, 'NULL', 'null', ''],
'$exists': True
}
pipeline = []
projection = {col: 1 for col in columns}
query = {}
for dcf in doc_filter:
query[dcf] = doc_filter[dcf]
pipeline.append({'$match': query})
pipeline.append({'$project': projection})
pipeline.append({'$sample': {'size': count}})
output = self.db[collection].aggregate(pipeline)
data = list(output) # pandas will not read iterators
df = | pd.DataFrame(data) | pandas.DataFrame |
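# --- Usage sketch (illustrative; assumes settings.get_db() points at a live DB
# and that an "events" collection exists) ---------------------------------------
# buffered = MongoBufferedReader()
# sample_df = buffered.sample("events", columns=["userId", "country"], count=200)
#
# with MongoReader() as reader:
#     frame = reader.read_as_pandas_frame(
#         "events", start_date=None, end_date=None,
#         fields=[{"name": "userId"}, {"name": "country"}],
#         aggregate_steps=[{"$limit": 1000}], exclude_id=True)
# -------------------------------------------------------------------------------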
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib import gridspec
import warnings
import nltk
from shift_detector.checks.check import Report
from shift_detector.checks.statistical_checks.categorical_statistical_check import CategoricalStatisticalCheck
from shift_detector.checks.statistical_checks.numerical_statistical_check import NumericalStatisticalCheck
from shift_detector.checks.statistical_checks.statistical_check import StatisticalCheck, StatisticalReport
from shift_detector.precalculations.text_metadata import TextMetadata
from shift_detector.utils.column_management import ColumnType
from shift_detector.utils.errors import UnknownMetadataReturnColumnTypeError
from shift_detector.utils.visualization import PLOT_GRID_WIDTH, PLOT_ROW_HEIGHT, PlotData
class TextMetadataStatisticalCheck(StatisticalCheck):
def __init__(self, text_metadata_types=None, language='en', infer_language=False, significance=0.01,
sample_size=None, use_equal_dataset_sizes=False, sampling_seed=0):
nltk.download('stopwords')
nltk.download('universal_tagset')
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
super().__init__(significance, sample_size, use_equal_dataset_sizes, sampling_seed)
self.metadata_precalculation = TextMetadata(text_metadata_types, language=language,
infer_language=infer_language)
def significant_columns(self, pvalues):
return set(column for column in pvalues.columns.levels[0]
if len(super(type(self), self).significant_columns(pvalues[column])) > 0)
def significant_metadata(self, mdtype_pvalues):
return set(mdtype for mdtype in self.metadata_precalculation.text_metadata_types
if mdtype.metadata_name() in super(type(self), self).significant_columns(mdtype_pvalues))
def significant_metadata_names(self, mdtype_pvalues):
return sorted([mdtype.metadata_name() for mdtype in self.significant_metadata(mdtype_pvalues)])
def explanation_header(self, numerical_test_name, categorical_test_name, any_significant):
header = 'Statistical tests performed:\n' + \
'\t- numerical metadata: {}\n'.format(numerical_test_name) + \
'\t- categorical metadata: {}\n'.format(categorical_test_name) + \
'Significance level: {}\n'.format(str(self.significance))
if any_significant:
header += '\nSome text metadata metrics on the following columns are unlikely to be equally distributed.\n'
return header
def explain(self, pvalues):
explanation = {}
for column in sorted(self.significant_columns(pvalues)):
explanation[column] = 'Significant metadata:\n\t\t- {significant_metadata}'.format(
significant_metadata='\n\t\t- '.join(self.significant_metadata_names(pvalues[column]))
)
return explanation
@staticmethod
def metadata_plot(figure, tile, column, mdtype, df1, df2):
col_mdtype_tuple = (column, mdtype.metadata_name())
if mdtype.metadata_return_type() == ColumnType.categorical:
CategoricalStatisticalCheck.column_plot(figure, tile, col_mdtype_tuple, df1, df2)
elif mdtype.metadata_return_type() == ColumnType.numerical:
NumericalStatisticalCheck.column_plot(figure, tile, col_mdtype_tuple, df1, df2)
else:
raise UnknownMetadataReturnColumnTypeError(mdtype)
@staticmethod
def plot_all_metadata(plot_data):
rows = sum([plot.required_rows for plot in plot_data])
cols = 1
fig = plt.figure(figsize=(PLOT_GRID_WIDTH, PLOT_ROW_HEIGHT * rows), tight_layout=True)
grid = gridspec.GridSpec(rows, cols)
occupied_rows = 0
for i, plot in enumerate(plot_data):
plot.plot_function(fig, tile=grid[occupied_rows:(occupied_rows + plot.required_rows), i % cols])
occupied_rows += plot.required_rows
plt.show()
def plot_data(self, significant_columns, pvalues, df1, df2):
plot_data = []
for column in sorted(significant_columns):
for mdtype in sorted(self.significant_metadata(pvalues[column])):
if mdtype.metadata_return_type == ColumnType.categorical:
distinct_count = len(set(df1[(column, mdtype)].unique()).union(set(df2[(column, mdtype)].unique())))
required_height = 1.5 + 0.3 * distinct_count
required_rows = int(np.ceil(required_height / PLOT_ROW_HEIGHT))
else:
required_rows = 1
plot_data.append(
PlotData(lambda figure, tile, col=column, meta=mdtype:
self.metadata_plot(figure, tile, col, meta, df1, df2),
required_rows)
)
return plot_data
def metadata_figure(self, pvalues, df1, df2):
significant_columns = self.significant_columns(pvalues)
if not significant_columns:
return []
return [lambda plots=tuple(self.plot_data(significant_columns, pvalues, df1, df2)):
self.plot_all_metadata(plots)]
def run(self, store) -> Report:
df1, df2 = store[self.metadata_precalculation]
part1, part2 = self.adjust_dataset_sizes(df1, df2)
categorical_check = CategoricalStatisticalCheck()
numerical_check = NumericalStatisticalCheck()
pvalues = | pd.DataFrame(columns=df1.columns, index=['pvalue']) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
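# Hand check of the first expected value above (assuming a 1-day timestep and
# first-order foliar decay, conc = conc_0 * 0.5 ** (t / half_life)):
# 0.001 * 0.5 ** (1 / 0.25) = 0.001 * 0.0625 = 6.25e-5, matching expected_results[0].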
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
            # following parameter values are needed for the internal mammalian toxicity adjustment call
            # (note: the bird toxicity results cited in the sa_bird tests do not apply to these mammalian inputs)
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
            # following parameter values are needed for the internal mammalian toxicity adjustment call
            # (note: the bird toxicity results cited in the sa_bird tests do not apply to these mammalian inputs)
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
            # following parameter values are needed for the internal mammalian (NOAEL-based) adjustment call
            # (note: the bird toxicity results cited in the sa_bird tests do not apply to these mammalian inputs)
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
        --comparing this method with 'test_ld50_rg_bird', it appears (for this test) that both run in the same time,
        --but that likely would not hold when 100's of model simulation runs are executed (and only a small
        --number of the application_types apply to this method); thus we continue to use the non-vectorized
        --approach -- this should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = | pd.Series([100., 125., 90.], dtype='float') | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import pandas.util.testing as pdt
import qiime2
from q2_taxa import collapse, filter_table, filter_seqs
class CollapseTests(unittest.TestCase):
def assert_index_equal(self, a, b):
# this method is derived from scikit-bio 0.5.1
pdt.assert_index_equal(a, b,
exact=True,
check_names=True,
check_exact=True)
def assert_data_frame_almost_equal(self, left, right):
# this method is derived from scikit-bio 0.5.1
pdt.assert_frame_equal(left, right,
check_dtype=True,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False)
self.assert_index_equal(left.index, right.index)
def test_collapse(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;c', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_missing_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b', 'a; b; d'],
index=['feat1', 'feat2'])
actual = collapse(table, taxonomy, 1)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 2)
expected = pd.DataFrame([[4.0], [2.0], [17.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b'])
self.assert_data_frame_almost_equal(actual, expected)
actual = collapse(table, taxonomy, 3)
expected = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['a;b;__', 'a;b;d'])
self.assert_data_frame_almost_equal(actual, expected)
def test_collapse_bad_level(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat2'])
with self.assertRaisesRegex(ValueError, 'of 42 is larger'):
collapse(table, taxonomy, 42)
with self.assertRaisesRegex(ValueError, 'of 0 is too low'):
collapse(table, taxonomy, 0)
def test_collapse_missing_table_ids_in_taxonomy(self):
table = pd.DataFrame([[2.0, 2.0],
[1.0, 1.0],
[9.0, 8.0],
[0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = pd.Series(['a; b; c', 'a; b; d'],
index=['feat1', 'feat3'])
with self.assertRaisesRegex(ValueError, 'missing.*feat2'):
collapse(table, taxonomy, 1)
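# illustrative sketch (not part of the original tests): the collapse behaviour exercised above can be
# approximated with plain pandas, assuming ';'-joined ranks and summing features that share a label:
#
#   labels = taxonomy.apply(lambda t: ';'.join(t.split('; ')[:2]))  # level 2: 'a; b; c' -> 'a;b'
#   collapsed = table.groupby(labels, axis=1).sum()                 # columns feat1/feat2 merge into 'a;b'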
class FilterTable(unittest.TestCase):
def test_filter_no_filters(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'At least one'):
filter_table(table, taxonomy)
def test_alt_delimiter(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# include with delimiter
obs = filter_table(table, taxonomy, include='<EMAIL>',
query_delimiter='@peanut@')
pdt.assert_frame_equal(obs, table, check_like=True)
# exclude with delimiter
obs = filter_table(table, taxonomy, exclude='<EMAIL>',
query_delimiter='@peanut@')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_filter_table_unknown_mode(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
with self.assertRaisesRegex(ValueError, 'Unknown mode'):
filter_table(table, taxonomy, include='bb', mode='not-a-mode')
def test_filter_table_include(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='bb')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, include='cc,ee')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='dd')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, include='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='peanut!')
def test_filter_table_include_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa; bb; cc,aa; bb; dd ee',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, include='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, include='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, include='bb', mode='exact')
def test_filter_table_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='ab')
pdt.assert_frame_equal(obs, table, check_like=True)
obs = filter_table(table, taxonomy, exclude='xx')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='dd')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa')
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy, exclude='aa; bb')
def test_filter_table_exclude_exact_match(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, exclude='peanut!',
mode='exact')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; dd ee,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only
obs = filter_table(table, taxonomy, exclude='aa; bb; cc',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = filter_table(table, taxonomy, exclude='aa; bb; cc,aa',
mode='exact')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
exclude='aa; bb; cc,aa; bb; dd ee',
mode='exact')
def test_filter_table_include_exclude(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep both features
obs = filter_table(table, taxonomy, include='aa', exclude='peanut!')
pdt.assert_frame_equal(obs, table, check_like=True)
# keep feat1 only - feat2 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - feat2 dropped at inclusion step
obs = filter_table(table, taxonomy, include='cc', exclude='ee')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at exclusion step
obs = filter_table(table, taxonomy, include='aa', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat2 only - feat1 dropped at inclusion step
obs = filter_table(table, taxonomy, include='ee', exclude='cc')
exp = pd.DataFrame([[2.0], [1.0], [8.0], [4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat2'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep no features - all dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='aa',
exclude='bb',
mode='exact')
# keep no features - one dropped at inclusion, one dropped at exclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='cc',
exclude='cc',
mode='exact')
# keep no features - all dropped at inclusion
with self.assertRaisesRegex(ValueError, expected_regex='empty table'):
obs = filter_table(table, taxonomy,
include='peanut',
exclude='bb',
mode='exact')
def test_filter_table_underscores_escaped(self):
table = pd.DataFrame([[2.0, 2.0], [1.0, 1.0], [9.0, 8.0], [0.0, 4.0]],
index=['A', 'B', 'C', 'D'],
columns=['feat1', 'feat2'])
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; cc', 'aa; bb; dd ee'],
index=pd.Index(['feat1', 'feat2'], name='id'),
columns=['Taxon']))
# keep feat1 only - underscore not treated as a wild card
obs = filter_table(table, taxonomy, include='cc,d_')
exp = pd.DataFrame([[2.0], [1.0], [9.0]],
index=['A', 'B', 'C'],
columns=['feat1'])
pdt.assert_frame_equal(obs, exp, check_like=True)
# keep feat1 only - underscore in query matches underscore in
# taxonomy annotation
taxonomy = qiime2.Metadata(
pd.DataFrame(['aa; bb; c_', 'aa; bb; dd ee'],
index= | pd.Index(['feat1', 'feat2'], name='id') | pandas.Index |
# ********************************************************************************** #
# #
# Project: FastClassAI workbench #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import cv2
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advanced plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from src.utils.image_augmentation import * # to create batch_labels files,
from src.utils.data_loaders import load_encoded_imgbatch_using_logfile, load_raw_img_batch
from PIL import Image, ImageDraw
from matplotlib.font_manager import FontProperties
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
# Function, ............................................................................................
def perfrom_grid_search(*, X, y, train_proportion=0.7, pipe, grid, method_name=np.nan, verbose=False):
# check the data, ................................
assert type(X)==np.ndarray, "Incorrect obj type" # Test input df,
assert type(y)==np.ndarray, "Incorrect obj type" # Test input df,
# Data preparation, ...............................
# .. Split data into train/test sets
X_tr, X_te, y_tr, y_te = train_test_split(
X, y,
train_size=train_proportion,
test_size=(1-train_proportion),
random_state=0
)
# .. test dimensions,
if verbose==True:
print('Number of combinations:', len(grid))
print("Input Data shapes are:", "train=",X_tr.shape," test=",X_te.shape)
else:
pass
# Grid Search, ...............................
# Save accuracy on test set
test_scores = []
# Enumerate combinations starting from 1
for i, params_dict in enumerate(grid, 1):
if verbose==True:
# Print progress
if i-1==0:
print(f"GridSearch: ", end="")
if i>1 and i<len(grid)-1:
print(".",end="")
if i==len(grid):
print(".", end="\n")
else:
pass
# Set parameters
pipe.set_params(**params_dict)
# Fit a k-NN classifier
pipe.fit(X_tr, y_tr)
# Save accuracy on test set
params_dict['train_accuracy'] = pipe.score(X_tr, y_tr)
params_dict['test_accuracy'] = pipe.score(X_te, y_te)
params_dict['method'] = method_name
# Save result
test_scores.append(params_dict)
if verbose==True:
print('done')
else:
pass
# prepare the results, ...................
scores_df = | pd.DataFrame(test_scores) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
from typing import List
from datetime import datetime
PATH: str = os.path.join('data', 'raw')
DAOSTAK_SRCS: List[str] = [os.path.join(PATH, 'daostack_members.csv')]
DAOHAUS_SRCS: List[str] = [os.path.join(PATH, 'daohaus_members.csv'), os.path.join(PATH, 'daohaus_rage_quits.csv')]
def get_df(src: str) -> pd.DataFrame:
return pd.read_csv(src, header=0)
def clean_df(df: pd.DataFrame, col: str) -> pd.DataFrame:
dff: pd.DataFrame = df
dff.loc[:, col] = dff[col]
dff.rename(columns={col: 'date'}, inplace=True)
return dff
def sequence_members(df: pd.DataFrame) -> pd.DataFrame:
dff: pd.DataFrame = df
dff.loc[:, 'date'] = pd.to_datetime(dff.loc[:, 'date'], unit='s').dt.date
dff = dff.groupby('date').size().reset_index(name='members')
# fill gaps
first = dff['date'].min()
#last = dff['date'].max()
last = datetime.strptime('01/12/2020', "%d/%m/%Y") # fill with fix date
idx = pd.date_range(start=first, end=last, freq='D')
filler = pd.DataFrame({'date': idx, 'members': 0})
dff = dff.append(filler, ignore_index=True)
dff.loc[:, 'date'] = | pd.to_datetime(dff.loc[:, 'date']) | pandas.to_datetime |
#%%
############################################################################
# IMPORTS
############################################################################
import pandas as pd
import numpy as np
from utils import model_zoo, data_transformer
import argparse
import pickle
import os
#%%
############################################################################
# CONSTANTS & PARAMETERS
############################################################################
# Default file Locations and model name (parameters)
MODEL_NAME = "KERAS_LENET5"
PICKLE_PATH = "C:/kaggle/kaggle_keypoints/pickle"
MODEL_PATH = "C:/kaggle/kaggle_keypoints/models"
# Processing behavior (parameters)
NORMALIZE_LABELS = False
VERBOSE = True
USE30 = True
# Processing behavior (constants)
AVAILABLE_MODELS = ["KERAS_LENET5", "KERAS_INCEPTION", "KERAS_KAGGLE1", "KERAS_NAIMISHNET", "KERAS_CONVNET5", "KERAS_INCEPTIONV3", "KERAS_KAGGLE2", "KERAS_RESNET50", "KERAS_RESNET", "KERAS_RESNEXT50", "KERAS_RESNEXT101"]
TEST_DATA_FILE = "cleandata_naive_test.pkl"
TEST_IDS_FILE = "raw_id_lookup.pkl"
OVERLAP_FILE = "cleandata_naive_overlap.pkl"
TEST8_DATA_FILE = "cleandata_test8.pkl"
TEST30_DATA_FILE = "cleandata_test30.pkl"
#%%
############################################################################
# ARGUMENT SPECIFICATION
############################################################################
parser = argparse.ArgumentParser(description = "Performs predictions for the Kaggle Facial Keypoints Detection challenge.")
# Commandline arguments
parser.add_argument('-nv', '--no_verbose', action = 'store_true', help = 'Disables verbose output mode (verbose mode prints more detailed descriptions of the process).')
parser.add_argument('-pp', '--pickle_path', type = str, default = "C:/kaggle/kaggle_keypoints/pickle", help = "Path to location of output pickle files (post processing files).")
parser.add_argument('-mp', '--model_path', type = str, default = "C:/kaggle/kaggle_keypoints/models", help = "Path to location of output model files.")
parser.add_argument('-m', '--model_name', type = str, default = "KERAS_LENET5", help = "Name of the model to train.")
parser.add_argument('-pa', '--partial', action = 'store_true', help = 'Trains only using the 8-value dataset (vs. the full 30-value dataset)')
parser.add_argument('-nl', '--normalize_labels', action = 'store_true', help = "Enables the normalization of prediction label values prior to training.")
############################################################################
# ARGUMENT PARSING
############################################################################
def process_arguments(parsed_args, display_args = False):
global VERBOSE, PICKLE_PATH, MODEL_PATH, MODEL_NAME, NORMALIZE_LABELS, USE30
args = vars(parser.parse_args())
if display_args:
print("".join(["\PREDICT Arguments in use:\n", "-" * 30, "\n"]))
for arg in args:
print("Parameter '%s' == %s" % (arg, str(getattr(parser.parse_args(), arg))))
print("\n")
# Assign arguments to globals
VERBOSE = not args['no_verbose']
USE30 = not args['partial']
MODEL_NAME = args['model_name']
NORMALIZE_LABELS = args['normalize_labels']
MODEL_PATH = str(args['model_path']).lower().strip().replace('\\', '/')
PICKLE_PATH = str(args['pickle_path']).lower().strip().replace('\\', '/')
# validate the presence of the paths
for p, v, l in zip([MODEL_PATH, PICKLE_PATH], ['model_path', 'pickle_path'], ['Model file path', 'Pickle file path']):
if not os.path.exists(p):
raise RuntimeError(" ".join([l, "'%s'" % p, "specified in parameter `%s` does not exist." % v]))
# validate the parameters entered
if not MODEL_NAME in AVAILABLE_MODELS:
raise RuntimeError("Parameter `model_name` value of '%s' is invalid. Must be in list: %s" % (MODEL_NAME, str(AVAILABLE_MODELS)))
#%%
############################################################################
# LOAD DATA
############################################################################
# load the data for training
def load_data(pickle_path, test_file, id_file, overlap_file, verbose = True):
if verbose: print("".join(["-" * 50, "\n>>> BEGIN LOAD DATA <<<\n", "-" * 50, "\n"]))
if not pickle_path.endswith("/"): pickle_path = "".join([pickle_path, "/"])
test_file = "".join([pickle_path, test_file])
id_file = "".join([pickle_path, id_file])
overlap_file = "".join([pickle_path, overlap_file])
for f, l in zip([test_file, id_file, overlap_file], ['Test', 'Test IDs', 'Overlap']):
if not os.path.isfile(f):
raise RuntimeError("%s file '%s' not found - training cancelled." % (l, f))
test = pickle.load(open(test_file, "rb"))
if verbose: print("Test file '%s' loaded; shape: %s" % (test_file, str(test.shape)))
ids = pickle.load(open(id_file, "rb"))
if verbose: print("Test IDs file '%s' loaded; shape: %s" % (id_file, str(ids.shape)))
overlap = pickle.load(open(overlap_file, "rb"))
if verbose: print("Overlap file '%s' loaded; shape: %s" % (overlap_file, str(overlap.shape)))
if verbose: print("".join(["\n", "-" * 50, "\n>>> END LOAD DATA <<<\n", "-" * 50, "\n"]))
return test, ids, overlap
# %%
############################################################################
# PREDICT MODEL (GENERIC HANDLER)
############################################################################
def predict_model(model_path, pickle_path, model_name, normalize_labels, test, ids, overlap, predict_file, skip_output = False, skip_overlap = False, full = True, verbose = True):
if verbose: print("".join(["-" * 50, "\n>>> BEGIN PREDICT ON %s <<<\n" % model_name, "-" * 50, "\n"]))
# load helper modules for models and data transformation
models = model_zoo.Models(model_path = MODEL_PATH)
xform = data_transformer.Xform(pickle_path = PICKLE_PATH, verbose = VERBOSE)
# validate the existence of the model output path; if it doesn't exist, create it
if model_path.endswith("/"): sep_add = ""
else: sep_add = "/"
validate_path = "".join([model_path, sep_add, model_name])
if not os.path.exists(validate_path):
if verbose: print("Model output path '%s' does not yet exist, creating it." % validate_path)
os.makedirs(validate_path)
# call the training module specific to the algorithm called
if model_name == "KERAS_LENET5":
feature_name = "ALL_FEATURES"
pred = predict_model_lenet5(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_INCEPTIONV3":
feature_name = "ALL_FEATURES"
pred, _ = predict_model_inceptionv3(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNET50":
feature_name = "ALL_FEATURES"
pred = predict_model_resnet50(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNEXT50":
feature_name = "ALL_FEATURES"
pred = predict_model_resnext50(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNEXT101":
feature_name = "ALL_FEATURES"
pred = predict_model_resnext101(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_RESNET":
feature_name = "ALL_FEATURES"
pred = predict_model_resnet(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_INCEPTION":
feature_name = "ALL_FEATURES"
Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols = predict_model_inception(models = models,
xform = xform, test = test, ids = ids, feature_name = feature_name, full = full, verbose = verbose)
pred = [Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols]
elif model_name == "KERAS_KAGGLE1":
feature_name = "ALL_FEATURES"
pred = predict_model_kaggle1(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_KAGGLE2":
feature_name = "ALL_FEATURES"
pred = predict_model_kaggle2(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_CONVNET5":
feature_name = "ALL_FEATURES"
pred = predict_model_convnet5(models = models, xform = xform, test = test, ids = ids,
feature_name = feature_name, full = full, verbose = verbose)
elif model_name == "KERAS_NAIMISHNET":
if full:
feature_name = ['left_eye_center', 'right_eye_center', 'left_eye_inner_corner', 'left_eye_outer_corner',
'right_eye_inner_corner', 'right_eye_outer_corner', 'left_eyebrow_inner_end', 'left_eyebrow_outer_end',
'right_eyebrow_inner_end', 'right_eyebrow_outer_end', 'nose_tip', 'mouth_left_corner', 'mouth_right_corner',
'mouth_center_top_lip', 'mouth_center_bottom_lip']
else:
feature_name = ['left_eye_center', 'right_eye_center', 'nose_tip', 'mouth_center_bottom_lip']
pred = predict_model_naimishnet(models = models, xform = xform, test = test, ids = ids, feature_name = feature_name,
normalize_labels = normalize_labels, full = full, verbose = verbose)
else:
raise RuntimeError("Model name '%s' not understood; cancelling training." % model_name)
if not skip_output:
# this branch for normal output against TEST
output_prediction(model_path = model_path, model_name = model_name, Y = pred, test = test, ids = ids, feature_name = feature_name,
predict_file = predict_file, xform = xform, overlap = overlap, normalize_labels = normalize_labels, skip_overlap = skip_overlap, full = full, verbose = verbose)
else:
# this branch for output of STACK cross validation
output_stack(model_path = model_path, model_name = model_name, Y = pred, test = test, ids = ids, feature_name = feature_name,
predict_file = predict_file, xform = xform, overlap = overlap, normalize_labels = normalize_labels, skip_overlap = skip_overlap, full = full, verbose = verbose)
if verbose: print("".join(["-" * 50, "\n>>> END PREDICT ON %s <<<\n" % model_name, "-" * 50, "\n"]))
return pred
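# illustrative usage note (not part of the original script): predict_model is typically driven by the
# globals set in process_arguments, e.g. (hypothetical call; the output file name is assumed):
#   test, ids, overlap = load_data(PICKLE_PATH, TEST_DATA_FILE, TEST_IDS_FILE, OVERLAP_FILE, verbose = VERBOSE)
#   pred = predict_model(MODEL_PATH, PICKLE_PATH, MODEL_NAME, NORMALIZE_LABELS, test, ids, overlap,
#                        predict_file = "predictions.csv", full = USE30, verbose = VERBOSE)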
# %%
############################################################################
# PREDICT MODEL NAIMISHNET
############################################################################
def predict_model_naimishnet(models, xform, test, ids, feature_name, normalize_labels, full = True, verbose = True):
# create empty DF for capturing inferenced values (unpivoted x,y coordinates to columns)
submission = pd.DataFrame({'image_id':int(), 'variable':'', 'value':float()},index=[1])
submission = submission[(submission.index == -1)]
df = {}
for keypoint in feature_name:
X, subset = xform.PrepareTest(test, ids, keypoint, verbose = verbose)
subset = subset[['image_id']]
Y = models.predict_keras_naimishnet(X = X, feature_name = keypoint, full = full, verbose = verbose)
# un-normalize the predictions
mod_subset = subset.copy()
for i, lbl in zip(range(Y.shape[1]), ['_x', '_y']):
if normalize_labels:
Y[:,i] = xform.UnNormalize_Labels(Y[:,i])
# ensure pixel boundaries are clipped between 0.0 and 96.0
Y[:,i] = np.clip(Y[:,i], 0.0, 96.0)
col = "".join([keypoint, lbl])
mod_subset[col] = Y[:,i]
submission = submission.append(pd.melt(mod_subset, id_vars = ['image_id']), ignore_index = True)
submission.columns = ['image_id','feature_name','location']
return submission
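# illustrative note (not part of the original script): the returned `submission` frame is in long format,
# one row per predicted coordinate, e.g.
#   image_id  feature_name       location
#   1         left_eye_center_x  66.1
#   1         left_eye_center_y  38.5
# the numeric values above are made-up placeholders; the column names come from the code above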
#%%
############################################################################
# PREDICT MODEL LENET5
############################################################################
def predict_model_lenet5(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_lenet5(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL KAGGLE1
############################################################################
def predict_model_kaggle1(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_kaggle1(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL KAGGLE2
############################################################################
def predict_model_kaggle2(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_kaggle2(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL INCEPTIONV3
############################################################################
def predict_model_inceptionv3(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y, Y_cols = models.predict_keras_inceptionv3(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y, Y_cols
#%%
############################################################################
# PREDICT MODEL RESNET
############################################################################
def predict_model_resnet(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnet(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL RESNET50
############################################################################
def predict_model_resnet50(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnet50(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL RESNEXT50
############################################################################
def predict_model_resnext50(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnext50(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL RESNEXT101
############################################################################
def predict_model_resnext101(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_resnext101(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL CONVNET5
############################################################################
def predict_model_convnet5(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test = test, ids = ids, feature_name = feature_name, verbose = verbose)
Y = models.predict_keras_convnet5(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y
#%%
############################################################################
# PREDICT MODEL INCEPTION
############################################################################
def predict_model_inception(models, xform, test, ids, feature_name, full = True, verbose = True):
X, _ = xform.PrepareTest(test, ids, feature_name, verbose = verbose)
Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols = models.predict_keras_inception(X = X, feature_name = feature_name, full = full, verbose = verbose)
return Y_main, Y_aux1, Y_aux2, Y_main_cols, Y_aux1_cols, Y_aux2_cols
############################################################################
# OUTPUT PREDICTIONS (STACK)
############################################################################
def output_stack(model_path, model_name, Y, feature_name, test, ids, predict_file, xform, overlap, normalize_labels, skip_overlap = False, full = True, verbose = True):
if full:
train_cols = ['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', 'left_eye_inner_corner_x',
'left_eye_inner_corner_y', 'left_eye_outer_corner_x', 'left_eye_outer_corner_y', 'right_eye_inner_corner_x',
'right_eye_inner_corner_y', 'right_eye_outer_corner_x','right_eye_outer_corner_y', 'left_eyebrow_inner_end_x',
'left_eyebrow_inner_end_y', 'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y', 'right_eyebrow_inner_end_x',
'right_eyebrow_inner_end_y', 'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y', 'nose_tip_x', 'nose_tip_y',
'mouth_left_corner_x', 'mouth_left_corner_y', 'mouth_right_corner_x', 'mouth_right_corner_y', 'mouth_center_top_lip_x',
'mouth_center_top_lip_y', 'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', 'image']
else:
train_cols = ['left_eye_center_x', 'left_eye_center_y', 'right_eye_center_x', 'right_eye_center_y', 'nose_tip_x', 'nose_tip_y',
'mouth_center_bottom_lip_x', 'mouth_center_bottom_lip_y', 'image']
    # generate output for LeNet5, Kaggle1, Kaggle2, ConvNet5, InceptionV3, ResNet, ResNet50, ResNeXt50, and ResNeXt101
if model_name in ['KERAS_LENET5', 'KERAS_KAGGLE1', 'KERAS_KAGGLE2', 'KERAS_CONVNET5', 'KERAS_INCEPTIONV3', 'KERAS_RESNET50', 'KERAS_RESNET', 'KERAS_RESNEXT50', 'KERAS_RESNEXT101']:
Y = pd.DataFrame(Y, columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
Y.index.rename('image_id', inplace = True)
# write the predictions file
Y.to_csv(predict_file, index = True)
print("Predictions written to '%s'." % predict_file)
elif model_name == 'KERAS_INCEPTION':
created_files, blend_vals = [], None
for j, l, cols in zip([Y[0], Y[1], Y[2]], ['main_model', 'aux1_model', 'aux2_model'], [Y[3], Y[4], Y[5]]):
for ncol, col in enumerate(cols):
#ol = overlap.copy()
#print(l, col)
__loop_pred_file = predict_file.replace("".join([model_name, "/"]), "".join([model_name, "/", l.upper(), "__"])).replace(".csv", "".join(["_", col.replace("/", "_"), ".csv"]))
created_files.append(__loop_pred_file)
j_df = pd.DataFrame(j[ncol], columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
j_df.index.rename('image_id', inplace = True)
for c in [c for c in train_cols if not 'image' == c]:
                    if normalize_labels:
vals = xform.UnNormalize_Labels(j_df[c].values)
j_df[c] = vals
j_df[c] = np.clip(j_df[c], 0.0, 96.0)
if blend_vals is None:
blend_vals = j_df.values
else:
blend_vals = np.mean((blend_vals, j_df.values), axis = 0)
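                # Note: this is a running pairwise mean -- each new output is averaged
                # against the accumulated blend, so later outputs end up weighted more
                # heavily than a true equal-weight mean over all model outputs.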
# write the predictions file
#j_df.to_csv(__loop_pred_file, index = True)
#print("Predictions written to '%s'." % __loop_pred_file)
# now iterate over all the created files and create a blend
df_combined = pd.DataFrame(blend_vals, columns = [c for c in train_cols if not 'image' == c], index = test.image_id.values)
df_combined.index.rename('image_id', inplace = True)
df_combined.to_csv(predict_file, index = True)
print("\nBlended predictions written to '%s' (mean average of all %d Inception model predictions).\n\n" % (predict_file, len(created_files)))
elif model_name == "KERAS_NAIMISHNET":
df = {}
df['image_id'] = test.image_id.values
for c in [c for c in train_cols if not 'image' == c]:
df[c] = Y[(Y.image_id.isin(test.image_id.values) & (Y.feature_name == c))].location.values
df = | pd.DataFrame(df) | pandas.DataFrame |
# Imports
from sqlalchemy import String, Integer, Float, Boolean, Column, and_, ForeignKey
from connection import Connection
from datetime import datetime, time, date
import time
from pytz import timezone
import pandas as pd
import numpy as np
import os
from os import listdir
from os.path import isfile, join
from openpyxl import load_workbook
import openpyxl
# Import modules
from connection import Connection
from user import UserData
from test import TestData
from tables import User, Tests, TestsQuestions, Formative, FormativeQuestions
class ProcessEvaluationData(TestData):
"""
A class to handle process evaluation data. To initialize the class you need to specify the SQL connection String.
"""
def __init__(self, connection_string):
# First we create a session with the DB.
self.session = Connection.connection(connection_string)
self.data = pd.DataFrame()
# These are empty dataframes for transition proportions.
self.transitions_glb = pd.DataFrame()
self.transitions_eight_grade = pd.DataFrame()
self.transitions_nine_grade = pd.DataFrame()
self.transitions_ten_grade = pd.DataFrame()
self.transitions_eleven_grade = pd.DataFrame()
# General methods for process evaluation
def read_files(self, **kwargs):
"""
        This function goes through the specified directories and reads each file into a temporary dictionary called
        temp_data. Each file is read as a dataframe and stored under a key derived from the file name (e.g., user_2019).
        After reading in all the files the function changes back to the original working directory (the one it started from).
"""
self.temp_data = {}
for i in kwargs:
grade = i
year = kwargs[grade]
current_dir = os.getcwd()
if grade != 'user':
os.chdir(current_dir + '/Data/{}_grade/{}_grade_{}'.format(grade, grade, year))
else:
os.chdir(current_dir + '/Data/{}/{}_{}'.format(grade, grade, year))
for f in listdir(os.path.abspath(os.getcwd())):
if (f.split('.')[1] == 'csv') and (f.split('.')[0] != 'Comments'): # This part makes sure that xlsx files are excluded.
self.temp_data[str(f.split('.')[0])] = pd.read_csv(str(os.path.abspath(f)),
encoding = 'utf-8', engine='c')
os.chdir(current_dir)
def transitions_global(self, trial = None):
"""
        The method is designed for calculating transition statistics. The function has two modes: trial = True/False.
        When trial is True, it looks for directories from the trial year (2019). Otherwise the function
        uses the past year (current year - 1).
"""
def tranistions_grouped(group_by):
"""
            The group_by argument specifies which variable(s) the aggregation is performed on.
"""
if trial:
year = 2019
else:
now = datetime.now()
year = now.year - 1
self.year = year
self.global_step_one = {} # Step 1: Registration
self.global_step_two = {} # Step 2: Pre-test
self.global_step_three = {} # Step 3: Post-test
self.read_files(**{'user' : self.year,
'eight' : self.year,
'nine' : self.year,
'ten' : self.year,
'eleven' : self.year})
"""
After reading in the files the method creates dictionaries for each step (3 dictionaries in total).
"""
for i in self.temp_data.keys():
if 'post' in i: # assembles the post-test data
self.global_step_three[i]= pd.DataFrame(self.temp_data[i].drop_duplicates(['user_id'])\
.groupby([i for i in group_by])['user_id'].count())
self.global_step_two[i] = pd.DataFrame(self.temp_data[i].drop_duplicates(['user_id'])\
.groupby([i for i in group_by])['user_id'].count())
                elif 'dropouts' in i: # adds dropouts data to step two.
self.global_step_two[i] = pd.DataFrame(self.temp_data[i].drop_duplicates(['user_id'])\
.groupby([i for i in group_by])['user_id'].count())
elif 'user' in i: # add user data to step one.
self.global_step_one[i] = pd.DataFrame(self.temp_data[i]\
.groupby([i for i in group_by])['user_id'].count())
df1 = pd.concat(self.global_step_three.values(), axis = 1)
df1 = pd.DataFrame(df1.sum(axis=1, skipna=True))
df1.rename(columns={ 0 : 'Step_Three'}, inplace = True)
df2 = pd.concat(self.global_step_two.values(), axis = 1, sort=True)
df2 = pd.DataFrame(df2.sum(axis=1, skipna=True))
df2.rename(columns={ 0 : 'Step_Two'}, inplace = True)
df3 = pd.concat(self.global_step_one.values(), axis = 1)
df3 = pd.DataFrame(df3.sum(axis=1, skipna=True))
df3.rename(columns={ 0 : 'Step_One'}, inplace = True)
transitions = pd.concat([df3, df2, df1], axis = 1, sort=True)
transitions = transitions.T.assign(Total = lambda x: x.sum(1)).T
pc_change = transitions.pct_change(axis = 'columns').round(2) # Calculates percentage change between the steps and rounds to the second digit.
            pc_change.rename(columns = {'Step_One' : 'Step_One_change',
                                        'Step_Two' : 'Step_Two_change',
                                        'Step_Three' : 'Step_Three_change'}, inplace = True)
transitions_pc = pd.concat([transitions, pc_change], axis = 1)
transitions_pc.drop('Step_One_change', axis = 1, inplace = True)
return transitions_pc
def transition_time(group_by):
pre_date = {}
for i in self.temp_data.keys():
if 'pre' in i:
pre_date.update({ i : self.temp_data[i][['user_id', 'user_grade', 'user_created_at', 'region_name', 'user_sex', 'pre_tests_res_date']]\
.drop_duplicates(subset = 'pre_tests_res_date', keep="last") })
post_date = {}
for i in self.temp_data.keys():
if 'post' in i:
post_date.update({i : self.temp_data[i][['user_id', 'user_grade', 'user_created_at', 'region_name', 'user_sex', 'pre_tests_res_date']]\
.drop_duplicates(subset = 'pre_tests_res_date', keep="last")})
d1 = | pd.concat(pre_date, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 16:14:12 2019
@author: <NAME>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import graphviz
import os
import seaborn as sns
from scipy.stats import chi2_contingency
os.chdir("E:\PYTHON NOTES\projects\cab fare prediction")
dataset_train=pd.read_csv("train_cab.csv")
dataset_test=pd.read_csv("test.csv")
dataset_train.describe()
# dimension of data
# dimension of data
dataset_train.shape
# Number of rows
dataset_train.shape[0]
# number of columns
dataset_train.shape[1]
# name of columns
list(dataset_train)
# data details
dataset_train.info()
dataset_train.isnull().sum()
dataset_test.isnull().sum()
sns.heatmap(dataset_train.isnull(),yticklabels=False,cbar=False, cmap='coolwarm')
#convert the datetime columns into the required format
data=[dataset_train,dataset_test]
for i in data:
i["pickup_datetime"]=pd.to_datetime(i["pickup_datetime"],errors="coerce")
dataset_train.info()
dataset_test.info()
dataset_train.isnull().sum()
dataset_test.isna().sum()
dataset_train=dataset_train.dropna(subset=["pickup_datetime"],how="all")
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
np.where(dataset_train["fare_amount"]=="430-")
dataset_train["fare_amount"].loc[1123]=430
dataset_train["fare_amount"]=dataset_train["fare_amount"].astype(float)
#we will convert passenger count into a categorical variable, because passenger count is not a continuous variable
dataset_obj=["passenger_count"]
dataset_int=["fare_amount","pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
# data visulization
import seaborn as sns
import matplotlib.pyplot as plt
#setting up the sns style for plots
sns.set(style="darkgrid",palette="Set1")
#some histogram plots from the seaborn library
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.distplot(dataset_train["fare_amount"],bins=50)
plt.subplot(322)
_=sns.distplot(dataset_train["pickup_longitude"],bins=50)
plt.subplot(323)
_=sns.distplot(dataset_train["pickup_latitude"],bins=50)
plt.subplot(324)
_ = sns.distplot(dataset_train['dropoff_longitude'],bins=50)
plt.subplot(325)
_ = sns.distplot(dataset_train['dropoff_latitude'],bins=50)
plt.savefig('hist.png')
plt.show()
import scipy.stats as stats
#Some Bee Swarmplots
# plt.title('Cab Fare w.r.t passenger_count')
plt.figure(figsize=(25,25))
#_=sns.swarmplot(x="passenger_count",y="fare_amount",data=dataset_train)
#Jointplots for bivariate analysis.
#Here the scatter plot has a regression line between the 2 variables along with separate bar plots of both variables.
#Also it is annotated with the Pearson correlation coefficient and p-value.
_=sns.jointplot(x="fare_amount",y="pickup_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
#plt.savefig("jointfplo.png")
plt.show()
_=sns.jointplot(x="fare_amount",y="pickup_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_longitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
_=sns.jointplot(x="fare_amount",y="dropoff_latitude",data=dataset_train,kind="reg")
_.annotate(stats.pearsonr)
# some violin plots to see the spread of each variable
plt.figure(figsize=(20,20))
plt.subplot(321)
_=sns.violinplot(y="fare_amount",data=dataset_train)
plt.subplot(322)
_=sns.violinplot(y="pickup_longitude",data=dataset_train)
plt.subplot(323)
_ = sns.violinplot(y='pickup_latitude',data=dataset_train)
plt.subplot(324)
_ = sns.violinplot(y='dropoff_longitude',data=dataset_train)
plt.subplot(325)
_ = sns.violinplot(y='dropoff_latitude',data=dataset_train)
plt.savefig("violine.png")
plt.show()
#pairplot for all numeric variables
_=sns.pairplot(dataset_train.loc[:,dataset_int],kind="scatter",dropna=True)
_.fig.suptitle("pairwise plot of all numeric variables")
#plt.savefig("pairwise.png")
plt.show()
#removing values which are not within the desired range (outliers), based on a basic understanding of the dataset
#1. Fare amount has negative values, which doesn't make sense. A fare amount cannot be negative and also cannot be 0, so we will remove these records.
sum(dataset_train["fare_amount"]<1)
dataset_train[dataset_train["fare_amount"]<1]
dataset_train=dataset_train.drop(dataset_train[dataset_train["fare_amount"]<1].index,axis=0)
#dataset_train.loc[dataset_train["fare_amount"]<1,"fare_amount"]=np.nan
#2. passenger count variable: passenger count cannot be more than 6
sum(dataset_train["passenger_count"]>6)
for i in range (4,11):
print("passanger_count_above"+ str(i)+ "={}".format(sum(dataset_train["passenger_count"]>i)))
# so 20 observations of passenger_count are consistently above the 6,7,8,9,10 passenger_count thresholds; let's check them.
dataset_train[dataset_train["passenger_count"]>6]
#Also we need to see if there are any passenger_count<1
dataset_train[dataset_train["passenger_count"]<1]
len(dataset_train[dataset_train["passenger_count"]<1])
dataset_test["passenger_count"].unique()
# We will remove 20 observation which are above 6 value because a cab cannot hold these number of passengers.
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]<1].index,axis=0)
dataset_train=dataset_train.drop(dataset_train[dataset_train["passenger_count"]>6].index,axis=0)
#dataset_train.loc[dataset_train["passenger_count"]<1,"passenger_count"]=np.nan
#dataset_train.loc[dataset_train["passenger_count"]>6,"passenger_count"]=np.nan
sum(dataset_train["passenger_count"]<1)
#3.Latitudes range from -90 to 90.Longitudes range from -180 to 180. Removing which does not satisfy these ranges
print("pickup_longitude above 180 ={}".format(sum(dataset_train["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_train["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_train["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_train["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_train['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_train['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_train['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_train['dropoff_latitude']>90)))
#for test data
print("pickup_longitude above 180 ={}".format(sum(dataset_test["pickup_longitude"]>180)))
print("pickup_longitude above -180 = {}".format(sum(dataset_test["pickup_longitude"]<-180)))
print("pickup_latitude above 90 ={}".format(sum(dataset_test["pickup_latitude"]>90)))
print("pickup_latitude above -90 ={}".format(sum(dataset_test["pickup_latitude"]<-90)))
print('dropoff_longitude above 180={}'.format(sum(dataset_test['dropoff_longitude']>180)))
print('dropoff_longitude below -180={}'.format(sum(dataset_test['dropoff_longitude']<-180)))
print('dropoff_latitude below -90={}'.format(sum(dataset_test['dropoff_latitude']<-90)))
print('dropoff_latitude above 90={}'.format(sum(dataset_test['dropoff_latitude']>90)))
#There's only one outlier, which is in variable pickup_latitude. So we will remove it.
#Also we will see if there are any values equal to 0.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_train[i]==0)))
#for test data
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
print(i,"equal to 0={}".format(sum(dataset_test[i]==0)))
#there are values which are equal to 0. we will remove them.
# There's only one outlier, in variable pickup_latitude, so we will drop that row
dataset_train=dataset_train.drop(dataset_train[dataset_train["pickup_latitude"]>90].index,axis=0)
#there are values which are equal to 0. we will remove them.
for i in ["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]:
dataset_train=dataset_train.drop(dataset_train[dataset_train[i]==0].index,axis=0)
# for i in ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']:
# train.loc[train[i]==0,i] = np.nan
# train.loc[train['pickup_latitude']>90,'pickup_latitude'] = np.nan
dataset_train.shape
#Missing Value Analysis
missing_value=dataset_train.isnull().sum()
missing_value = missing_value.reset_index()
missing_value = missing_value.rename(columns = {'index': 'Variables', 0: 'Missing_percentage'})
missing_value
#find out percentage of null value
missing_value['Missing_percentage'] = (missing_value['Missing_percentage']/len(dataset_train))*100
missing_value = missing_value.sort_values('Missing_percentage', ascending = False).reset_index(drop = True)
dataset_train.info()
dataset_train["fare_amount"]=dataset_train["fare_amount"].fillna(dataset_train["fare_amount"].median())
dataset_train["passenger_count"]=dataset_train["passenger_count"].fillna(dataset_train["passenger_count"].mode()[0])
dataset_train.isnull().sum()
dataset_train["passenger_count"]=dataset_train["passenger_count"].round().astype(object)
dataset_train["passenger_count"].unique()
#outliers analysis by box plot
plt.figure(figsize=(20,5))
plt.xlim(0,100)
sns.boxplot(x=dataset_train["fare_amount"],data=dataset_train,orient="h")
# sum(dataset_train['fare_amount']<22.5)/len(dataset_train['fare_amount'])*100
#Bivariate Boxplots: Boxplot for Numerical Variable Vs Categorical Variable.
plt.figure(figsize=(20,10))
plt.xlim(0,100)
_=sns.boxplot(x=dataset_train["fare_amount"],y=dataset_train["passenger_count"],data=dataset_train,orient="h")
def outlier_detect(df):
for i in df.describe().columns:
q1=df.describe().at["25%",i]
q3=df.describe().at["75%",i]
IQR=(q3-q1)
ltv=(q1-1.5*IQR)
utv=(q3+1.5*IQR)
x=np.array(df[i])
p=[]
for j in x:
if j<ltv:
p.append(ltv)
elif j>utv:
p.append(utv)
else:
p.append(j)
df[i]=p
return (df)
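# Note: outlier_detect() caps (winsorizes) values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
# at the nearest fence for every numeric column, rather than dropping the rows.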
dataset_int1=outlier_detect(dataset_train.loc[:,dataset_int])
dataset_test_obj=["passenger_count"]
dataset_test_int=["pickup_longitude","pickup_latitude","dropoff_longitude","dropoff_latitude"]
dataset_test1=outlier_detect(dataset_test.loc[:,dataset_test_int])
dataset_test1=pd.concat([dataset_test1,dataset_test["passenger_count"]],axis=1)
dataset_test=pd.concat([dataset_test1,dataset_test["pickup_datetime"]],axis=1)
#determine corr
corr=dataset_int1.corr()
f,ax=plt.subplots(figsize=(7,5))
sns.heatmap(corr,mask=np.zeros_like(corr,dtype=np.bool),cmap=sns.diverging_palette(220,10,as_cmap=True),square=True,ax=ax)
# """feature engineering"""
#1.we will derive new features from pickup_datetime variable
#new features will be year,month,day_of_week,hour
dataset_train1=pd.concat([dataset_int1,dataset_train["passenger_count"]],axis=1)
dataset_train2=pd.concat([dataset_train1,dataset_train["pickup_datetime"]],axis=1)
#dataset_train2.isna().sum()
data=[dataset_train2,dataset_test]
for i in data:
i["year"]=i["pickup_datetime"].apply(lambda row:row.year)
i["month"]=i["pickup_datetime"].apply(lambda row:row.month)
i["day_of_week"] = i["pickup_datetime"].apply(lambda row: row.dayofweek)
i["hour"] = i["pickup_datetime"].apply(lambda row: row.hour)
# train2_nodummies=dataset_train2.copy()
# dataset_train2=train2_nodummies.copy()
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2["year"])
# plt.savefig('year.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['month'])
# plt.savefig('month.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['day_of_week'])
# plt.savefig('day_of_week.png')
plt.figure(figsize=(20,10))
sns.countplot(dataset_train2['hour'])
# plt.savefig('hour.png')
plt.show()
#Now we will use month,day_of_week,hour to derive new features like sessions in a day,seasons in a year,week:weekend/weekday
# for sessions in a day using hour columns
def f(x):
if(x>=5) and (x<=11):
return "morning"
elif (x>=12) and (x<=16):
return "afternoon"
elif (x>=17) and (x<=20):
return "evening"
elif (x>=21) and (x<=23):
return "night pm"
elif (x>=0) and (x<=4):
return "night am"
dataset_train2["sessions"]=dataset_train2["hour"].apply(f)
dataset_test['session'] = dataset_test['hour'].apply(f)
#for seasons in a year using month column
def g(x):
if (x>=3) and (x<=5):
return "spring"
elif (x>=6) and (x<=8):
return "summer"
elif (x>=9) and (x<=11):
return "fall"
else :
return "winter"
dataset_train2['seasons'] = dataset_train2['month'].apply(g)
dataset_test['seasons'] = dataset_test['month'].apply(g)
#for week / weekend in a day of week columns
def h(x):
if (x>=0) and (x<=4):
return "weekday"
elif (x>=5) and (x<=6):
return "weekend"
dataset_train2['week'] = dataset_train2['day_of_week'].apply(h)
dataset_test['week'] = dataset_test['day_of_week'].apply(h)
dataset_train2['passenger_count'].describe()
dataset_train2.isnull().sum()
dataset_test.isna().sum()
#creating dummy variables
temp=pd.get_dummies(dataset_train2["passenger_count"],prefix="passenger_count")
dataset_train2=dataset_train2.join(temp)
temp = pd.get_dummies(dataset_test['passenger_count'], prefix = 'passenger_count')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_test['seasons'], prefix = 'seasons')
dataset_test = dataset_test.join(temp)
temp=pd.get_dummies(dataset_train2["seasons"],prefix = "season" )
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_train2['week'], prefix = 'week')
dataset_train2=pd.concat([dataset_train2,temp],axis=1)
temp = pd.get_dummies(dataset_test['week'], prefix = 'week')
dataset_test = dataset_test.join(temp)
temp = pd.get_dummies(dataset_train2['sessions'], prefix = 'sessions')
dataset_train2= | pd.concat([dataset_train2,temp],axis=1) | pandas.concat |
import pandas as pd
import numpy as np
df =pd.read_csv('movies_metadata.csv',low_memory=False)
data={ 'id': df['id'],
'title':df['title'],
'overview':df['overview'],
'poster_path':df['poster_path']
}
mdbEnd= | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding:utf-8 -*-
# !/usr/bin/env python
"""
Date: 2022/1/12 14:55
Desc: Eastmoney - Data Center - Shareholder Analysis
https://data.eastmoney.com/gdfx/
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_gdfx_free_holding_statistics_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney - Data Center - Shareholder Analysis - Shareholding Statistics - Top 10 Tradable Shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period
    :type date: str
    :return: top 10 tradable shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "STATISTICS_TIMES,COOPERATION_HOLDER_MARK",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_COOPFREEHOLDERS_ANALYSISNEW",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"""(HOLDNUM_CHANGE_TYPE="001")(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"股东类型",
"-",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
]
big_df["统计次数"] = pd.to_numeric(big_df["统计次数"])
big_df["公告日后涨幅统计-10个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-平均涨幅"])
big_df["公告日后涨幅统计-10个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最大涨幅"])
big_df["公告日后涨幅统计-10个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最小涨幅"])
big_df["公告日后涨幅统计-30个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-平均涨幅"])
big_df["公告日后涨幅统计-30个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最大涨幅"])
big_df["公告日后涨幅统计-30个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最小涨幅"])
big_df["公告日后涨幅统计-60个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-平均涨幅"])
big_df["公告日后涨幅统计-60个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最大涨幅"])
big_df["公告日后涨幅统计-60个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最小涨幅"])
return big_df
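# Usage sketch (assumes network access to the Eastmoney endpoint above):
#   df = stock_gdfx_free_holding_statistics_em(date="20210930")
# Note: the DataFrame.append() used in the paging loop is deprecated in pandas >= 1.4;
# building a list of page frames and calling pd.concat() once is the modern equivalent.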
def stock_gdfx_holding_statistics_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney - Data Center - Shareholder Analysis - Shareholding Statistics - Top 10 Shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period
    :type date: str
    :return: top 10 shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "STATISTICS_TIMES,COOPERATION_HOLDER_MARK",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_COOPHOLDERS_ANALYSIS",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"""(HOLDNUM_CHANGE_TYPE="001")(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')""",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"股东类型",
"-",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"统计次数",
"公告日后涨幅统计-10个交易日-平均涨幅",
"公告日后涨幅统计-10个交易日-最大涨幅",
"公告日后涨幅统计-10个交易日-最小涨幅",
"公告日后涨幅统计-30个交易日-平均涨幅",
"公告日后涨幅统计-30个交易日-最大涨幅",
"公告日后涨幅统计-30个交易日-最小涨幅",
"公告日后涨幅统计-60个交易日-平均涨幅",
"公告日后涨幅统计-60个交易日-最大涨幅",
"公告日后涨幅统计-60个交易日-最小涨幅",
"持有个股",
]
]
big_df["统计次数"] = pd.to_numeric(big_df["统计次数"])
big_df["公告日后涨幅统计-10个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-平均涨幅"])
big_df["公告日后涨幅统计-10个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最大涨幅"])
big_df["公告日后涨幅统计-10个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-10个交易日-最小涨幅"])
big_df["公告日后涨幅统计-30个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-平均涨幅"])
big_df["公告日后涨幅统计-30个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最大涨幅"])
big_df["公告日后涨幅统计-30个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-30个交易日-最小涨幅"])
big_df["公告日后涨幅统计-60个交易日-平均涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-平均涨幅"])
big_df["公告日后涨幅统计-60个交易日-最大涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最大涨幅"])
big_df["公告日后涨幅统计-60个交易日-最小涨幅"] = pd.to_numeric(big_df["公告日后涨幅统计-60个交易日-最小涨幅"])
return big_df
def stock_gdfx_free_holding_change_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney - Data Center - Shareholder Analysis - Shareholding Change Statistics - Top 10 Tradable Shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period
    :type date: str
    :return: top 10 tradable shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "HOLDER_NUM,HOLDER_NEW",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_FREEHOLDERS_BASIC_INFONEW",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"-",
"股东类型",
"-",
"-",
"-",
"期末持股只数统计-总持有",
"期末持股只数统计-新进",
"期末持股只数统计-增加",
"期末持股只数统计-减少",
"期末持股只数统计-不变",
"-",
"流通市值统计",
"持有个股",
"-",
"-",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"期末持股只数统计-总持有",
"期末持股只数统计-新进",
"期末持股只数统计-增加",
"期末持股只数统计-不变",
"期末持股只数统计-减少",
"流通市值统计",
"持有个股",
]
]
big_df["期末持股只数统计-总持有"] = pd.to_numeric(big_df["期末持股只数统计-总持有"])
big_df["期末持股只数统计-新进"] = pd.to_numeric(big_df["期末持股只数统计-新进"])
big_df["期末持股只数统计-增加"] = pd.to_numeric(big_df["期末持股只数统计-增加"])
big_df["期末持股只数统计-不变"] = pd.to_numeric(big_df["期末持股只数统计-不变"])
big_df["期末持股只数统计-减少"] = pd.to_numeric(big_df["期末持股只数统计-减少"])
big_df["流通市值统计"] = pd.to_numeric(big_df["流通市值统计"])
return big_df
def stock_gdfx_holding_change_em(date: str = "20210930") -> pd.DataFrame:
"""
    Eastmoney - Data Center - Shareholder Analysis - Shareholding Change Statistics - Top 10 Shareholders
    https://data.eastmoney.com/gdfx/HoldingAnalyse.html
    :param date: reporting period
    :type date: str
    :return: top 10 shareholders
:rtype: pandas.DataFrame
"""
url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
"sortColumns": "HOLDER_NUM,HOLDER_NEW",
"sortTypes": "-1,-1",
"pageSize": "500",
"pageNumber": "1",
"reportName": "RPT_HOLDERS_BASIC_INFO",
"columns": "ALL",
"source": "WEB",
"client": "WEB",
"filter": f"(END_DATE='{'-'.join([date[:4], date[4:6], date[6:]])}')",
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json["result"]["pages"]
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page + 1)):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["result"]["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df["index"] = big_df.index + 1
big_df.columns = [
"序号",
"-",
"-",
"股东名称",
"-",
"股东类型",
"-",
"-",
"-",
"期末持股只数统计-总持有",
"期末持股只数统计-新进",
"期末持股只数统计-增加",
"期末持股只数统计-减少",
"期末持股只数统计-不变",
"-",
"-",
"持有个股",
"流通市值统计",
]
big_df = big_df[
[
"序号",
"股东名称",
"股东类型",
"期末持股只数统计-总持有",
"期末持股只数统计-新进",
"期末持股只数统计-增加",
"期末持股只数统计-不变",
"期末持股只数统计-减少",
"流通市值统计",
"持有个股",
]
]
big_df["期末持股只数统计-总持有"] = pd.to_numeric(big_df["期末持股只数统计-总持有"])
big_df["期末持股只数统计-新进"] = pd.to_numeric(big_df["期末持股只数统计-新进"])
big_df["期末持股只数统计-增加"] = pd.to_numeric(big_df["期末持股只数统计-增加"])
big_df["期末持股只数统计-不变"] = pd.to_numeric(big_df["期末持股只数统计-不变"])
big_df["期末持股只数统计-减少"] = pd.to_numeric(big_df["期末持股只数统计-减少"])
big_df["流通市值统计"] = pd.to_numeri | c(big_df["流通市值统计"]) | pandas.to_numeric |
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import json
import datetime
import math
from random import randint
import sklearn
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LassoCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
"""## Importing data"""
matches = pd.read_csv("../Data/matches.csv")
matches.head()
attributes = pd.read_csv("../Data/attributes.csv")
attributes.head()
batsmen = open("../Data/batsmen.json",)
batsmen_data = json.load(batsmen)
bowlers = open("../Data/bowlers.json",)
bowlers_data = json.load(bowlers)
invtmap = open("../Data/invtmap.json",)
invtmap_data = json.load(invtmap)
scorecard = open("../Data/scorecard.json",)
scorecard_data = json.load(scorecard)
region = open("../Data/region.json",)
region_data = json.load(region)
tmap = open("../Data/tmap.json",)
tmap_data = json.load(tmap)
"""## Model 1
## Making the Database
"""
def get_matches(team_1, team_2, date, num_years=5):
matches_team = matches[matches["Team_1"] == team_1]
matches_team = matches_team[matches["Team_2"] == team_2]
matches_team1 = matches[matches["Team_1"] == team_2]
matches_team1 = matches_team1[matches["Team_2"] == team_1]
matches_team = pd.concat([matches_team, matches_team1], axis=0)
matches_team["Date"] = | pd.to_datetime(matches_team["Date"]) | pandas.to_datetime |
'''
This convert data from txt to csv
'''
import argparse
import csv
import pandas as pd
parser = argparse.ArgumentParser(
description="data name"
)
parser.add_argument(
"--data",
type=str,
help="choose dataset: spheres, mnist, fmnist, cifar10",
default="spheres",
)
args = parser.parse_args()
if __name__ == "__main__":
# lrs = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
# perps = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]
# # change txt to csv
# for lr in lrs:
# for perp in perps:
# with open(f'{args.data}_result_{lr}_{perp}.txt', 'r') as in_file:
# stripped = (line.strip() for line in in_file)
# lines = (line.split("\t") for line in stripped if line)
# with open(f'{args.data}_{lr}_{perp}.csv', 'w') as out_file:
# writer = csv.writer(out_file)
# writer.writerow(('0', '1'))
# writer.writerows(lines)
# # add label
# for lr in lrs:
# for perp in perps:
# x = pd.read_csv(f'{args.data}_{lr}_{perp}.csv', float_precision='round_trip')
# df = pd.DataFrame(x)
# y = pd.read_csv(f'visualization/public/results/{args.data}/pca.csv', float_precision='round_trip')
# df2 = pd.DataFrame(y)
# df['label'] = df2['label']
# df.to_csv(f"atsne_{args.data}_{lr}_{perp}.csv", index=False)
samples = [1,2,5,10,20,30,50,60,80,100]
# change txt to csv
for sample in samples:
with open(f'atsne_{sample}.txt', 'r') as in_file:
stripped = (line.strip() for line in in_file)
lines = (line.split("\t") for line in stripped if line)
with open(f'atsne_tmp{sample}.csv', 'w') as out_file:
writer = csv.writer(out_file)
writer.writerow(('0', '1'))
writer.writerows(lines)
for sample in samples:
x = pd.read_csv(f'atsne_tmp{sample}.csv', float_precision='round_trip')
df = pd.DataFrame(x)
y = pd.read_csv(f'tsne_{sample}.csv', float_precision='round_trip')
df2 = | pd.DataFrame(y) | pandas.DataFrame |
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# import pandas_datareader.data as web
import plotly.graph_objs as go
from datetime import datetime
import pandas as pd
import numpy as np
import os
import flask
import psycopg2
from pathlib import Path
from dash import Dash
import time
from .layout import html_layout
import psycopg2
user = os.environ["POSTGRES_USER"]
host = os.environ["POSTGRES_HOSTNAME"]
password = os.environ["POSTGRES_PASSWORD"]
dbname = os.environ["POSTGRES_DBNAME"]
# Settings for psycopg Postgres connector
con = psycopg2.connect(database=dbname, user=user, password=password, host=host)
sql_query_2= "SELECT * FROM groupby_article_large_edits limit 50;"
df2 = pd.read_sql_query(sql_query_2, con)
dict_df2 = {}
for index, row in df2.iterrows():
dict_df2[row['id']]=row['entity_title']
def Add_Dash(server):
"""Create a Dash app."""
external_stylesheets = ['/static/dist/css/styles.css',
'https://fonts.googleapis.com/css?family=Lato',
'https://use.fontawesome.com/releases/v5.8.1/css/all.css']
external_scripts = ['/static/dist/js/includes/jquery.min.js',
'/static/dist/js/main.js']
dash_app = Dash(server=server,
external_stylesheets=external_stylesheets,
external_scripts=external_scripts,
routes_pathname_prefix='/timeseries/')
dash_app.index_string = html_layout
selected_dropdown_value = [41688778,52644751,61008894] # 13404: "Hong Kong",43971623: "Hong Kong Protests 2014",61008894: "Hong Kong Protests 2019",
dropdown = {41688778: " Brexit", 52644751 : "Efforts to impeach Donald Trump",61008894:"2019 Hong Kong protests"}
trace1 = []
trace2 = []
trace3 = []
for stock in selected_dropdown_value:
sql_query_2_3 = "SELECT * FROM timescale_ts22 where entity_id = "+str(stock)+";"
df = | pd.read_sql_query(sql_query_2_3, con) | pandas.read_sql_query |
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import pandas as pd
EXAMPLE_ROWS =5
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if isinstance(msg,api.Message) :
print('Port: ', port)
print('Attributes: ', msg.attributes)
print('Body: ', str(msg.body))
else :
print(str(msg))
return msg
def call(config,msg):
api.config = config
return process(msg)
def set_port_callback(port, callback) :
df = | pd.DataFrame({'icol': [1, 2, 3, 4, 5], 'col 2': [1, 2, 3, 4, 5], 'col3': [100, 200, 300, 400, 500]}) | pandas.DataFrame |
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
import os
# Get directory paths
root_dir = os.path.dirname(os.path.dirname(__file__))
data_dir = os.path.join(root_dir, "data/{}")
@st.cache
def load_data(directory):
# Read and process the data
dataset = pd.read_csv(directory.format('diabetes.csv'))
feature_cols = list(dataset.columns)
feature_cols.remove('Outcome')
x_data = dataset[feature_cols]
y_data = dataset['Outcome']
return x_data, y_data
# Displays the confusion matrix and metrics associated with the model
def display_prediction(model_name, target, prediction, labels):
# Sub-header
st.subheader(model_name)
# Plot the confusion matrix
confusion_matrix = metrics.confusion_matrix(target, prediction)
fig, ax = plt.subplots()
sns.heatmap(pd.DataFrame(confusion_matrix), annot=True, cmap="Blues_r", fmt='g')
ax.title.set_text('Confusion matrix')
ax.set_ylabel('Target label')
ax.set_xlabel('Predicted label')
ax.xaxis.set_ticklabels(labels)
ax.yaxis.set_ticklabels(labels)
st.pyplot(fig)
# Metrics
st.write('Accuracy: ', metrics.accuracy_score(target, prediction))
st.write('Precision: ', metrics.precision_score(target, prediction))
st.write('Recall: ', metrics.recall_score(target, prediction))
st.write('F1 Score: ', metrics.f1_score(target, prediction))
st.write('AUC: ', metrics.roc_auc_score(target, prediction))
def app():
# Load data
st.title('Diabetes Classification App')
x, y = load_data(data_dir)
labels = ['No Diabetes', 'Diabetes']
# Explore data
st.header('Explore data')
column = st.selectbox('Select a variable to explore', x.columns)
col1, col2 = st.columns(2)
with col1:
st.subheader(labels[0])
st.bar_chart(x[y == 0][column].value_counts())
with col2:
st.subheader(labels[1])
st.bar_chart(x[y == 1][column].value_counts())
# Train models
st.header('Model comparison')
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)
col1, col2, col3, col4 = st.columns(4)
with col1:
k_means_model = KMeans(n_clusters=2)
k_means_model.fit(x_train)
k_means_prediction = k_means_model.predict(x_test)
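        # Caveat: KMeans labels (0/1) are arbitrary cluster ids and are not guaranteed
        # to line up with the Outcome encoding, so the metrics shown below can appear
        # inverted even for a reasonable clustering.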
display_prediction('K-Means', y_test, k_means_prediction, labels)
with col2:
random_forest_model = RandomForestClassifier()
random_forest_model.fit(x_train, y_train)
random_forest_prediction = random_forest_model.predict(x_test)
display_prediction('Random Forest', y_test, random_forest_prediction, labels)
with col3:
logistic_regression_model = LogisticRegression(max_iter=1000)
logistic_regression_model.fit(x_train, y_train)
logistic_regression_prediction = logistic_regression_model.predict(x_test)
display_prediction('L. Regression', y_test, logistic_regression_prediction, labels)
with col4:
decision_tree_model = DecisionTreeClassifier()
decision_tree_model.fit(x_train, y_train)
decision_tree_prediction = decision_tree_model.predict(x_test)
display_prediction('Decision Tree', y_test, decision_tree_prediction, labels)
# Predictions
st.header('Prediction')
col1, col2 = st.columns(2)
with col1:
st.subheader('User data')
user_data = {}
for i in x.columns:
user_data[i] = st.number_input(i, value=x[i].mean())
with col2:
st.subheader('Predictions')
user_dataframe = | pd.DataFrame(user_data, index=[0]) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def EMA(DF, N):
return pd.Series.ewm(DF, span=N, min_periods=N - 1, adjust=True).mean()
def MA(DF, N):
return pd.Series.rolling(DF, N).mean()
def SMA(DF, N, M):
DF = DF.fillna(0)
z = len(DF)
var = np.zeros(z)
var[0] = DF[0]
for i in range(1, z):
var[i] = (DF[i] * M + var[i - 1] * (N - M)) / N
for i in range(z):
DF[i] = var[i]
return DF
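# Note: SMA(X, N, M) above is the recursive "TDX-style" smoothed moving average
# Y[i] = (M*X[i] + (N-M)*Y[i-1]) / N, i.e. exponential smoothing with alpha = M/N,
# not a simple arithmetic mean (that is MA above).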
def ATR(DF, N):
C = DF['close']
H = DF['high']
L = DF['low']
TR1 = MAX(MAX((H - L), ABS(REF(C, 1) - H)), ABS(REF(C, 1) - L))
atr = MA(TR1, N)
return atr
def HHV(DF, N):
return pd.Series.rolling(DF, N).max()
def LLV(DF, N):
return pd.Series.rolling(DF, N).min()
def SUM(DF, N):
return pd.Series.rolling(DF, N).sum()
def ABS(DF):
return abs(DF)
def MAX(A, B):
var = IF(A > B, A, B)
return var
def MIN(A, B):
var = IF(A < B, A, B)
return var
def IF(COND, V1, V2):
var = np.where(COND, V1, V2)
for i in range(len(var)):
V1[i] = var[i]
return V1
def REF(DF, N):
var = DF.diff(N)
var = DF - var
return var
def STD(DF, N):
return pd.Series.rolling(DF, N).std()
def MACD(DF, FAST, SLOW, MID):
EMAFAST = EMA(DF, FAST)
EMASLOW = EMA(DF, SLOW)
DIFF = EMAFAST - EMASLOW
DEA = EMA(DIFF, MID)
MACD = (DIFF - DEA) * 2
DICT = {'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD}
VAR = pd.DataFrame(DICT)
return VAR
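# Minimal usage sketch (synthetic prices, made up purely for illustration):
def _macd_example():
    close = pd.Series(np.linspace(10.0, 12.0, 60))
    return MACD(close, 12, 26, 9)  # DataFrame with DIFF, DEA and MACD columns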
def KDJ(DF, N, M1, M2):
C = DF['close']
H = DF['high']
L = DF['low']
RSV = (C - LLV(L, N)) / (HHV(H, N) - LLV(L, N)) * 100
K = SMA(RSV, M1, 1)
D = SMA(K, M2, 1)
J = 3 * K - 2 * D
DICT = {'KDJ_K': K, 'KDJ_D': D, 'KDJ_J': J}
VAR = pd.DataFrame(DICT)
return VAR
def OSC(DF, N, M): # price oscillator (rate-of-change) line
C = DF['close']
OS = (C - MA(C, N)) * 100
MAOSC = EMA(OS, M)
DICT = {'OSC': OS, 'MAOSC': MAOSC}
VAR = pd.DataFrame(DICT)
return VAR
def BBI(DF, N1, N2, N3, N4): # Bull and Bear Index
C = DF['close']
bbi = (MA(C, N1) + MA(C, N2) + MA(C, N3) + MA(C, N4)) / 4
DICT = {'BBI': bbi}
VAR = pd.DataFrame(DICT)
return VAR
def BBIBOLL(DF, N1, N2, N3, N4, N, M): # BBI Bollinger Bands
bbiboll = BBI(DF, N1, N2, N3, N4)
UPER = bbiboll + M * STD(bbiboll, N)
DOWN = bbiboll - M * STD(bbiboll, N)
DICT = {'BBIBOLL': bbiboll, 'UPER': UPER, 'DOWN': DOWN}
VAR = pd.DataFrame(DICT)
return VAR
def PBX(DF, N1, N2, N3, N4, N5, N6): # PBX (waterfall) lines
C = DF['close']
PBX1 = (EMA(C, N1) + EMA(C, 2 * N1) + EMA(C, 4 * N1)) / 3
PBX2 = (EMA(C, N2) + EMA(C, 2 * N2) + EMA(C, 4 * N2)) / 3
PBX3 = (EMA(C, N3) + EMA(C, 2 * N3) + EMA(C, 4 * N3)) / 3
PBX4 = (EMA(C, N4) + EMA(C, 2 * N4) + EMA(C, 4 * N4)) / 3
PBX5 = (EMA(C, N5) + EMA(C, 2 * N5) + EMA(C, 4 * N5)) / 3
PBX6 = (EMA(C, N6) + EMA(C, 2 * N6) + EMA(C, 4 * N6)) / 3
DICT = {'PBX1': PBX1, 'PBX2': PBX2, 'PBX3': PBX3,
'PBX4': PBX4, 'PBX5': PBX5, 'PBX6': PBX6}
VAR = pd.DataFrame(DICT)
return VAR
def BOLL(DF, N): # Bollinger Bands
C = DF['close']
boll = MA(C, N)
UB = boll + 2 * STD(C, N)
LB = boll - 2 * STD(C, N)
DICT = {'BOLL': boll, 'UB': UB, 'LB': LB}
VAR = pd.DataFrame(DICT)
return VAR
def ROC(DF, N, M): # Rate of Change indicator
C = DF['close']
roc = 100 * (C - REF(C, N)) / REF(C, N)
MAROC = MA(roc, M)
DICT = {'ROC': roc, 'MAROC': MAROC}
VAR = | pd.DataFrame(DICT) | pandas.DataFrame |
# Imports
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import time
import os.path
# ML dependency imports
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import train_test_split
from streamlit.type_util import Key
# Page Settings
st.set_page_config(page_title="California Wildfire ML", page_icon="./img/fav.png", initial_sidebar_state="collapsed")
#"""
#--------------------------
#---- MACHINE LEARNING ----
#--------------------------
#"""
def main():
print("IN MAIN")
# If data has not been cleaned, then clean it
if os.path.isfile("./data/clean/fire_data_clean.csv") == False:
print("CLEANING FIRE")
clean_fire()
if os.path.isfile("./data/clean/drought_data_clean.csv") == False:
print("CLEANING DROUGHT")
clean_drought()
if os.path.isfile("./data/clean/precip_data_clean.csv") == False:
print("CLEANING RAIN")
clean_percip()
# # Init sidebar with header text
# st.sidebar.header("Menu")
# # Add URL for github repository
# st.sidebar.write("[View on GitHub](https://github.com/josephchancey/ca-wildfire-ml)")
def old_fire_dataset():
unclean_fire = pd.read_csv("./data/fire_data.csv")
return unclean_fire
def old_precip_dataset():
unclean_precip = pd.read_csv("./data/precip_data.csv")
return unclean_precip
def old_drought_dataset():
unclean_drought = pd.read_csv("./data/drought_data.csv")
return unclean_drought
def clean_fire():
if os.path.isfile("./data/clean/fire_data_clean.csv") == False:
# import fire data csv
fireFile = "./data/fire_data.csv"
# read the file and store in a data frame
fireData = pd.read_csv(fireFile)
# remove extraneous columns
fireData = fireData[["incident_id","incident_name","incident_county","incident_acres_burned",
"incident_dateonly_created","incident_dateonly_extinguished"]]
# rename columns
fireData = fireData.rename(columns={"incident_id":"ID","incident_name":"Name","incident_county":"County",
"incident_acres_burned":"AcresBurned","incident_dateonly_created":"Started",
"incident_dateonly_extinguished":"Extinguished"})
# check for duplicates, then drop ID column
fireData.drop_duplicates(subset=["ID"])
fireData = fireData[["Name","County","AcresBurned","Started","Extinguished"]]
# create a column that contains the duration
# first convert date columns to datetime
fireData["Started"] = pd.to_datetime(fireData["Started"])
fireData["Extinguished"] = pd.to_datetime(fireData["Extinguished"])
# subtract the dates
fireData["Duration"] = fireData["Extinguished"] - fireData["Started"]
# convert duration to string and remove "days"
fireData["Duration"] = fireData["Duration"].astype(str)
fireData["Duration"] = fireData["Duration"].str.replace("days","")
# replace NaT with NaN and convert back to float
fireData["Duration"] = fireData["Duration"].replace(["NaT"],"NaN")
fireData["Duration"] = fireData["Duration"].astype(float)
# add one day to duration to capture fires that started and were extinguished in the same day
fireData["Duration"] = fireData["Duration"] + 1
# create a column for year and filter for fires during or after 2013
fireData["Year"] = fireData["Started"].dt.year
fireData = fireData.loc[(fireData["Year"]>=2013),:]
# create a column to hold the year and month of the start date
fireData["Date"] = fireData["Started"].apply(lambda x: x.strftime('%Y-%m'))
fireData = fireData[["Date", "County", "Duration", "AcresBurned"]]
# drop nulls
fireData = fireData.dropna()
# reset the index
fireData.reset_index(inplace=True,drop=True)
# export as csv
fireData.to_csv("./data/clean/fire_data_clean.csv",index=False)
return fireData
else:
# This prevents the cleaning from being ran each time this function is called, checks if cleaning is done already
fireData = | pd.read_csv("./data/clean/fire_data_clean.csv") | pandas.read_csv |
import pytest
import pandas as pd
from pandas import compat
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import assert_frame_equal, assert_raises_regex
COMPRESSION_TYPES = [None, 'bz2', 'gzip',
pytest.param('xz', marks=td.skip_if_no_lzma)]
def decompress_file(path, compression):
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.open(path, 'rb')
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
result = f.read().decode('utf8')
f.close()
return result
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_compression_roundtrip(compression):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
uncompressed_content = decompress_file(path, compression)
assert_frame_equal(df, pd.read_json(uncompressed_content))
def test_compress_zip_value_error():
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_json, path, compression="zip")
def test_read_zipped_json():
uncompressed_path = tm.get_data_path("tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = tm.get_data_path("tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression='zip')
assert_frame_equal(uncompressed_df, compressed_df)
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
moto = pytest.importorskip('moto')
df = | pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') | pandas.read_json |
import pandas as pd
import numpy as np
from mvn_historical_drawdowns import read_data
from db_connection import create_connection, odbc_engine
from dateutil.relativedelta import relativedelta
import re
def write_data(df):
engine = create_connection()
tsql_chunksize = 2097 // len(df.columns)
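    # The 2097 above appears to target SQL Server's ~2100 bind-parameter limit per
    # statement: rows-per-chunk * columns must stay under it (further capped at 1000 below).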
tsql_chunksize = 1000 if tsql_chunksize > 1000 else tsql_chunksize
df.to_sql('time_series_proxy_220122', engine, if_exists='append', index=False, chunksize=tsql_chunksize)
def read_local_file(file_path):
l1_cases = pd.read_excel(r'{}'.format(file_path),
sheet_name='Level 1 Definitions')
l2_cases = pd.read_excel(r'{}'.format(file_path),
sheet_name='Level 2 Definitions')
l1_cases.columns = ['Asset_Type', 'Price_Type', 'Source_Type', 'Source_Price', 'Cutoff']
l1_cases['proxy_level'] = 1
l2_cases.columns = ['Asset_Type', 'Price_Type', 'Methodology', 'Source_Type', 'Source_Price', 'Source_Level',
'Parameters']
l2_cases['proxy_level'] = 2
dependency_tbl = pd.concat([l1_cases[['Asset_Type', 'Price_Type', 'Source_Type', 'Source_Price', 'proxy_level']],
l2_cases[['Asset_Type', 'Price_Type', 'Source_Type', 'Source_Price', 'proxy_level']]])
dependency_tbl['Source_Type'] = np.where(dependency_tbl['Source_Type'].isna(),
dependency_tbl['Asset_Type'], dependency_tbl['Source_Type'])
dependency_tbl['Source_Price'] = np.where(dependency_tbl['Source_Price'].isna(),
dependency_tbl['Price_Type'], dependency_tbl['Source_Price'])
return l1_cases, l2_cases, dependency_tbl
def read_dependency(asset_type, price_type, proxy_level):
asset_type_list = [asset_type]
price_type_list = [price_type]
source_type_list = []
source_price_list = []
proxy_level_list = [proxy_level]
cnxn = odbc_engine()
cursor = cnxn.cursor()
while proxy_level >= 1:
sql_str = '''
SELECT Source_Type, Source_Price from dependency_graph
WHERE Proxy_Level = {} and Asset_Type = '{}' and Price_Type = '{}'
'''.format(proxy_level_list[-1], asset_type_list[-1], price_type_list[-1])
rows = cursor.execute(sql_str)
for row in rows.fetchall():
if row[0] is None and row[1] is None:
source_type_list.append(asset_type)
source_price_list.append(price_type)
if proxy_level >= 1:
asset_type_list.append(asset_type)
price_type_list.append(price_type)
else:
source_type_list.append(row[0])
source_price_list.append(row[1])
if proxy_level >= 1:
asset_type_list.append(row[0])
price_type_list.append(row[1])
proxy_level = proxy_level - 1
if proxy_level >= 1:
proxy_level_list.append(proxy_level)
asset_type_list = asset_type_list[:-1]
price_type_list = price_type_list[:-1]
return asset_type_list[::-1], price_type_list[::-1], source_type_list[::-1], \
source_price_list[::-1], proxy_level_list[::-1]
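# Note: the lists above are appended while walking from the requested proxy level
# down to level 1 and are reversed on return, so index 0 always corresponds to the
# level-1 source and the last entry to the requested level.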
def apply_proxy_level_1(asset_code, price_type, cutoff="NA"):
at, pt, st, sp, pl = read_dependency(asset_code, price_type, 1)
df = read_data(st[0], sp[0])
df['Asset_Code'] = at[0]
df['Price_Type'] = pt[0]
df['Proxy_Level'] = 1
df['Proxy_Name'] = np.nan
df = df[['Asset_Code', 'Price_Type', 'Date', 'Price', 'Proxy_Level', 'Proxy_Name']]
if cutoff == "NA":
write_data(df)
else:
df = df[df.Date >= cutoff]
return write_data(df)
def parse_check_overlap_period(min_date, max_date, min_period, max_period, div_method):
'''
    Method to check whether the overlap period condition is satisfied; if satisfied, returns the overlap start and end dates.
:param div_method:
:param min_date:
:param max_date:
:param min_period:
:param max_period:
:return: Boolean
'''
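    # Worked example (values assumed purely for illustration): min_period='6M',
    # max_period='2Y' and min_date=2020-01-31 give overlap_min_end_date=2020-07-31
    # and overlap_max_end_date=2022-01-31 via relativedelta; the div_method branches
    # below then snap those dates to the relevant calendar period end.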
value_min = int(re.findall(r'\d+', min_period)[0])
value_max = int(re.findall(r'\d+', max_period)[0])
if 'M' in min_period:
overlap_min_end_date = min_date + relativedelta(months=value_min)
elif 'Y' in min_period:
overlap_min_end_date = min_date + relativedelta(years=value_min)
else:
raise ValueError("Minimum overlap period is not correct")
if 'M' in max_period:
overlap_max_end_date = min_date + relativedelta(months=value_max)
elif 'Y' in max_period:
overlap_max_end_date = min_date + relativedelta(years=value_max)
else:
raise ValueError("Minimum overlap period is not correct")
# Check if min date is less than max date
if overlap_min_end_date > overlap_max_end_date:
raise ValueError("Overlap Minimum Date should be less than Overlap Maximum Date")
if div_method == 'Average Calendar Quarterly':
month_factor = 0 if overlap_min_end_date.month % 3 == 0 and \
overlap_min_end_date.day == overlap_min_end_date.days_in_month else 1
overlap_min_end_date, overlap_max_end_date = overlap_min_end_date + \
pd.tseries.offsets.QuarterEnd() * month_factor, \
overlap_max_end_date + \
pd.tseries.offsets.QuarterEnd() * month_factor
elif div_method == 'Average Calendar Monthly':
month_factor = 0 if overlap_min_end_date.day % overlap_min_end_date.days_in_month == 0 else 1
overlap_min_end_date, overlap_max_end_date = overlap_min_end_date + \
pd.tseries.offsets.MonthEnd() * month_factor, \
overlap_max_end_date + \
pd.tseries.offsets.MonthEnd() * month_factor
elif div_method == 'Average Calendar Semi-Annual':
year_factor = 0 if overlap_min_end_date.day == overlap_min_end_date.days_in_month \
and overlap_min_end_date.month == 12 else 1
overlap_min_end_date, overlap_max_end_date = overlap_min_end_date + \
| pd.tseries.offsets.YearEnd() | pandas.tseries.offsets.YearEnd |