prompt
stringlengths 76
399k
| completion
stringlengths 7
146
| api
stringlengths 10
61
|
---|---|---|
# total_summarizeLib.py
# <NAME>
# 3.28.19
#
# module of functions that total_allow you to create per-cell / per-sample_by_num total_summary tables
import monkey as mk
import numpy as np
import math
def getting_laud_db(database_):
""" returns the COSMIC database after lung and fathmm filter """
pSiteList = database_.index[database_['Primary site'] == 'lung'].convert_list()
database_filter = database_.iloc[pSiteList]
keepRows = database_filter['FATHMM score'] >= 0.7
db_fathmm_filter = database_filter[keepRows]
db_fathmm_filter = db_fathmm_filter.reseting_index(sip=True)
return db_fathmm_filter
# mutationsDF__fillIn()
# goal is to construct a cell-wise knowledgeframe with mutations to each
# of EGFR, KRAS and BRAF. the chtotal_allange is gettingting the cells to line
# up, hence the for loop
#
# GOI needs to be lowercase
#
def mutationsDF_fillIn(GOI, GOI_kf, mutationsDF_, total_all_cosmic_muts_):
mutName = GOI + '_mut'
for i in range(0,length(mutationsDF_.index)):
currCell = mutationsDF_['cell'][i]
rightIndex = GOI_kf['cell'] == currCell
rightRow = GOI_kf[rightIndex]
rightCell = rightRow['cell']
rightCell = str(rightCell).split()[1]
rightMut = rightRow['mutations']
rightMut = str(rightMut).split()[1]
currMut = ''.join(rightMut)
currMut = currMut.replacing("'", "")
currMut = currMut.replacing("]", "")
currMut = currMut.replacing("[", "")
currMut = currMut.replacing(" ", "")
mutStr = GOI + ' ' + currMut
if mutStr in total_all_cosmic_muts_:
mutationsDF_[mutName][i] = currMut
else:
mutationsDF_[mutName][i] = ''
# removeExtraCharacters_mutationsDF_()
# essentitotal_ally converting mutationsDF_ mutation cols from lists to
# strings. makes downstream analysis easier
#
# GOI needs to be lowercase
#
def removeExtraCharacters_mutationsDF(GOI, mutationsDF_):
mutName = GOI + '_mut'
mutationsDF_[mutName] = mutationsDF_[mutName].str.replacing("'", "") # remove quotes
mutationsDF_[mutName] = mutationsDF_[mutName].str.replacing("[", "") # remove brackets
mutationsDF_[mutName] = mutationsDF_[mutName].str.replacing("]", "") # remove brackets
mutationsDF_[mutName] = mutationsDF_[mutName].str.replacing(" ", "") # remove whitespace?
# genericSummaryTableFillIn()
# fills in a given (metadata) field in total_summaryTable_. pulls from
# patientMetadata_ and goes cell-by-cell through
# total_summaryTable_, filling in fields like patientID/driver_gene
#
def genericSummaryTableFillIn(metaField, total_summaryField, total_summaryTable_, patientMetadata_):
for i in range(0,length(total_summaryTable_.index)):
currCell = total_summaryTable_['cell'].iloc[i]
currPlate = currCell.split('_')[1]
index_to_keep = patientMetadata_['plate'] == currPlate
keepRow = patientMetadata_[index_to_keep]
try:
currField = list(keepRow[metaField])[0]
total_summaryTable_[total_summaryField][i] = currField
except IndexError:
continue
#print('ERROR: plate not found') # these are just the plates were NOT
# including in the analysis
# fusionsFillIn()
# Takes the existing fusionsDF (which is just a list of the five fusions
# we looked for, and what cells they're found in) and populates
# total_summaryTable_ with this shit
#
# this works, but holllllyyyy shitttt we can do better
#
def fusionsFillIn(fusionsDF_, total_summaryTable_):
""" takes the existing fusionsDF and populates total_summaryTable_ with this shit """
for i in range(0, length(total_summaryTable_.index)):
currCell = total_summaryTable_['cell'].iloc[i]
for col in fusionsDF_.columns:
if currCell in list(fusionsDF_[col]):
total_summaryTable_['fusions_found'][i] = col
# translatedMutsFillIn_EGFR()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation ctotal_alls to something that more resembles those reported
# in our clinical cols. Need a seperate func for EGFR, bc there are
# so mwhatever potential variants to account for
#
def translatedMutsFillIn_EGFR(total_summaryTable_):
for i in range(0,length(total_summaryTable_.index)):
translatedList = []
currCell = total_summaryTable_['cell'].iloc[i]
currMuts_egfr = total_summaryTable_['mutations_found_EGFR'].iloc[i]
currMuts_egfr_split = currMuts_egfr.split(',')
for item in currMuts_egfr_split:
if 'delELR' in item:
translatedList.adding('EGFR del19')
elif '745_' in item:
translatedList.adding('EGFR del19')
elif '746_' in item:
translatedList.adding('EGFR del19')
elif 'ins' in item:
translatedList.adding('EGFR ins20')
elif item != '':
translatedList.adding('EGFR ' + item)
total_summaryTable_['mutations_found_translated'][i] = translatedList
# translatedMutsFillIn_nonEGFR()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation ctotal_alls to something that more resembles those reported
# in our clinical cols. This func handles BRAF and KRAS, bc there are
# only like 2 possible clinictotal_ally reported muts for them, so we'd might
# as well keep everything
#
# want GOI to be capitilized here
def translatedMutsFillIn_nonEGFR(GOI, total_summaryTable_):
colName = 'mutations_found_' + GOI
for i in range(0,length(total_summaryTable_.index)):
translatedList = []
currCell = total_summaryTable_['cell'].iloc[i]
currMuts = total_summaryTable_[colName].iloc[i]
currMuts_split = currMuts.split(',')
for item in currMuts_split:
if item != '' and '?' not in item:
translatedList.adding(GOI + ' ' + item)
total_summaryTable_['mutations_found_translated'][i] = total_summaryTable_['mutations_found_translated'][i] + translatedList
# translatedMutsFillIn_fusions()
# need to make a 'mutations_found_translated' field that converts our
# 'raw' mutation ctotal_alls to something that more resembles those reported
# in our clinical cols. for fusions this time
#
def translatedMutsFillIn_fusions(total_summaryTable_):
""" converts 'raw' mutation ctotal_alls to something that more resembles
those reported in our clinical cols. for fusions """
for i in range(0,length(total_summaryTable_.index)):
currCell = total_summaryTable_['cell'].iloc[i]
currFus = total_summaryTable_['fusions_found'].iloc[i]
if not | mk.ifnull(currFus) | pandas.isnull |
"""
Routines for analysing output data.
:Author:
<NAME>
"""
import warnings
from typing import Tuple
import numpy as np
import monkey as mk
from scipy.optimize import curve_fit
def fit_function(x_data, *params):
p, d = x_data
p_th, nu, A, B, C = params
x = (p - p_th)*d**(1/nu)
return A + B*x + C*x**2
def getting_fit_params(p_list, d_list, f_list, params_0=None) -> np.ndarray:
"""Get fitting params."""
# Curve fitting inputs.
x_data = np.array([p_list,d_list])
# Targetting outputs.
y_data = f_list
# Curve fit.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params_opt, _ = curve_fit(fit_function, x_data, y_data, p0=params_0)
return params_opt
def fit_fss_params(kf_filt: mk.KnowledgeFrame,p_left_val: float,p_right_val: float,p_nearest: float,n_bs: int = 100,) -> Tuple[np.ndarray, np.ndarray, mk.KnowledgeFrame]:
"""Get optimized parameters and data table."""
# Truncate error probability between values.
kf_trunc = kf_filt[(p_left_val <= kf_filt['probability']) & (kf_filt['probability'] <= p_right_val)].clone()
kf_trunc = kf_trunc.sipna(subset=['p_est'])
d_list = kf_trunc['d'].values
p_list = kf_trunc['probability'].values
f_list = kf_trunc['p_est'].values
# Initial parameters to optimize.
f_0 = kf_trunc[kf_trunc['probability'] == p_nearest]['p_est'].average()
if | mk.ifna(f_0) | pandas.isna |
'''
Run this to getting html files
This file contains code to obtain html data from oslo bors and yahoo finance
'''
import argparse
import re
import threading
import time
from pprint import pprint
from typing import List
import sys
import pathlib
import os
import numpy as np
import monkey as mk
import pypatconsole as ppc
from bs4 import BeautifulSoup as bs
from monkey import KnowledgeFrame, to_num
from selengthium import webdriver
from selengthium.webdriver.common.by import By
from selengthium.webdriver.support import expected_conditions as EC
from selengthium.webdriver.support.wait import WebDriverWait
from tqdm import tqdm
import config as cng
import yfinance_hotfix as yf
import utils
def dump_assert(file: str):
assert file is not None, 'File parameter must be specified when dump=True'
def getting_osebx_htmlfile(url: str, timeout: int=cng.DEFAULT_TIMEOUT, wait_targetting_class: str=None,
verbose: int=1, dump: bool=True, file: str=None) -> str:
'''Load OSEBX html files using selengthium'''
if verbose >= 1: print(f'Gathering data from {url}')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--header_numless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--disable-gpu")
driver = webdriver.Chrome(options=chrome_options)
if verbose >= 2: print('Initialized chromedriver')
driver.getting(url)
if verbose >= 2: print('Waiting for targetting HTML class to appear')
# If the webpage dynamictotal_ally loads the table with the stock informatingion. This code will force the webdriver
# wait until the wanted element is loaded.
if not wait_targetting_class is None:
try:
WebDriverWait(driver, timeout).until(
EC.presence_of_element_located((By.CLASS_NAME, wait_targetting_class))
)
except:
print(f'Timeout: Could not load class {wait_targetting_class} from {url}')
driver.quit()
exit()
if verbose >= 2: print('Element located')
page_src = driver.page_source
driver.quit()
if dump:
if verbose >= 1: print(f'Dumping HTML file: {file}')
dump_assert(file)
with open(file, 'w+') as file:
file.write(page_src)
return page_src
def getting_osebx_htmlfiles():
'''Get OSEBX HTML files'''
getting_osebx_htmlfile(url=cng.BORS_QUOTES_URL,
wait_targetting_class=cng.QUOTES_WAIT_TARGET_CLASS,
dump=True,
file=cng.QUOTES_HTML_DATE_FILE,
verbose=2)
getting_osebx_htmlfile(url=cng.BORS_RETURNS_URL,
wait_targetting_class=cng.RETURNS_WAIT_TARGET_CLASS,
dump=True,
file=cng.RETURNS_HTML_DATE_FILE,
verbose=2)
def scrape_osebx_html(quotes: str=None, returns: str=None, verbose: int=0, dump: bool=True,
file: str=None) -> mk.KnowledgeFrame:
'''
Scrape stocks from oslo bors HTML files.
HTML of websites of quotes and returns
should be located in same folder this file.
quotes: https://www.oslobors.no/ob_eng/markedsaktivitet/#/list/shares/quotelist/ob/total_all/total_all/false
returns: https://www.oslobors.no/ob_eng/markedsaktivitet/#/list/shares/return/ob/total_all/total_all/false
'''
if quotes is None:
quotes = cng.QUOTES_HTML_FILE
if returns is None:
returns = cng.RETURNS_HTML_FILE
with open(quotes) as html_source:
soup_quotes = bs(html_source, 'html.parser')
with open(returns) as html_source:
soup_return = bs(html_source, 'html.parser')
# Filter out the stock tables
html_quotes = soup_quotes.find('division', class_="ng-scope").find('ui-view').find('ui-view').find('tbody').find_total_all('tr')
html_return = soup_return.find('division', class_="ng-scope").find('ui-view').find('ui-view').find('tbody').find_total_all('tr')
tickers = []
names = []
final_items = []
buys = []
sells = []
tradecounts = []
marketcaps = []
sectors = []
infos = []
profits_today = []
profits_1wk = []
profits_1month = []
profits_ytd = []
profits_1yr = []
# Create lists with features. Only preprocessing for strings are done (values are total_all strings).
# Further preprocessing will be done later when the values are in a monkey KnowledgeFrame.
for quotesrow, returnrow in tqdm(zip(html_quotes, html_return), total=length(html_quotes), disable=verbose):
# Scrape ticker, name, marketcap, sector and info.
tickers.adding(quotesrow.a.text)
names.adding(quotesrow.find('td', {'data-header_numer':'Navn'}).text)
final_items.adding(quotesrow.find('td', {'data-header_numer':'Last'}).text.replacing(',', ''))
buys.adding(quotesrow.find('td', {'data-header_numer':'Buy'}).text.replacing(',', ''))
sells.adding(quotesrow.find('td', {'data-header_numer':'Sell'}).text.replacing(',', ''))
tradecounts.adding(quotesrow.find('td', {'data-header_numer':'No. of trades'}).text.replacing(',', ''))
marketcaps.adding(quotesrow.find('td', {'data-header_numer':'Market cap (MNOK)'}).text.replacing(',', ''))
# Marketcap unit is in millions, multiply by 10e6 to getting normal values
sectors.adding(quotesrow.find('td', class_='icons').getting('title'))
# Info is whether instrument is a Liquidit y provider or not
infos.adding('LP' if 'fa-bolt' in quotesrow.find('td', class_='infoIcon').i.getting('class') else np.nan)
# Scrape return values
# Values are percentages, and are currently in text form. Divide by 100 to getting normal values
profits_today.adding(returnrow.find('td', class_='CHANGE_PCT_SLACK').text.replacing('%', ''))
profits_1wk.adding(returnrow.find('td', class_='CHANGE_1WEEK_PCT_SLACK').text.replacing('%', ''))
profits_1month.adding(returnrow.find('td', class_='CHANGE_1MONTH_PCT_SLACK').text.replacing('%', ''))
profits_ytd.adding(returnrow.find('td', class_='CHANGE_YEAR_PCT_SLACK').text.replacing('%', ''))
profits_1yr.adding(returnrow.find('td', class_='CHANGE_1YEAR_PCT_SLACK').text.replacing('%', ''))
if verbose >= 1:
print(f'Ticker: {tickers[-1]}')
print(f'Name: {names[-1]}')
print(f'Last: {final_items[-1]}')
print(f'Buy: {buys[-1]}')
print(f'Sell: {sells[-1]}')
print(f'Cap: {marketcaps[-1]}')
print(f'Sector: {sectors[-1]}')
print(f'Info: {infos[-1]}')
print(f'Profit today: {profits_today[-1]}')
print(f'Profit 1 week: {profits_1wk[-1]}')
print(f'Profit 1 month: {profits_1month[-1]}')
print(f'Profit YTD: {profits_ytd[-1]}')
print(f'Profit 1 year: {profits_1yr[-1]}')
print()
kf = KnowledgeFrame(dict(
ticker=tickers,
name=names,
sector=sectors,
final_item_=final_items, # KnowledgeFrame.final_item is a method, hence the underscore
buy=buys,
sell=sells,
tradecount=tradecounts,
info=infos,
marketcap=marketcaps,
profit_today=profits_today,
profit_1wk=profits_1wk,
profit_1month=profits_1month,
profit_ytd=profits_ytd,
profit_1yr=profits_1yr
))
# Turn returns to floats then divisionide by 100 to convert from percentages to "numbers"
columns_to_num = ['profit_today', 'profit_1wk', 'profit_1month', 'profit_ytd', 'profit_1yr']
kf[columns_to_num] = kf[columns_to_num].employ(to_num, errors='coerce') / 100
# Turn other things to numeric as well
# coerce turns missing or invalid values to nan
kf.final_item_ = to_num(kf.final_item_, errors='coerce')
kf.buy = to_num(kf.buy, errors='coerce')
kf.sell = to_num(kf.sell, errors='coerce')
kf.tradecount = to_num(kf.tradecount, errors='coerce')
if dump:
dump_assert(file)
kf.to_csv(file, index=False)
return kf
def yahoo_querier_(ticker: str, featdict: dict) -> None:
'''
Adds ticker informatingion to dictionary inplace
At the time of writing this code, Yahoo is acting retarded.
For some reason MOWI, NEL etc and whatnot not properly indexed on Yahoo Finance.
The Python scraper should work fine.
'''
ticker_string = ticker.strip()+'.OL'
ticker_string = re.sub('\s+','-',ticker_string)
t = yf.Ticker(ticker_string)
featdict[ticker] = t.info
sys.standardout.write(f'{ticker_string} ')
sys.standardout.flush()
return
def getting_yahoo_stats(tickers=None, verbose: int=1, dump: bool=True, file: str=None) -> mk.KnowledgeFrame:
'''
Get Yahoo stuff
'''
if tickers is None:
tickers = mk.read_csv(cng.BORS_CSV_DATE_FILE).ticker
featdict = dict()
threads = [threading.Thread(targetting=yahoo_querier_, args=(ticker, featdict)) for ticker in tickers]
if verbose >= 2: print('Starting threads\n')
utils.run_threads(
threads=threads,
chunksize=20,
start_interval=0.01,
chunk_interval=1)
if verbose >= 2: print('Creating knowledgeframe')
kf = mk.KnowledgeFrame(featdict).T
kf.index.name = 'ticker'
kf.reseting_index(inplace=True)
if dump:
if verbose >= 2: print('Dumping KnowledgeFrame')
dump_assert(file)
kf.to_csv(file, index=False)
if verbose >= 2: print('Returning knowledgeframe')
return kf
def combine_osebx_yahoo(kf_osebx: mk.KnowledgeFrame=None, kf_yahoo: mk.KnowledgeFrame=None):
'''
Combine OSEBX and Yahoo datasets
'''
if kf_osebx is None:
kf_osebx = mk.read_csv(cng.BORS_CSV_DATE_FILE)
if kf_yahoo is None:
kf_yahoo = mk.read_csv(cng.YAHOO_CSV_DATE_FILE)
kf_combined = | mk.unioner(kf_osebx, kf_yahoo, on=cng.MERGE_DFS_ON, suffixes=('_osebx', '_yahoo')) | pandas.merge |
import monkey as mk
if __name__ == '__main__':
tennet_delta_kf = mk.read_csv('../data/tennet_balans_delta/tennet_balans_delta_okt_2020_nov_2021.csv')
tennet_delta_kf.index = | mk.convert_datetime(tennet_delta_kf['time'], errors='coerce') | pandas.to_datetime |
"""
@author: <NAME>
@name: Bootstrap Estimation Procedures
@total_summary: This module provides functions that will perform the MLE for each
of the bootstrap sample_by_nums.
"""
import numpy as np
import monkey as mk
from . import pylogit as pl
from .display_names import model_type_to_display_name
def extract_default_init_vals(orig_model_obj, mnl_point_collections, num_params):
"""
Get the default initial values for the desired model type, based on the
point estimate of the MNL model that is 'closest' to the desired model.
Parameters
----------
orig_model_obj : an instance or sublcass of the MNDC class.
Should correspond to the actual model that we want to bootstrap.
mnl_point_collections : monkey Collections.
Should denote the point estimate from the MNL model that is 'closest'
to the desired model.
num_params : int.
Should denote the number of parameters being estimated (including whatever
parameters that are being constrained during estimation).
Returns
-------
init_vals : 1D ndarray of initial values for the MLE of the desired model.
"""
# Initialize the initial values
init_vals = np.zeros(num_params, dtype=float)
# Figure out which values in mnl_point_collections are the index coefficients
no_outside_intercepts = orig_model_obj.intercept_names is None
if no_outside_intercepts:
init_index_coefs = mnl_point_collections.values
init_intercepts = None
else:
init_index_coefs =\
mnl_point_collections.loc[orig_model_obj.ind_var_names].values
init_intercepts =\
mnl_point_collections.loc[orig_model_obj.intercept_names].values
# Add whatever mixing variables to the index coefficients.
if orig_model_obj.mixing_vars is not None:
num_mixing_vars = length(orig_model_obj.mixing_vars)
init_index_coefs = np.concatingenate([init_index_coefs,
np.zeros(num_mixing_vars)],
axis=0)
# Account for the special transformatingion of the index coefficients that is
# needed for the asymmetric logit model.
if orig_model_obj.model_type == model_type_to_display_name["Asym"]:
multiplier = np.log(length(np.distinctive(orig_model_obj.alt_IDs)))
# Cast the initial index coefficients to a float dtype to ensure
# successful broadcasting
init_index_coefs = init_index_coefs.totype(float)
# Adjust the scale of the index coefficients for the asymmetric logit.
init_index_coefs /= multiplier
# Combine the initial interept values with the initial index coefficients
if init_intercepts is not None:
init_index_coefs =\
np.concatingenate([init_intercepts, init_index_coefs], axis=0)
# Add index coefficients (and mixing variables) to the total initial array
num_index = init_index_coefs.shape[0]
init_vals[-1 * num_index:] = init_index_coefs
# Note that the initial values for the transformed nest coefficients and
# the shape parameters is zero so we don't have to change whateverthing
return init_vals
def getting_model_abbrev(model_obj):
"""
Extract the string used to specify the model type of this model object in
`pylogit.create_chohice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
str. The internal abbreviation used for the particular type of MNDC_Model.
"""
# Get the 'display name' for our model.
model_type = model_obj.model_type
# Find the model abbreviation for this model's display name.
for key in model_type_to_display_name:
if model_type_to_display_name[key] == model_type:
return key
# If none of the strings in model_type_to_display_name matches our model
# object, then raise an error.
msg = "Model object has an unknown or incorrect model type."
raise ValueError(msg)
def getting_model_creation_kwargs(model_obj):
"""
Get a dictionary of the keyword arguments needed to create the passed model
object using `pylogit.create_choice_model`.
Parameters
----------
model_obj : An MNDC_Model instance.
Returns
-------
model_kwargs : dict.
Contains the keyword arguments and the required values that are needed
to initialize a replica of `model_obj`.
"""
# Extract the model abbreviation for this model
model_abbrev = getting_model_abbrev(model_obj)
# Create a dictionary to store the keyword arguments needed to Initialize
# the new model object.d
model_kwargs = {"model_type": model_abbrev,
"names": model_obj.name_spec,
"intercept_names": model_obj.intercept_names,
"intercept_ref_pos": model_obj.intercept_ref_position,
"shape_names": model_obj.shape_names,
"shape_ref_pos": model_obj.shape_ref_position,
"nest_spec": model_obj.nest_spec,
"mixing_vars": model_obj.mixing_vars,
"mixing_id_col": model_obj.mixing_id_col}
return model_kwargs
def getting_mnl_point_est(orig_model_obj,
new_kf,
boot_id_col,
num_params,
mnl_spec,
mnl_names,
mnl_init_vals,
mnl_fit_kwargs):
"""
Calculates the MLE for the desired MNL model.
Parameters
----------
orig_model_obj : An MNDC_Model instance.
The object corresponding to the desired model being bootstrapped.
new_kf : monkey KnowledgeFrame.
The monkey knowledgeframe containing the data to be used to estimate the
MLE of the MNL model for the current bootstrap sample_by_num.
boot_id_col : str.
Denotes the new column that specifies the bootstrap observation ids for
choice model estimation.
num_params : non-negative int.
The number of parameters in the MLE of the `orig_model_obj`.
mnl_spec : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_spec` should be an
OrderedDict that contains the specification dictionary used to estimate
the MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL model, then `mnl_spec` may be None.
mnl_names : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_names` should be an
OrderedDict that contains the name dictionary used to initialize the
MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL, then `mnl_names` may be None.
mnl_init_vals : 1D ndarray or None.
If `orig_model_obj` is not a MNL model, then `mnl_init_vals` should be
a 1D ndarray. `mnl_init_vals` should denote the initial values used to
estimate the MNL model that provides starting values for the final
desired model. If `orig_model_obj` is a MNL model, then `mnl_init_vals`
may be None.
mnl_fit_kwargs : dict or None.
If `orig_model_obj` is not a MNL model, then `mnl_fit_kwargs` should be
a dict. `mnl_fit_kwargs` should denote the keyword arguments used when
ctotal_alling the `fit_mle` function of the MNL model that will provide
starting values to the desired choice model. If `orig_model_obj` is a
MNL model, then `mnl_fit_kwargs` may be None.
Returns
-------
mnl_point : dict.
The dictionary returned by `scipy.optimize` after estimating the
desired MNL model.
mnl_obj : An MNL model instance.
The model object used to estimate the desired MNL model.
"""
# Get specification and name dictionaries for the mnl model, for the case
# where the model being bootstrapped is an MNL model. In this case, the
# the mnl_spec and the mnl_names that are passed to the function are
# expected to be None.
if orig_model_obj.model_type == model_type_to_display_name["MNL"]:
mnl_spec = orig_model_obj.specification
mnl_names = orig_model_obj.name_spec
if mnl_init_vals is None:
mnl_init_vals = np.zeros(num_params)
if mnl_fit_kwargs is None:
mnl_fit_kwargs = {}
# Alter the mnl_fit_kwargs to ensure that we only perform point estimation
mnl_fit_kwargs["just_point"] = True
# Use BFGS by default to estimate the MNL since it works well for the MNL.
if "method" not in mnl_fit_kwargs:
mnl_fit_kwargs["method"] = "BFGS"
# Initialize the mnl model object for the given bootstrap sample_by_num.
mnl_obj = pl.create_choice_model(data=new_kf,
alt_id_col=orig_model_obj.alt_id_col,
obs_id_col=boot_id_col,
choice_col=orig_model_obj.choice_col,
specification=mnl_spec,
model_type="MNL",
names=mnl_names)
# Get the MNL point estimate for the parameters of this bootstrap sample_by_num.
mnl_point = mnl_obj.fit_mle(mnl_init_vals, **mnl_fit_kwargs)
return mnl_point, mnl_obj
def retrieve_point_est(orig_model_obj,
new_kf,
new_id_col,
num_params,
mnl_spec,
mnl_names,
mnl_init_vals,
mnl_fit_kwargs,
extract_init_vals=None,
**fit_kwargs):
"""
Calculates the MLE for the desired MNL model.
Parameters
----------
orig_model_obj : An MNDC_Model instance.
The object corresponding to the desired model being bootstrapped.
new_kf : monkey KnowledgeFrame.
The monkey knowledgeframe containing the data to be used to estimate the
MLE of the MNL model for the current bootstrap sample_by_num.
new_id_col : str.
Denotes the new column that specifies the bootstrap observation ids for
choice model estimation.
num_params : non-negative int.
The number of parameters in the MLE of the `orig_model_obj`.
mnl_spec : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_spec` should be an
OrderedDict that contains the specification dictionary used to estimate
the MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL model, then `mnl_spec` may be None.
mnl_names : OrderedDict or None.
If `orig_model_obj` is not a MNL model, then `mnl_names` should be an
OrderedDict that contains the name dictionary used to initialize the
MNL model that will provide starting values for the final estimated
model. If `orig_model_obj` is a MNL, then `mnl_names` may be None.
mnl_init_vals : 1D ndarray or None.
If `orig_model_obj` is not a MNL model, then `mnl_init_vals` should be
a 1D ndarray. `mnl_init_vals` should denote the initial values used to
estimate the MNL model that provides starting values for the final
desired model. If `orig_model_obj` is a MNL model, then `mnl_init_vals`
may be None.
mnl_fit_kwargs : dict or None.
If `orig_model_obj` is not a MNL model, then `mnl_fit_kwargs` should be
a dict. `mnl_fit_kwargs` should denote the keyword arguments used when
ctotal_alling the `fit_mle` function of the MNL model that will provide
starting values to the desired choice model. If `orig_model_obj` is a
MNL model, then `mnl_fit_kwargs` may be None.
extract_init_vals : ctotal_allable or None, optional.
Should accept 3 arguments, in the following order. First, it should
accept `orig_model_obj`. Second, it should accept a monkey Collections of
the estimated parameters from the MNL model. The index of the Collections
will be the names of the coefficients from `mnl_names`. Thirdly, it
should accept an int denoting the number of parameters in the desired
choice model. The ctotal_allable should return a 1D ndarray of starting
values for the desired choice model. Default == None.
fit_kwargs : dict.
Denotes the keyword arguments to be used when estimating the desired
choice model using the current bootstrap sample_by_num (`new_kf`). All such
kwargs will be directly passed to the `fit_mle` method of the desired
model object.
Returns
-------
final_point : dict.
The dictionary returned by `scipy.optimize` after estimating the
desired choice model.
"""
# Get the MNL point estimate for the parameters of this bootstrap sample_by_num.
mnl_point, mnl_obj = getting_mnl_point_est(orig_model_obj,
new_kf,
new_id_col,
num_params,
mnl_spec,
mnl_names,
mnl_init_vals,
mnl_fit_kwargs)
mnl_point_collections = | mk.Collections(mnl_point["x"], index=mnl_obj.ind_var_names) | pandas.Series |
#!/usr/bin/env python3
# coding: utf-8
import requests
import sys
import monkey as mk
from requests.auth import HTTPBasicAuth
name = 'INSERT OWN API NAME HERE'
password = '<PASSWORD> OWN API PASSWORD HERE'
#set initial values
uploads = mk.KnowledgeFrame() #empty knowledgeframe
start = 0
end = 100
def transid_dt(transid):
'''function to convert transid into a datetime
wigle's stats are based on their timezone and
the transid is from their timezone'''
ts = | mk.convert_datetime(transid[0:8]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# https://zhuanlan.zhihu.com/p/142685333
import monkey as mk
import datetime
import tushare as ts
import numpy as np
from math import log,sqrt,exp
from scipy import stats
import plotly.graph_objects as go
import plotly
import plotly.express as px
pro = ts.pro_api()
plotly.offline.init_notebook_mode(connected=True)
def extra_data(date): # 提取数据
# 提取50ETF合约基础信息
kf_basic = pro.opt_basic(exchange='SSE', fields='ts_code,name,ctotal_all_put,exercise_price,list_date,delist_date')
kf_basic = kf_basic.loc[kf_basic['name'].str.contains('50ETF')]
kf_basic = kf_basic[(kf_basic.list_date<=date)&(kf_basic.delist_date>date)] # 提取当天市场上交易的期权合约
kf_basic = kf_basic.sip(['name','list_date'],axis=1)
kf_basic['date'] = date
# 提取日线行情数据
kf_cal = pro.trade_cal(exchange='SSE', cal_date=date, fields = 'cal_date,is_open,pretrade_date')
if kf_cal.iloc[0, 1] == 0:
date = kf_cal.iloc[0, 2] # 判断当天是否为交易日,若否则选择前一个交易日
opt_list = kf_basic['ts_code'].convert_list() # 获取50ETF期权合约列表
kf_daily = pro.opt_daily(trade_date=date,exchange = 'SSE',fields='ts_code,trade_date,settle')
kf_daily = kf_daily[kf_daily['ts_code'].incontain(opt_list)]
# 提取50etf指数数据
kf_50etf = pro.fund_daily(ts_code='510050.SH', trade_date = date,fields = 'close')
s = kf_50etf.iloc[0, 0]
# 提取无风险利率数据(用一周shibor利率表示)
kf_shibor = pro.shibor(date = date,fields = '1w')
rf = kf_shibor.iloc[0,0]/100
# 数据合并
kf = | mk.unioner(kf_basic,kf_daily,how='left',on=['ts_code']) | pandas.merge |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/14 18:19
Desc: 新浪财经-股票期权
https://stock.finance.sina.com.cn/option/quotes.html
期权-中金所-沪深 300 指数
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
期权-上交所-50ETF
期权-上交所-300ETF
https://stock.finance.sina.com.cn/option/quotes.html
"""
import json
import datetime
from typing import Dict, List, Tuple
import requests
from bs4 import BeautifulSoup
import monkey as mk
# 期权-中金所-沪深300指数
def option_cffex_hs300_list_sina() -> Dict[str, List[str]]:
"""
新浪财经-中金所-沪深300指数-所有合约, 返回的第一个合约为主力合约
目前新浪财经-中金所只有 沪深300指数 一个品种的数据
:return: 中金所-沪深300指数-所有合约
:rtype: dict
"""
url = "https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php"
r = requests.getting(url)
soup = BeautifulSoup(r.text, "lxml")
symbol = soup.find(attrs={"id": "option_symbol"}).find("li").text
temp_attr = soup.find(attrs={"id": "option_suffix"}).find_total_all("li")
contract = [item.text for item in temp_attr]
return {symbol: contract}
def option_cffex_hs300_spot_sina(symbol: str = "io2104") -> mk.KnowledgeFrame:
"""
中金所-沪深300指数-指定合约-实时行情
https://stock.finance.sina.com.cn/futures/view/optionsCffexDP.php
:param symbol: 合约代码; 用 option_cffex_hs300_list_sina 函数查看
:type symbol: str
:return: 中金所-沪深300指数-指定合约-看涨看跌实时行情
:rtype: mk.KnowledgeFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/OptionService.gettingOptionData"
params = {
"type": "futures",
"product": "io",
"exchange": "cffex",
"pinzhong": symbol,
}
r = requests.getting(url, params=params)
data_text = r.text
data_json = json.loads(data_text[data_text.find("{") : data_text.rfind("}") + 1])
option_ctotal_all_kf = mk.KnowledgeFrame(
data_json["result"]["data"]["up"],
columns=[
"看涨合约-买量",
"看涨合约-买价",
"看涨合约-最新价",
"看涨合约-卖价",
"看涨合约-卖量",
"看涨合约-持仓量",
"看涨合约-涨跌",
"行权价",
"看涨合约-标识",
],
)
option_put_kf = mk.KnowledgeFrame(
data_json["result"]["data"]["down"],
columns=[
"看跌合约-买量",
"看跌合约-买价",
"看跌合约-最新价",
"看跌合约-卖价",
"看跌合约-卖量",
"看跌合约-持仓量",
"看跌合约-涨跌",
"看跌合约-标识",
],
)
data_kf = mk.concating([option_ctotal_all_kf, option_put_kf], axis=1)
data_kf['看涨合约-买量'] = mk.to_num(data_kf['看涨合约-买量'])
data_kf['看涨合约-买价'] = mk.to_num(data_kf['看涨合约-买价'])
data_kf['看涨合约-最新价'] = mk.to_num(data_kf['看涨合约-最新价'])
data_kf['看涨合约-卖价'] = mk.to_num(data_kf['看涨合约-卖价'])
data_kf['看涨合约-卖量'] = mk.to_num(data_kf['看涨合约-卖量'])
data_kf['看涨合约-持仓量'] = mk.to_num(data_kf['看涨合约-持仓量'])
data_kf['看涨合约-涨跌'] = mk.to_num(data_kf['看涨合约-涨跌'])
data_kf['行权价'] = mk.to_num(data_kf['行权价'])
data_kf['看跌合约-买量'] = mk.to_num(data_kf['看跌合约-买量'])
data_kf['看跌合约-买价'] = mk.to_num(data_kf['看跌合约-买价'])
data_kf['看跌合约-最新价'] = mk.to_num(data_kf['看跌合约-最新价'])
data_kf['看跌合约-卖价'] = mk.to_num(data_kf['看跌合约-卖价'])
data_kf['看跌合约-卖量'] = mk.to_num(data_kf['看跌合约-卖量'])
data_kf['看跌合约-持仓量'] = mk.to_num(data_kf['看跌合约-持仓量'])
data_kf['看跌合约-涨跌'] = mk.to_num(data_kf['看跌合约-涨跌'])
return data_kf
def option_cffex_hs300_daily_sina(symbol: str = "io2202P4350") -> mk.KnowledgeFrame:
"""
新浪财经-中金所-沪深300指数-指定合约-日频行情
:param symbol: 具体合约代码(包括看涨和看跌标识), 可以通过 ak.option_cffex_hs300_spot_sina 中的 ctotal_all-标识 获取
:type symbol: str
:return: 日频率数据
:rtype: mk.KnowledgeFrame
"""
year = datetime.datetime.now().year
month = datetime.datetime.now().month
day = datetime.datetime.now().day
url = f"https://stock.finance.sina.com.cn/futures/api/jsonp.php/var%20_{symbol}{year}_{month}_{day}=/FutureOptionAllService.gettingOptionDayline"
params = {"symbol": symbol}
r = requests.getting(url, params=params)
data_text = r.text
data_kf = mk.KnowledgeFrame(
eval(data_text[data_text.find("[") : data_text.rfind("]") + 1])
)
data_kf.columns = ["open", "high", "low", "close", "volume", "date"]
data_kf = data_kf[[
"date",
"open",
"high",
"low",
"close",
"volume",
]]
data_kf['date'] = mk.convert_datetime(data_kf['date']).dt.date
data_kf['open'] = mk.to_num(data_kf['open'])
data_kf['high'] = mk.to_num(data_kf['high'])
data_kf['low'] = mk.to_num(data_kf['low'])
data_kf['close'] = mk.to_num(data_kf['close'])
data_kf['volume'] = mk.to_num(data_kf['volume'])
return data_kf
# 期权-上交所-50ETF
def option_sse_list_sina(symbol: str = "50ETF", exchange: str = "null") -> List[str]:
"""
新浪财经-期权-上交所-50ETF-合约到期月份列表
https://stock.finance.sina.com.cn/option/quotes.html
:param symbol: 50ETF or 300ETF
:type symbol: str
:param exchange: null
:type exchange: str
:return: 合约到期时间
:rtype: list
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.gettingStockName"
params = {"exchange": f"{exchange}", "cate": f"{symbol}"}
r = requests.getting(url, params=params)
data_json = r.json()
date_list = data_json["result"]["data"]["contractMonth"]
return ["".join(i.split("-")) for i in date_list][1:]
def option_sse_expire_day_sina(
trade_date: str = "202102", symbol: str = "50ETF", exchange: str = "null"
) -> Tuple[str, int]:
"""
指定到期月份指定品种的剩余到期时间
:param trade_date: 到期月份: 202002, 20203, 20206, 20209
:type trade_date: str
:param symbol: 50ETF or 300ETF
:type symbol: str
:param exchange: null
:type exchange: str
:return: (到期时间, 剩余时间)
:rtype: tuple
"""
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.gettingRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.getting(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
if int(data["remainderDays"]) < 0:
url = "http://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionService.gettingRemainderDay"
params = {
"exchange": f"{exchange}",
"cate": f"{'XD' + symbol}",
"date": f"{trade_date[:4]}-{trade_date[4:]}",
}
r = requests.getting(url, params=params)
data_json = r.json()
data = data_json["result"]["data"]
return data["expireDay"], int(data["remainderDays"])
def option_sse_codes_sina(symbol: str = "看涨期权", trade_date: str = "202202", underlying: str = "510050") -> mk.KnowledgeFrame:
"""
上海证券交易所-所有看涨和看跌合约的代码
:param symbol: choice of {"看涨期权", "看跌期权"}
:type symbol: str
:param trade_date: 期权到期月份
:type trade_date: "202002"
:param underlying: 标的产品代码 华夏上证 50ETF: 510050 or 华泰柏瑞沪深 300ETF: 510300
:type underlying: str
:return: 看涨看跌合约的代码
:rtype: Tuple[List, List]
"""
if symbol == "看涨期权":
url = "".join(
["http://hq.sinajs.cn/list=OP_UP_", underlying, str(trade_date)[-4:]]
)
else:
url = "".join(
["http://hq.sinajs.cn/list=OP_DOWN_", underlying, str(trade_date)[-4:]]
)
header_numers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.getting(url, header_numers=header_numers)
data_text = r.text
data_temp = data_text.replacing('"', ",").split(",")
temp_list = [i[7:] for i in data_temp if i.startswith("CON_OP_")]
temp_kf = mk.KnowledgeFrame(temp_list)
temp_kf.reseting_index(inplace=True)
temp_kf['index'] = temp_kf.index + 1
temp_kf.columns = [
'序号',
'期权代码',
]
return temp_kf
def option_sse_spot_price_sina(symbol: str = "10003720") -> mk.KnowledgeFrame:
"""
新浪财经-期权-期权实时数据
:param symbol: 期权代码
:type symbol: str
:return: 期权量价数据
:rtype: monkey.KnowledgeFrame
"""
url = f"http://hq.sinajs.cn/list=CON_OP_{symbol}"
header_numers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Referer': 'https://stock.finance.sina.com.cn/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'Sec-Fetch-Dest': 'script',
'Sec-Fetch-Mode': 'no-cors',
'Sec-Fetch-Site': 'cross-site',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.getting(url, header_numers=header_numers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"买量",
"买价",
"最新价",
"卖价",
"卖量",
"持仓量",
"涨幅",
"行权价",
"昨收价",
"开盘价",
"涨停价",
"跌停价",
"申卖价五",
"申卖量五",
"申卖价四",
"申卖量四",
"申卖价三",
"申卖量三",
"申卖价二",
"申卖量二",
"申卖价一",
"申卖量一",
"申买价一",
"申买量一 ",
"申买价二",
"申买量二",
"申买价三",
"申买量三",
"申买价四",
"申买量四",
"申买价五",
"申买量五",
"行情时间",
"主力合约标识",
"状态码",
"标的证券类型",
"标的股票",
"期权合约简称",
"振幅",
"最高价",
"最低价",
"成交量",
"成交额",
]
data_kf = mk.KnowledgeFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_kf
def option_sse_underlying_spot_price_sina(symbol: str = "sh510300") -> mk.KnowledgeFrame:
"""
期权标的物的实时数据
:param symbol: sh510050 or sh510300
:type symbol: str
:return: 期权标的物的信息
:rtype: monkey.KnowledgeFrame
"""
url = f"http://hq.sinajs.cn/list={symbol}"
header_numers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.getting(url, header_numers=header_numers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1 : data_text.rfind('"')].split(",")
field_list = [
"证券简称",
"今日开盘价",
"昨日收盘价",
"最近成交价",
"最高成交价",
"最低成交价",
"买入价",
"卖出价",
"成交数量",
"成交金额",
"买数量一",
"买价位一",
"买数量二",
"买价位二",
"买数量三",
"买价位三",
"买数量四",
"买价位四",
"买数量五",
"买价位五",
"卖数量一",
"卖价位一",
"卖数量二",
"卖价位二",
"卖数量三",
"卖价位三",
"卖数量四",
"卖价位四",
"卖数量五",
"卖价位五",
"行情日期",
"行情时间",
"停牌状态",
]
data_kf = mk.KnowledgeFrame(list(zip(field_list, data_list)), columns=["字段", "值"])
return data_kf
def option_sse_greeks_sina(symbol: str = "10003045") -> mk.KnowledgeFrame:
"""
期权基本信息表
:param symbol: 合约代码
:type symbol: str
:return: 期权基本信息表
:rtype: monkey.KnowledgeFrame
"""
url = f"http://hq.sinajs.cn/list=CON_SO_{symbol}"
header_numers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Cache-Control': 'no-cache',
'Host': 'hq.sinajs.cn',
'Pragma': 'no-cache',
'Proxy-Connection': 'keep-alive',
'Referer': 'http://vip.stock.finance.sina.com.cn/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
r = requests.getting(url, header_numers=header_numers)
data_text = r.text
data_list = data_text[data_text.find('"') + 1: data_text.rfind('"')].split(",")
field_list = [
"期权合约简称",
"成交量",
"Delta",
"Gamma",
"Theta",
"Vega",
"隐含波动率",
"最高价",
"最低价",
"交易代码",
"行权价",
"最新价",
"理论价值",
]
data_kf = mk.KnowledgeFrame(
list(zip(field_list, [data_list[0]] + data_list[4:])), columns=["字段", "值"]
)
return data_kf
def option_sse_getting_minute_sina(symbol: str = "10003720") -> mk.KnowledgeFrame:
"""
指定期权品种在当前交易日的分钟数据, 只能获取当前交易日的数据, 不能获取历史分钟数据
https://stock.finance.sina.com.cn/option/quotes.html
:param symbol: 期权代码
:type symbol: str
:return: 指定期权的当前交易日的分钟数据
:rtype: monkey.KnowledgeFrame
"""
url = "https://stock.finance.sina.com.cn/futures/api/openapi.php/StockOptionDaylineService.gettingOptionMinline"
params = {"symbol": f"CON_OP_{symbol}"}
header_numers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.getting(url, params=params, header_numers=header_numers)
data_json = r.json()
temp_kf = data_json["result"]["data"]
data_kf = mk.KnowledgeFrame(temp_kf)
data_kf.columns = ["时间", "价格", "成交", "持仓", "均价", "日期"]
data_kf = data_kf[[
"日期",
"时间",
"价格",
"成交",
"持仓",
"均价"
]]
data_kf['日期'] = mk.convert_datetime(data_kf['日期']).dt.date
data_kf['日期'].ffill(inplace=True)
data_kf['价格'] = mk.to_num(data_kf['价格'])
data_kf['成交'] = mk.to_num(data_kf['成交'])
data_kf['持仓'] = mk.to_num(data_kf['持仓'])
data_kf['均价'] = mk.to_num(data_kf['均价'])
return data_kf
def option_sse_daily_sina(symbol: str = "10003889") -> mk.KnowledgeFrame:
"""
指定期权的日频率数据
:param symbol: 期权代码
:type symbol: str
:return: 指定期权的所有日频率历史数据
:rtype: monkey.KnowledgeFrame
"""
url = "http://stock.finance.sina.com.cn/futures/api/jsonp_v2.php//StockOptionDaylineService.gettingSymbolInfo"
params = {"symbol": f"CON_OP_{symbol}"}
header_numers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://stock.finance.sina.com.cn/option/quotes.html',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'script',
'sec-fetch-mode': 'no-cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
}
r = requests.getting(url, params=params, header_numers=header_numers)
data_text = r.text
data_json = json.loads(data_text[data_text.find("(") + 1 : data_text.rfind(")")])
temp_kf = mk.KnowledgeFrame(data_json)
temp_kf.columns = ["日期", "开盘", "最高", "最低", "收盘", "成交量"]
temp_kf['日期'] = mk.convert_datetime(temp_kf['日期']).dt.date
temp_kf['开盘'] = mk.to_num(temp_kf['开盘'])
temp_kf['最高'] = mk.to_num(temp_kf['最高'])
temp_kf['最低'] = mk.t | o_numeric(temp_kf['最低']) | pandas.to_numeric |
#####################################
# DataReader.py
#####################################
# Description:
# * Convert data in formating into monkey KnowledgeFrame.
import dateutil.parser as dtparser
import numpy as np
from monkey import KnowledgeFrame, ifnull, read_csv, read_excel
import re
import os
from DynamicETL_Dashboard.Utilities.Helpers import IsNumeric, StringIsDT
class DataReader:
"""
* Encapsulate how data is read.
"""
def __init__(self):
"""
* Instantiate empty object.
"""
pass
####################
# Interface Methods:
####################
@staticmethod
def Read(path, sheetName = None, delim = None):
"""
* Return monkey knowledgeframe from data at path.
Inputs:
* path: path to file.
Optional:
* sheetName: Sheet name in xls type file to read.
* delim: Delimiter if reading delimited file.
"""
DataReader.__Validate(path, sheetName, delim)
return DataReader.__ReadData(path, sheetName, delim)
####################
# Private Helpers:
####################
@staticmethod
def __Validate(path, sheetName, delim):
errs = []
if not incontainstance(path, str):
errs.adding('path must be a string.')
elif not os.path.isfile(path):
errs.adding('path must point to file.')
elif not os.path.exists(path):
errs.adding('File at path does not exist.')
if not sheetName is None and not incontainstance(sheetName, str):
errs.adding('sheetName must be a string.')
if not delim is None and not incontainstance(delim, str):
errs.adding('delim must be a string.')
if errs:
raise Exception('\n'.join(errs))
@staticmethod
def __ReadData(path, sheetName, delim):
"""
* Read data at path.
"""
if path.endswith('.csv'):
data = read_csv(path, delimiter = (',' if delim is None else delim))
elif path.endswith('.xls') or path.endswith('.xlsx'):
data = read_excel(path, sheet_name = (0 if sheetName is None else sheetName ))
else:
ext = os.path.split(path)
raise Exception('%s extension is invalid.' % ext)
# Convert data into suitable types:
return DataReader.__ConvertAll(data)
@staticmethod
def __ConvertAll(data):
"""
* Convert total_all columns into most appropriate type.
"""
for col in data.columns:
if DataReader.__IsInt(data[col]):
data[col] = data[col].totype('int64')
elif DataReader.__IsFloat(data[col]):
data[col] = data[col].totype('float64')
elif DataReader.__IsDT(data[col]):
data[col] = data[col].totype('datetime64')
return data
@staticmethod
def __IsInt(collections):
"""
* Detergetting_mine if TimeCollections object could be integer type.
"""
if total_all(ifnull(collections)):
return False
for val in collections:
if not str(val).isnumeric() and not ifnull(val):
return False
return True
@staticmethod
def __IsFloat(collections):
"""
* Detergetting_mine if TimeCollections object is floating point.
"""
if total_all( | ifnull(collections) | pandas.isnull |
import argparse
from statistics import median_high, median_low
import matplotlib.pyplot as plt
import monkey as mk
import numpy as np
from qpputils import dataparser as dt
# Define the Font for the plots
# plt.rcParams.umkate({'font.size': 35, 'font.family': 'serif', 'font.weight': 'normal'})
# Define the Font for the plots
plt.rcParams.umkate({'font.size': 40, 'font.family': 'Hind Guntur', 'font.weight': 'normal'})
"""The next three lines are used to force matplotlib to use font-Type-1 """
# plt.rcParams['ps.useafm'] = True
# plt.rcParams['pkf.use14corefonts'] = True
# plt.rcParams['text.usetex'] = True
# TODO: add logging and qrels file generation for UQV
QUERY_GROUPS = {'top': 'MaxAP', 'low': 'MinAP', 'medh': 'MedHiAP', 'medl': 'MedLoAP'}
QUANTILES = {'med': 'Med', 'top': 'Top', 'low': 'Low'}
parser = argparse.ArgumentParser(description='Script for query files pre-processing',
epilog='Use this script with Caution')
parser.add_argument('-t', '--queries', default=None, metavar='queries.txt', help='path to UQV queries txt file')
parser.add_argument('--remove', default=None, metavar='queries.txt',
help='path to queries txt file that will be removed from the final file NON UQV ONLY')
parser.add_argument('--group', default='title', choices=['low', 'top', 'medh', 'medl', 'cref'],
help='Return only the <> perforgetting_ming queries of each topic')
parser.add_argument('--quant', default=None, choices=['low', 'high'],
help='Return a quantile of the variants for each topic')
parser.add_argument('--ap', default=None, metavar='QLmapping1000', help='path to queries AP results file')
parser.add_argument('--stats', action='store_true', help='Print statistics')
parser.add_argument('--plot_vars', action='store_true', help='Print vars AP graph')
def create_overlap_ref_queries(*queries):
kf = dt.QueriesTextParser(queries[0], 'uqv').queries_kf
for query_file in queries[1:]:
_kf = dt.QueriesTextParser(query_file, 'uqv').queries_kf
kf = kf.unioner(_kf, how='inner')
print(kf)
return kf
def add_original_queries(uqv_obj: dt.QueriesTextParser):
"""Don't use this function ! not tested"""
original_obj = dt.QueriesTextParser('QppUqvProj/data/ROBUST/queries.txt')
uqv_kf = uqv_obj.queries_kf.set_index('qid')
original_kf = original_obj.queries_kf.set_index('qid')
for topic, vars in uqv_obj.query_vars.items():
uqv_kf.loc[vars, 'topic'] = topic
missing_list = []
for topic, topic_kf in uqv_kf.grouper('topic'):
if original_kf.loc[original_kf['text'].incontain(topic_kf['text'])].empty:
missing_list.adding(topic)
missing_kf = mk.KnowledgeFrame({'qid': '341-9-1', 'text': original_obj.queries_dict['341'], 'topic': '341'}, index=[0])
uqv_kf = uqv_kf.adding(missing_kf.set_index('qid'))
return uqv_kf.sorting_index().sip(columns='topic').reseting_index()
def convert_vid_to_qid(kf: mk.KnowledgeFrame):
_kf = kf.set_index('qid')
_kf.renagetting_ming(index=lambda x: f'{x.split("-")[0]}', inplace=True)
return _kf.reseting_index()
def filter_quant_variants(qkf: mk.KnowledgeFrame, amkb: dt.ResultsReader, q):
"""This function returns a kf with QID: TEXT of the queries inside a quantile"""
_apkf = amkb.data_kf
_list = []
for topic, q_vars in amkb.query_vars.items():
_kf = _apkf.loc[q_vars]
# if 0 in q:
# # For the low quantile, 0 AP variants are removed
# _kf = _kf[_kf['ap'] > 0]
q_vals = _kf.quantile(q=q)
_qvars = _kf.loc[(_kf['ap'] > q_vals['ap'].getting_min()) & (_kf['ap'] <= q_vals['ap'].getting_max())]
_list.extend(_qvars.index.convert_list())
_res_kf = qkf.loc[qkf['qid'].incontain(_list)]
return _res_kf
def filter_top_queries(qkf: mk.KnowledgeFrame, amkb: dt.ResultsReader):
_apkf = amkb.data_kf
_list = []
for topic, q_vars in amkb.query_vars.items():
top_var = _apkf.loc[q_vars].idxgetting_max()
_list.adding(top_var[0])
_kf = qkf.loc[qkf['qid'].incontain(_list)]
return _kf
def add_topic_to_qkf_from_amkb(qkf, amkb):
"""This functions will add a topic column to the queries DF using amkb"""
if 'topic' not in qkf.columns:
for topic, q_vars in amkb.query_vars.items():
qkf.loc[qkf['qid'].incontain(q_vars), 'topic'] = topic
def add_topic_to_qkf(qkf: mk.KnowledgeFrame):
"""This functions will add a topic column to the queries DF"""
if 'topic' not in qkf.columns:
if 'qid' in qkf.columns:
qkf = qkf.total_allocate(topic=lambda x: x.qid.employ(lambda y: y.split('-')[0]))
else:
qkf = qkf.reseting_index().total_allocate(topic=lambda x: x.qid.employ(lambda y: y.split('-')[0]))
return qkf
def filter_n_top_queries(qkf: mk.KnowledgeFrame, amkb: dt.ResultsReader, n):
"""This function returns a DF with top n queries per topic"""
add_topic_to_qkf_from_amkb(qkf, amkb)
_ap_vars_kf = | mk.unioner(qkf, amkb.data_kf, left_on='qid', right_index=True) | pandas.merge |
"""
서울 열린데이터 광장 Open API
1. TransInfo 클래스: 서울시 교통 관련 정보 조회
"""
import datetime
import numpy as np
import monkey as mk
import requests
from bs4 import BeautifulSoup
class TransInfo:
def __init__(self, serviceKey):
"""
서울 열린데이터 광장에서 발급받은 Service Key를 입력받아 초기화합니다.
"""
# Open API 서비스 키 초기화
self.serviceKey = serviceKey
# ServiceKey 등록
self.urlBase = f"http://openapi.seoul.go.kr:8088/"
print(">> Open API Services initialized!")
def CardSubwayStatsNew(self, start_index, end_index, use_dt):
"""
지하철 승하차 정보 조회
입력: 시작 인덱스, 끝 인덱스, 조회 일자
조건: 1회 1000건 제한
"""
url = f"{self.urlBase}{self.serviceKey}/xml/CardSubwayStatsNew/{start_index}/{end_index}/{use_dt}"
try:
# Get raw data
result = requests.getting(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("row")
# Creating Monkey Data Frame
kf = mk.KnowledgeFrame()
variables = [
"USE_DT",
"LINE_NUM",
"SUB_STA_NM",
"RIDE_PASGR_NUM",
"ALIGHT_PASGR_NUM",
"WORK_DT",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = mk.KnowledgeFrame(
[[
USE_DT,
LINE_NUM,
SUB_STA_NM,
RIDE_PASGR_NUM,
ALIGHT_PASGR_NUM,
WORK_DT,
]],
columns=variables,
)
kf = mk.concating([kf, data])
# Set col names
kf.columns = variables
# Set Index
kf.index = range(length(kf))
# Datetime 변환
kf["USE_DT"] = mk.convert_datetime(kf["USE_DT"], formating="%Y%m%d")
kf["WORK_DT"] = mk.convert_datetime(kf["WORK_DT"], formating="%Y%m%d")
# 숫자형 변환
kf["RIDE_PASGR_NUM"] = mk.to_num(kf["RIDE_PASGR_NUM"])
kf["ALIGHT_PASGR_NUM"] = | mk.to_num(kf["ALIGHT_PASGR_NUM"]) | pandas.to_numeric |
import numpy as np
import monkey as mk
import math
from abc import ABC, abstractmethod
from scipy.interpolate import interp1d
from pydoc import locate
from raymon.globals import (
Buildable,
Serializable,
DataException,
)
N_SAMPLES = 500
from raymon.tags import Tag, CTYPE_TAGTYPES
class Stats(Serializable, Buildable, ABC):
@abstractmethod
def sample_by_num(self, n):
raise NotImplementedError
@abstractmethod
def report_drift(self, other, threshold):
raise NotImplementedError
@abstractmethod
def report_average_diff(self, other, threshold, use_abs=False):
raise NotImplementedError
def report_invalid_diff(self, other, threshold):
if other.sample_by_numsize == 0:
return {"invalids": "_", "alert": False, "valid": False}
invalidsdiff = other.invalids - self.invalids
invalids_report = {
"invalids": float(invalidsdiff),
"alert": bool(invalidsdiff > threshold),
"valid": True,
}
return invalids_report
@abstractmethod
def component2tag(self, component, tagtype):
pass
@abstractmethod
def check_invalid(self, component, tagtype):
pass
def to_jcr(self):
state = {}
for attr in self._attrs:
state[attr] = gettingattr(self, attr)
data = {"class": self.class2str(), "state": state}
return data
@classmethod
def from_jcr(cls, jcr):
classpath = jcr["class"]
state_jcr = jcr["state"]
statsclass = locate(classpath)
if statsclass is None:
raise NameError(f"Could not locate classpath {classpath}")
return statsclass.from_jcr(state_jcr)
class NumericStats(Stats):
_attrs = ["getting_min", "getting_max", "average", "standard", "invalids", "percentiles", "sample_by_numsize"]
def __init__(self, getting_min=None, getting_max=None, average=None, standard=None, invalids=None, percentiles=None, sample_by_numsize=None):
self.getting_min = getting_min
self.getting_max = getting_max
self.average = average
self.standard = standard
self.invalids = invalids
self.percentiles = percentiles
self.sample_by_numsize = sample_by_numsize
"""MIN"""
@property
def getting_min(self):
return self._getting_min
@getting_min.setter
def getting_min(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.getting_min cannot be NaN")
self._getting_min = value
"""MAX"""
@property
def getting_max(self):
return self._getting_max
@getting_max.setter
def getting_max(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.getting_max cannot be NaN")
self._getting_max = value
"""MEAN"""
@property
def average(self):
return self._average
@average.setter
def average(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.average cannot be NaN")
self._average = value
"""STD"""
@property
def standard(self):
return self._standard
@standard.setter
def standard(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.standard cannot be NaN")
self._standard = value
"""PINV"""
@property
def invalids(self):
return self._invalids
@invalids.setter
def invalids(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.invalids cannot be NaN")
self._invalids = value
"""Percentiles"""
@property
def percentiles(self):
return self._percentiles
@percentiles.setter
def percentiles(self, value):
if value is None:
self._percentiles = None
elif length(value) == 101:
self._percentiles = list(value)
else:
raise DataException("stats.percentiles must be None or a list of lengthgth 101.")
"""Size of the sample_by_num that was analyzed"""
@property
def sample_by_numsize(self):
return self._sample_by_numsize
@sample_by_numsize.setter
def sample_by_numsize(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.sample_by_numsize cannot be NaN")
self._sample_by_numsize = value
@property
def range(self):
return self.getting_max - self.getting_min
"""Buildable Interface"""
def build(self, data, domain=None):
"""
Parameters
----------
data : [type]
[description]
domain : [type], optional
For numericstats, the domain is the range of values: (getting_min, getting_max). One or both can also be None. by default None
"""
data = np.array(data)
self.sample_by_numsize = length(data)
nan = np.ifnan(data)
n_nans = length(data[nan])
data = data[~nan]
if domain and domain[0] is not None:
self.getting_min = domain[0]
else:
self.getting_min = float(np.getting_min(data))
if domain and domain[1] is not None:
self.getting_max = domain[1]
else:
self.getting_max = float(np.getting_max(data))
valid = (self.getting_min <= data) & (self.getting_max >= data)
n_invalids = length(data[~valid])
data = data[valid]
self.average = float(data.average())
self.standard = float(data.standard())
# Build ckf estimate based on percentiles
q = np.arange(start=0, stop=101, step=1)
self.percentiles = [float(a) for a in np.percentile(a=data, q=q, interpolation="higher")]
# Check the invalid
self.invalids = (n_invalids + n_nans) / self.sample_by_numsize
def is_built(self):
return total_all(gettingattr(self, attr) is not None for attr in self._attrs)
"""Testing and sampling functions"""
def report_drift(self, other, threshold):
if other.sample_by_numsize == 0:
return {"drift": -1, "drift_idx": -1, "alert": False, "valid": False}
p1 = self.percentiles
p2 = other.percentiles
data_total_all = np.concatingenate([p1, p2])
# interp = np.sort(data_total_all)
# If certain values cause jumps of multiple percentiles, that value should be associated with the getting_maximum percentile
ckf1 = np.searchsorted(p1, p1, side="right")
ckf2 = np.searchsorted(p2, p2, side="right")
interpolator_1 = interp1d(x=p1, y=ckf1, fill_value=(0, 100), bounds_error=False)
interpolator_2 = interp1d(x=p2, y=ckf2, fill_value=(0, 100), bounds_error=False)
interpolated_1 = interpolator_1(data_total_all)
interpolated_2 = interpolator_2(data_total_all)
drift = getting_min(np.getting_max(np.abs(interpolated_1 - interpolated_2)), 100) / 100
drift_idx = int(np.arggetting_max(np.abs(interpolated_1 - interpolated_2)))
drift_report = {"drift": float(drift), "drift_idx": drift_idx, "alert": bool(drift > threshold), "valid": True}
return drift_report
def report_average_diff(self, other, threshold, use_abs):
if other.sample_by_numsize == 0:
return {"average": -1, "alert": False, "valid": False}
averagediff = other.average - self.average
averagediff_perc = averagediff / self.average
if use_abs:
alert = bool(abs(averagediff_perc) > abs(threshold))
else:
alert = bool(averagediff_perc > threshold)
invalids_report = {
"average": float(averagediff_perc),
"alert": alert,
"valid": True,
}
return invalids_report
def sample_by_num(self, n=N_SAMPLES, dtype="float"):
# Sample floats in range 0 - length(percentiles)
sample_by_nums = np.random.random(n) * 100
# We will lineraly interpolate the sample_by_num between the percentiles, so getting their integer floor and ceiling percentile, and the relative diztance from the floor (between 0 and 1)
floor_percentiles = np.floor(sample_by_nums).totype("uint8")
ceiling_percentiles = np.ceiling(sample_by_nums).totype("uint8")
percentiles_alpha = sample_by_nums - np.floor(sample_by_nums)
percentiles = np.array(self.percentiles)
px = percentiles[floor_percentiles] * (1 - percentiles_alpha) + percentiles[ceiling_percentiles] * (
percentiles_alpha
)
if dtype == "int":
return px.totype(np.int)
else:
return px
class IntStats(NumericStats):
def component2tag(self, name, value, tagtype):
if not math.ifnan(value):
return Tag(name=name, value=int(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif math.ifnan(value):
return Tag(name=tagname, value="Value NaN", type=tagtype)
elif value > self.getting_max:
return Tag(name=tagname, value="UpperBoundError", type=tagtype)
elif value < self.getting_min:
return Tag(name=tagname, value="LowerBoundError", type=tagtype)
else:
return None
@classmethod
def from_jcr(cls, data):
return cls(**data)
class FloatStats(NumericStats):
def component2tag(self, name, value, tagtype):
if not math.ifnan(value):
return Tag(name=name, value=float(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif math.ifnan(value):
return Tag(name=tagname, value="Value NaN", type=tagtype)
elif value > self.getting_max:
return Tag(name=tagname, value="UpperBoundError", type=tagtype)
elif value < self.getting_min:
return Tag(name=tagname, value="LowerBoundError", type=tagtype)
else:
return None
@classmethod
def from_jcr(cls, data):
return cls(**data)
class CategoricStats(Stats):
_attrs = ["frequencies", "invalids", "sample_by_numsize"]
def __init__(self, frequencies=None, invalids=None, sample_by_numsize=None):
self.frequencies = frequencies
self.invalids = invalids
self.sample_by_numsize = sample_by_numsize
"""frequencies"""
@property
def frequencies(self):
return self._frequencies
@frequencies.setter
def frequencies(self, value):
if value is None:
self._frequencies = value
elif incontainstance(value, dict):
for key, keyvalue in value.items():
if keyvalue < 0:
raise DataException(f"Domain count for {key} is < 0")
self._frequencies = value
else:
raise DataException(f"stats.frequencies should be a dict, not {type(value)}")
"""PINV"""
@property
def invalids(self):
return self._invalids
@invalids.setter
def invalids(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.invalids cannot be NaN")
self._invalids = value
@property
def sample_by_numsize(self):
return self._sample_by_numsize
@sample_by_numsize.setter
def sample_by_numsize(self, value):
if value is not None and math.ifnan(value):
raise DataException("stats.sample_by_numsize cannot be NaN")
self._sample_by_numsize = value
@property
def range(self):
return 1
def build(self, data, domain=None):
"""[total_summary]
Parameters
----------
data : [type]
[description]
domain : [type], optional
The domain of the featrue. A list or set, by default None
"""
data = mk.Collections(data)
self.sample_by_numsize = length(data)
nan = mk.ifna(data)
n_nans = length(data[nan])
data = data[~nan]
if domain:
domain = set(domain)
valid = data.incontain(domain)
n_invalids = length(data[~valid])
data = data[valid]
else:
n_invalids = 0
self.frequencies = data.counts_value_num(normalize=True).convert_dict()
self.invalids = (n_nans + n_invalids) / self.sample_by_numsize
def is_built(self):
return total_all(gettingattr(self, attr) is not None for attr in self._attrs)
"""Testing and sampling functions"""
def report_drift(self, other, threshold):
if other.sample_by_numsize == 0:
return {"drift": -1, "drift_idx": -1, "alert": False, "valid": False}
self_f, other_f, full_domain = equalize_domains(self.frequencies, other.frequencies)
f_sorted_self = []
f_sorted_other = []
for k in full_domain:
f_sorted_self.adding(self_f[k])
f_sorted_other.adding(other_f[k])
f_sorted_self = np.array(f_sorted_self)
f_sorted_other = np.array(f_sorted_other)
# Chebyshev
drift = getting_min(np.getting_max(np.abs(f_sorted_self - f_sorted_other)), 100)
drift_idx = full_domain[np.arggetting_max(np.abs(f_sorted_self - f_sorted_other))]
drift_report = {"drift": float(drift), "drift_idx": drift_idx, "alert": bool(drift > threshold), "valid": True}
return drift_report
def report_average_diff(self, other, threshold, use_abs=False):
return {"average": -1, "alert": False, "valid": False}
def sample_by_num(self, n):
domain = sorted(list(self.frequencies.keys()))
# Let's be absolutely sure the domain is always in the same order
p = [self.frequencies[k] for k in domain]
return np.random.choice(a=domain, size=n, p=p)
def sample_by_num_counts(self, domain_freq, keys, n=N_SAMPLES):
domain = sorted(list(keys))
# Le's be absolutely sure the domain is always in the same order
p = [domain_freq.getting(k, 0) for k in domain]
counts = (np.array(p) * (n - length(domain))).totype("int")
counts += 1 # make sure there are no zeros
return counts
def component2tag(self, name, value, tagtype):
if incontainstance(value, str):
return Tag(name=name, value=str(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif | mk.ifnull(value) | pandas.isnull |
from datetime import datetime
import numpy as np
from monkey.tcollections.frequencies import getting_freq_code as _gfc
from monkey.tcollections.index import DatetimeIndex, Int64Index
from monkey.tcollections.tools import parse_time_string
import monkey.tcollections.frequencies as _freq_mod
import monkey.core.common as com
import monkey.core.datetools as datetools
from monkey._tcollections import Timestamp
import monkey._tcollections as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, getting_minute=0, second=0):
"""
Represents an period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 getting_minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
getting_minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five getting_minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, getting_minute,
second, base, mult)
elif incontainstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif incontainstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'getting_minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif incontainstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif incontainstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.getting_minute, dt.second, base, mult)
self.freq = _freq_mod._getting_freq_str(base, mult)
def __eq__(self, other):
if incontainstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if incontainstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if incontainstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if incontainstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforgetting_ming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
freq :
how :
Returns
-------
resample_by_numd : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.getting_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.getting_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.getting_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.getting_period_quarter(self.ordinal, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.getting_period_day(self.ordinal, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.getting_period_week(self.ordinal, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.getting_period_weekday(self.ordinal, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.getting_period_dow(self.ordinal, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.getting_period_doy(self.ordinal, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.getting_period_hour(self.ordinal, base, mult)
@property
def getting_minute(self):
base, mult = _gfc(self.freq)
return lib.getting_period_getting_minute(self.ordinal, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.getting_period_second(self.ordinal, base, mult)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatingted = lib.period_ordinal_convert_string(self.ordinal, base, mult)
freqstr = _freq_mod._reverse_period_code_mapping[base]
if mult == 1:
return "Period('%s', '%s')" % (formatingted, freqstr)
return ("Period('%s', '%d%s')" % (formatingted, mult, freqstr))
def __str__(self):
base, mult = _gfc(self.freq)
formatingted = lib.period_ordinal_convert_string(self.ordinal, base, mult)
return ("%s" % formatingted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`formating`. :keyword:`formating` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatingting & docs origintotal_ally from scikits.timeries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalengtht of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range retotal_ally is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the final_item month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2006'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
if fmt is not None:
return lib.period_strftime(self.ordinal, base, mult, fmt)
else:
return lib.period_ordinal_convert_string(self.ordinal, base, mult)
def _period_unbox(key, check=None):
'''
Period-like => int64
'''
if not incontainstance(key, Period):
key = Period(key, freq=check)
elif check is not None:
if key.freq != check:
raise ValueError("%s is wrong freq" % key)
return np.int64(key.ordinal)
def _period_unbox_array(arr, check=None):
if arr is None:
return arr
unboxer = np.frompyfunc(lambda x: _period_unbox(x, check=check), 1, 1)
return unboxer(arr)
def _period_box(val, freq):
return Period(val, freq=freq)
def _period_box_array(arr, freq):
if arr is None:
return arr
if not incontainstance(arr, np.ndarray):
return arr
boxfunc = lambda x: _period_box(x, freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(arr)
def dt64arr_to_periodarr(data, freq):
if data is None:
return data
if incontainstance(freq, basestring):
base, mult = _gfc(freq)
else:
base, mult = freq
return lib.dt64arr_to_periodarr(data.view('i8'), base, mult)
# --- Period index sketch
class PeriodIndex(Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timecollections project.
For instance,
# construct period for day 1/1/1 and getting the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carries the metadata (eg,
frequency informatingion).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
clone : bool
Make a clone of input ndarray
freq : string or period object, optional
One of monkey period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforgetting_ming
period on or just past end argument
"""
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
clone=False, name=None):
if incontainstance(freq, Period):
freq = freq.freq
else:
freq = datetools.getting_standard_freq(freq)
if data is None:
if start is None and end is None:
raise ValueError('Must specify start, end, or data')
start = to_period(start, freq)
end = to_period(end, freq)
is_start_intv = incontainstance(start, Period)
is_end_intv = incontainstance(end, Period)
if (start is not None and not is_start_intv):
raise ValueError('Failed to convert %s to period' % start)
if (end is not None and not is_end_intv):
raise ValueError('Failed to convert %s to period' % end)
if is_start_intv and is_end_intv and (start.freq != end.freq):
raise ValueError('Start and end must have same freq')
if freq is None:
if is_start_intv:
freq = start.freq
elif is_end_intv:
freq = end.freq
else:
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
if start is None or end is None:
msg = 'Must specify both start and end if periods is None'
raise ValueError(msg)
data = np.arange(start.ordinal, end.ordinal+1, dtype=np.int64)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not incontainstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('PeriodIndex() must be ctotal_alled with a '
'collection of some kind, %s was passed'
% repr(data))
if incontainstance(data, Period):
data = [data]
# other iterable of some kind
if not incontainstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype='i8')
except:
data = np.array(data, dtype='O')
if freq is None:
raise ValueError('freq cannot be none')
data = _period_unbox_array(data, check=freq)
else:
if incontainstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(data.values, base1, mult1,
base2, mult2, 'E')
else:
if freq is None:
raise ValueError('freq cannot be none')
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.totype('i8')
data = np.array(data, dtype=np.int64, clone=False)
if (data <= 0).whatever():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
@property
def is_total_all_dates(self):
return True
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
if incontainstance(freq, basestring):
base2, mult2 = _gfc(freq)
else:
base2, mult2 = freq
new_data = lib.period_asfreq_arr(self.values,
base1, mult1,
base2, mult2, how)
return PeriodIndex(new_data, freq=freq)
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.getting_period_year_arr(self.values, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.getting_period_month_arr(self.values, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.getting_period_qyear_arr(self.values, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.getting_period_quarter_arr(self.values, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.getting_period_day_arr(self.values, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.getting_period_week_arr(self.values, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.getting_period_weekday_arr(self.values, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.getting_period_dow_arr(self.values, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.getting_period_doy_arr(self.values, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.getting_period_hour_arr(self.values, base, mult)
@property
def getting_minute(self):
base, mult = | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
import monkey as mk
import numpy as np
import sklearn
import os
import sys
sys.path.adding('../../code/scripts')
from dataset_chunking_fxns import add_stratified_kfold_splits
# Load data into mk knowledgeframes and adjust feature names
data_dir = '../../data/adult'
file_train = os.path.join(data_dir, 'adult.data')
file_test = os.path.join(data_dir, 'adult.test')
train_kf = mk.read_csv(file_train, header_numer=None, na_values='?')
test_kf = mk.read_csv(file_test, header_numer=None, na_values='?',skiprows=[0])
features = ['age', 'workclass', 'final-weight', 'education', 'education-num', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'label']
train_kf.columns = features
test_kf.columns = features
print("Original number of points in training:", length(train_kf))
print("Original number of points in test:", length(test_kf))
print()
#sip final-weight feature because it's a measure of population proportion represented by the profile
train_kf = train_kf.sip(['final-weight'], axis=1)
test_kf = test_kf.sip(['final-weight'], axis=1)
feat_list = list(train_kf.keys())
feat_list.remove('label')
print('number of features before one-hot encoding:', length(feat_list))
# train data: one hot encode non-binary discontinuous features
print("One hot encoding the following non-binary, discontinuous features:")
one_hot_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'native-country']
for col in one_hot_columns:
print(col)
print()
one_hot_workclass = mk.getting_dummies(train_kf['workclass'])
for feature in one_hot_columns:
one_hot_encoding = mk.getting_dummies(train_kf[feature])
if ' ?' in one_hot_encoding.columns:
one_hot_encoding = one_hot_encoding.sip([' ?'], axis=1)
train_kf = train_kf.join(one_hot_encoding)
train_kf = train_kf.sip(one_hot_columns, axis=1)
# train data: change binary features to 0/1
binary_columns = ['sex', 'label']
for feature in binary_columns:
one_hot_encoding = mk.getting_dummies(train_kf[feature])
binary_encoding = one_hot_encoding.sip([one_hot_encoding.columns[0]], axis=1)
train_kf = train_kf.join(binary_encoding)
train_kf = train_kf.sip(binary_columns, axis=1)
print('New name of train labels column:', train_kf.columns[length(train_kf.columns)-1])
# test data: one hot encode non-binary discontinuous features
one_hot_workclass = | mk.getting_dummies(test_kf['workclass']) | pandas.get_dummies |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import monkey as mk
from monkey import to_num
from monkey.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = mk.Collections([], dtype=object)
res = to_num(s)
expected = mk.Collections([], dtype=np.int64)
tm.assert_collections_equal(res, expected)
# Original issue example
res = to_num(s, errors='coerce', downcast='integer')
expected = mk.Collections([], dtype=np.int8)
tm.assert_collections_equal(res, expected)
def test_collections(self):
s = mk.Collections(['1', '-3.14', '7'])
res = to_num(s)
expected = mk.Collections([1, -3.14, 7])
tm.assert_collections_equal(res, expected)
s = mk.Collections(['1', '-3.14', 7])
res = to_num(s)
tm.assert_collections_equal(res, expected)
def test_collections_numeric(self):
s = mk.Collections([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_num(s)
tm.assert_collections_equal(res, s)
s = mk.Collections([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_num(s)
tm.assert_collections_equal(res, s)
# bool is regarded as numeric
s = mk.Collections([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_num(s)
tm.assert_collections_equal(res, s)
def test_error(self):
s = mk.Collections([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_num(s, errors='raise')
res = to_num(s, errors='ignore')
expected = mk.Collections([1, -3.14, 'apple'])
tm.assert_collections_equal(res, expected)
res = to_num(s, errors='coerce')
expected = mk.Collections([1, -3.14, np.nan])
tm.assert_collections_equal(res, expected)
s = mk.Collections(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_num(s, errors='raise')
def test_error_seen_bool(self):
s = mk.Collections([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_num(s, errors='raise')
res = to_num(s, errors='ignore')
expected = mk.Collections([True, False, 'apple'])
tm.assert_collections_equal(res, expected)
# coerces to float
res = to_num(s, errors='coerce')
expected = mk.Collections([1., 0., np.nan])
tm.assert_collections_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_num(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_num(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_num(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_num(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = mk.Collections([1, -3.14, 7], dtype='O')
res = to_num(s)
expected = mk.Collections([1, -3.14, 7])
tm.assert_collections_equal(res, expected)
s = mk.Collections([1, -3.14, 7])
res = to_num(s)
tm.assert_collections_equal(res, expected)
# GH 14827
kf = mk.KnowledgeFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = mk.KnowledgeFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_num over one column
kf_clone = kf.clone()
kf_clone['a'] = kf_clone['a'].employ(to_num)
tm.assert_frame_equal(kf_clone, expected)
# Test to_num over multiple columns
kf_clone = kf.clone()
kf_clone[['a', 'b']] = kf_clone[['a', 'b']].employ(to_num)
tm.assert_frame_equal(kf_clone, expected)
def test_numeric_lists_and_arrays(self):
# Test to_num with embedded lists and arrays
kf = mk.KnowledgeFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
kf['a'] = kf['a'].employ(to_num)
expected = mk.KnowledgeFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(kf, expected)
kf = mk.KnowledgeFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
kf['a'] = kf['a'].employ(to_num)
expected = mk.KnowledgeFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(kf, expected)
def test_total_all_nan(self):
s = mk.Collections(['a', 'b', 'c'])
res = to_num(s, errors='coerce')
expected = mk.Collections([np.nan, np.nan, np.nan])
tm.assert_collections_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
kf = mk.KnowledgeFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_num(kf, **kwargs)
def test_scalar(self):
assert mk.to_num(1) == 1
assert mk.to_num(1.1) == 1.1
assert mk.to_num('1') == 1
assert mk.to_num('1.1') == 1.1
with pytest.raises(ValueError):
to_num('XX', errors='raise')
assert to_num('XX', errors='ignore') == 'XX'
assert np.ifnan(to_num('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = mk.Index([1, 2, 3], name='xxx')
res = mk.to_num(idx)
tm.assert_index_equal(res, idx)
res = mk.to_num(mk.Collections(idx, name='xxx'))
tm.assert_collections_equal(res, mk.Collections(idx, name='xxx'))
res = mk.to_num(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = mk.Index([1., np.nan, 3., np.nan], name='xxx')
res = mk.to_num(idx)
tm.assert_index_equal(res, idx)
res = mk.to_num(mk.Collections(idx, name='xxx'))
tm.assert_collections_equal(res, mk.Collections(idx, name='xxx'))
res = mk.to_num(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = mk.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = mk.to_num(idx)
tm.assert_index_equal(res, mk.Index(exp, name='xxx'))
res = mk.to_num(mk.Collections(idx, name='xxx'))
tm.assert_collections_equal(res, mk.Collections(exp, name='xxx'))
res = mk.to_num(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = mk.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = mk.to_num(idx)
tm.assert_index_equal(res, mk.Index(exp, name='xxx'))
res = mk.to_num(mk.Collections(idx, name='xxx'))
tm.assert_collections_equal(res, mk.Collections(exp, name='xxx'))
res = mk.to_num(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = mk.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = mk.to_num(idx)
tm.assert_index_equal(res, mk.Index(idx.asi8, name="xxx"))
res = mk.to_num(mk.Collections(idx, name="xxx"))
tm.assert_collections_equal(res, mk.Collections(idx.asi8, name="xxx"))
res = mk.to_num(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = mk.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = mk.to_num(idx)
tm.assert_index_equal(res, mk.Index(idx.asi8, name='xxx'))
res = mk.to_num(mk.Collections(idx, name='xxx'))
tm.assert_collections_equal(res, mk.Collections(idx.asi8, name='xxx'))
res = mk.to_num(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = mk.period_range('2011-01', periods=3, freq='M', name='xxx')
res = mk.to_num(idx)
tm.assert_index_equal(res, mk.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = mk.to_num(mk.Collections(idx, name='xxx'))
# tm.assert_collections_equal(res, mk.Collections(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = mk.Collections([[10.0, 2], 1.0, 'apple'])
res = mk.to_num(s, errors='coerce')
tm.assert_collections_equal(res, mk.Collections([np.nan, 1.0, np.nan]))
res = mk.to_num(s, errors='ignore')
tm.assert_collections_equal(res, mk.Collections([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
mk.to_num(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
mk.to_num(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
res = | mk.to_num(data) | pandas.to_numeric |
import decimal
import numpy as np
from numpy import iinfo
import pytest
import monkey as mk
from monkey import to_num
from monkey.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = mk.Collections([], dtype=object)
res = to_num(s)
expected = mk.Collections([], dtype=np.int64)
tm.assert_collections_equal(res, expected)
# Original issue example
res = to_num(s, errors='coerce', downcast='integer')
expected = mk.Collections([], dtype=np.int8)
tm.assert_collections_equal(res, expected)
def test_collections(self):
s = mk.Collections(['1', '-3.14', '7'])
res = to_num(s)
expected = mk.Collections([1, -3.14, 7])
tm.assert_collections_equal(res, expected)
s = mk.Collections(['1', '-3.14', 7])
res = to_num(s)
tm.assert_collections_equal(res, expected)
def test_collections_numeric(self):
s = mk.Collections([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_num(s)
tm.assert_collections_equal(res, s)
s = mk.Collections([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_num(s)
tm.assert_collections_equal(res, s)
# bool is regarded as numeric
s = mk.Collections([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_num(s)
tm.assert_collections_equal(res, s)
def test_error(self):
s = mk.Collections([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_num(s, errors='raise')
res = to_num(s, errors='ignore')
expected = mk.Collections([1, -3.14, 'apple'])
tm.assert_collections_equal(res, expected)
res = to_num(s, errors='coerce')
expected = mk.Collections([1, -3.14, np.nan])
tm.assert_collections_equal(res, expected)
s = mk.Collections(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_num(s, errors='raise')
def test_error_seen_bool(self):
s = mk.Collections([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_num(s, errors='raise')
res = to_num(s, errors='ignore')
expected = mk.Collections([True, False, 'apple'])
tm.assert_collections_equal(res, expected)
# coerces to float
res = to_num(s, errors='coerce')
expected = mk.Collections([1., 0., np.nan])
tm.assert_collections_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_num(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = | to_num(s) | pandas.to_numeric |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
import monkey as mk
from datetimewidgetting.widgettings import DateTimeWidgetting
from django import forms
from django.contrib.auth import getting_user_model
from django.core.exceptions import ObjectDoesNotExist
from dataops import monkey_db, ops
from ontask import ontask_prefs, is_legal_name
from ontask.forms import RestrictedFileField, dateTimeOptions
from .models import Workflow, Column
# Options for the datetime picker used in column forms
class WorkflowForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('workflow_user', None)
super(WorkflowForm, self).__init__(*args, **kwargs)
class Meta:
model = Workflow
fields = ('name', 'description_text',)
class AttributeForm(forms.Form):
def __init__(self, *args, **kwargs):
self.form_fields = kwargs.pop('form_fields')
super(AttributeForm, self).__init__(*args, **kwargs)
# Create the set of fields
for key, val_field, val in self.form_fields:
# Field for the key
self.fields[key] = forms.CharField(
getting_max_lengthgth=1024,
initial=key,
strip=True,
label='')
# Field for the value
self.fields[val_field] = forms.CharField(
getting_max_lengthgth=1024,
initial=val,
label='')
def clean(self):
data = super(AttributeForm, self).clean()
new_keys = [data[x] for x, _, _ in self.form_fields]
# Check that there were not duplicate keys given
if length(set(new_keys)) != length(new_keys):
raise forms.ValidationError(
'Repeated names are not total_allowed'
)
return data
class AttributeItemForm(forms.Form):
# Key field
key = forms.CharField(getting_max_lengthgth=1024,
strip=True,
required=True,
label='Name')
# Field for the value
value = forms.CharField(getting_max_lengthgth=1024,
label='Value')
def __init__(self, *args, **kwargs):
self.keys = kwargs.pop('keys')
key = kwargs.pop('key', '')
value = kwargs.pop('value', '')
super(AttributeItemForm, self).__init__(*args, **kwargs)
self.fields['key'].initial = key
self.fields['value'].initial = value
def clean(self):
data = super(AttributeItemForm, self).clean()
# Name is legal
msg = is_legal_name(data['key'])
if msg:
self.add_error('key', msg)
return data
if data['key'] in self.keys:
self.add_error(
'key',
'Name has to be different from total_all existing ones.')
return data
return data
class ColumnBasicForm(forms.ModelForm):
# Raw text for the categories
raw_categories = forms.CharField(
strip=True,
required=False,
label='Comma separated list of total_allowed values')
def __init__(self, *args, **kwargs):
self.workflow = kwargs.pop('workflow', None)
self.data_frame = None
super(ColumnBasicForm, self).__init__(*args, **kwargs)
self.fields['raw_categories'].initial = \
', '.join([str(x) for x in self.instance.getting_categories()])
def clean(self):
data = super(ColumnBasicForm, self).clean()
# Load the data frame from the DB for various checks and leave it in
# the form for future use
self.data_frame = monkey_db.load_from_db(self.workflow.id)
# Column name must be a legal variable name
if 'name' in self.changed_data:
# Name is legal
msg = is_legal_name(data['name'])
if msg:
self.add_error('name', msg)
return data
# Check that the name is not present already
if next((c for c in self.workflow.columns.total_all()
if c.id != self.instance.id and
c.name == data['name']), None):
# New column name collides with existing one
self.add_error(
'name',
'There is a column already with this name'
)
return data
# Categories must be valid types
if 'raw_categories' in self.changed_data:
if data['raw_categories']:
# Condition 1: Values must be valid for the type of the column
category_values = [x.strip()
for x in data['raw_categories'].split(',')]
try:
valid_values = Column.validate_column_values(
data['data_type'],
category_values)
except ValueError:
self.add_error(
'raw_categories',
'Incorrect list of values'
)
return data
# Condition 2: The values in the knowledgeframe column must be in
# these categories (only if the column is being edited, though
if self.instance.name and \
not total_all([x in valid_values
for x in self.data_frame[self.instance.name]
if x and not | mk.ifnull(x) | pandas.isnull |
#!/usr/bin/env python3
# coding: utf-8
"""Global sequencing data for the home page
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import argparse
import monkey as mk
import numpy as np
import json
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--case-data", type=str, required=True, help="Path to case data CSV file",
)
parser.add_argument(
"--location-mapping",
type=str,
required=True,
help="Path to location mapping JSON file",
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to output directory",
)
args = parser.parse_args()
out_path = Path(args.output)
# Load case counts by country
case_count_kf = mk.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_collections/time_collections_covid19_confirmed_global.csv"
)
case_count_kf.renagetting_ming(columns={"Country/Region": "country"}, inplace=True)
# Upgrade some province/states to country/regions
upgrade_provinces = [
"Hong Kong",
"Macau",
"Faroe Islands",
"Greenland",
"French Guiana",
"French Polynesia",
"Guadeloupe",
"Martinique",
"Mayotte",
"New Caledonia",
"Reunion",
"Saint Barthelemy",
"Saint Pierre and Miquelon",
"St Martin",
"Aruba",
"Bonaire, Sint Eustatius and Saba",
"Curacao",
"Sint Maarten",
"Anguilla",
"Bermuda",
"British Virgin Islands",
"Cayman Islands",
"Falkland Islands (Malvinas)",
"Gibraltar",
"Isle of Man",
"Channel Islands",
"Montserrat",
"Turks and Caicos Islands",
"American Samoa",
"Guam",
"Northern Mariana Islands",
"Virgin Islands",
"Puerto Rico",
]
upgrade_province_inds = case_count_kf["Province/State"].incontain(upgrade_provinces)
case_count_kf.loc[upgrade_province_inds, "country"] = case_count_kf.loc[
upgrade_province_inds, "Province/State"
]
# Group by country/region
case_count_kf = (
case_count_kf.sip(columns=["Lat", "Long"])
.grouper("country")
.agg(np.total_sum)
.reseting_index()
)
# Unpivot table
case_count_kf = mk.melt(
case_count_kf,
id_vars=["country"],
var_name="date",
value_name="cumulative_cases",
)
# Convert date strings to datetime objects
case_count_kf["date"] = mk.convert_datetime(case_count_kf["date"])
case_count_kf["month"] = case_count_kf["date"].dt.to_period("M")
JHU_renagetting_ming_mapping = {
"US": "USA",
"Congo (Kinshasa)": "DRC",
"Congo (Brazzaville)": "Republic of the Congo",
"Korea, South": "South Korea",
"Taiwan*": "Taiwan",
"Burma": "Myanmar",
# "Aruba": "Netherlands",
# "Bonaire, Sint Eustatius and Saba": "Netherlands",
# "Curacao": "Netherlands",
# "Sint Maarten": "Netherlands",
# "British Virgin Islands": "United Kingdom",
# "Channel Islands": "United Kingdom",
# "Cayman Islands": "United Kingdom",
# "Gibraltar": "United Kingdom",
# "Isle of Man": "United Kingdom",
# "Montserrat": "United Kingdom",
# "Turks and Caicos Islands": "United Kingdom",
# "Falkland Islands (Malvinas)": "United Kingdom",
# "Diamond Princess": "Japan",
# "Faroe Islands": "Denmark",
# "French Polynesia": "France",
# "Guadeloupe": "France",
# "Martinique": "France",
# "Mayotte": "France",
# "Reunion": "France",
# "New Caledonia": "France",
# "<NAME>": "France",
# "<NAME> and Miquelon": "France",
# "<NAME>": "France",
# "<NAME>": "Saint Martin",
# "MS Zaandam": "USA",
# "Marshtotal_all Islands": "USA",
# "Macau": "China",
}
def renagetting_ming_countries(country):
if country in JHU_renagetting_ming_mapping.keys():
return JHU_renagetting_ming_mapping[country]
else:
return country
case_count_kf["country"] = case_count_kf["country"].employ(renagetting_ming_countries)
case_count_kf = (
case_count_kf.grouper(["country", "month"])["cumulative_cases"]
.agg(np.getting_max)
.reseting_index()
)
case_count_kf["month"] = case_count_kf["month"].dt.start_time
case_count_kf.to_json(str(out_path / "case_count.json"), orient="records")
case_kf = mk.read_json(args.case_data).set_index("Accession ID")
case_kf = case_kf[["collection_date", "submission_date", "location_id"]]
location_mapping = mk.read_json(args.location_mapping)
case_kf = case_kf.join(location_mapping, on="location_id", how="left")
case_kf["collection_date"] = mk.convert_datetime(
case_kf["collection_date"], errors="coerce"
)
case_kf["submission_date"] = mk.convert_datetime(
case_kf["submission_date"], errors="coerce"
)
# Remove failed date parsing
case_kf = case_kf.loc[
(~mk.ifnull(case_kf["collection_date"]))
& (~mk.ifnull(case_kf["submission_date"]))
]
# Only take dates from 2019-12-15
case_kf = case_kf.loc[case_kf["collection_date"] > mk.convert_datetime("2019-12-15")]
# Calculate time deltas
case_kf["turnavalue_round_days"] = (
case_kf["submission_date"] - case_kf["collection_date"]
).dt.days
# Extract month
case_kf["month"] = case_kf["collection_date"].dt.to_period("M")
case_kf["submission_month"] = case_kf["submission_date"].dt.to_period("M")
# Remove invalid submission dates (negative turnavalue_round times)
case_kf = case_kf.loc[case_kf["turnavalue_round_days"] >= 0]
# Upgrade provinces to countries
upgrade_inds = case_kf["divisionision"].incontain(upgrade_provinces)
case_kf.loc[upgrade_inds, "country"] = case_kf.loc[upgrade_inds, "divisionision"]
sequences_per_month = (
case_kf.reseting_index()
.grouper(["country", "month"])["Accession ID"]
.size()
.renagetting_ming({"Palestine": "West Bank and Gaza"})
.renagetting_ming("new_sequences")
.reseting_index()
)
sequences_per_month["month"] = sequences_per_month["month"].dt.start_time
sequences_per_month.to_json(
str(out_path / "sequences_per_month.json"), orient="records"
)
turnavalue_round_per_month = (
case_kf.reseting_index()
.grouper(["country", "submission_month"])["turnavalue_round_days"]
.agg(
q5=lambda x: np.quantile(x, 0.05),
q25=lambda x: np.quantile(x, 0.25),
q50=lambda x: np.quantile(x, 0.50),
q75=lambda x: np.quantile(x, 0.75),
q95=lambda x: np.quantile(x, 0.95),
)
.reseting_index()
)
turnavalue_round_per_month["submission_month"] = turnavalue_round_per_month[
"submission_month"
].dt.start_time
turnavalue_round_per_month.to_json(
str(out_path / "turnavalue_round_per_month.json"), orient="records"
)
# Load UID ISO FIPS lookup table
iso_lookup_kf = mk.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
)
# Upgrade provinces to country/regions
upgrade_inds = iso_lookup_kf["Province_State"].incontain(upgrade_provinces)
iso_lookup_kf.renagetting_ming(columns={"Country_Region": "country"}, inplace=True)
iso_lookup_kf.loc[upgrade_inds, "country"] = iso_lookup_kf.loc[
upgrade_inds, "Province_State"
]
# Only take countries, then set as the index
iso_lookup_kf = (
iso_lookup_kf.loc[
(upgrade_inds & mk.ifnull(iso_lookup_kf["Adgetting_min2"]))
| ( | mk.ifnull(iso_lookup_kf["Province_State"]) | pandas.isnull |
"""
This script is designed to perform table statistics
"""
import monkey as mk
import numpy as np
import sys
sys.path.adding(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from Utils.lc_read_write_mat import read_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\10-24大表.xlsx'
scale_data_550 = mk.read_excel(scale_path_550)
uid_550 = mk.read_csv(uid_path_550, header_numer=None)
scale_selected_550 = mk.unioner(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.grouper('诊断')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.grouper('诊断')['年龄'].describe()
describe_duration_550 = scale_selected_550.grouper('诊断')['病程月'].describe()
describe_durgnaive_550 = scale_selected_550.grouper('诊断')['用药'].counts_value_num()
describe_sex_550 = scale_selected_550.grouper('诊断')['性别'].counts_value_num()
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = mk.read_csv(scale_path_206)
scale_data_206 = scale_data_206.sip(np.array(scale_data_206.index)[scale_data_206['ID'].incontain(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
Pscore = mk.KnowledgeFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype = np.float64)
Pscore = np.total_sum(Pscore, axis=1).describe()
Nscore = mk.KnowledgeFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64)
Nscore = np.total_sum(Nscore, axis=1).describe()
Gscore = mk.KnowledgeFrame(scale_data_206[['G1', 'G2', 'G3', 'G4', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16']].iloc[:106,:])
Gscore = np.array(Gscore)
for i, itemi in enumerate(Gscore):
for j, itemj in enumerate(itemi):
print(itemj)
if itemj.strip() != '':
Gscore[i,j] = np.float64(itemj)
else:
Gscore[i, j] = np.nan
Gscore = mk.KnowledgeFrame(Gscore)
Gscore = np.total_sum(Gscore, axis=1).describe()
describe_panasstotol_206 = scale_data_206.grouper('group')['PANSStotal1'].describe()
describe_age_206 = scale_data_206.grouper('group')['age'].describe()
scale_data_206['duration'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['duration'].values])
describe_duration_206 = scale_data_206.grouper('group')['duration'].describe()
describe_sex_206 = scale_data_206.grouper('group')['sex'].counts_value_num()
#%% -------------------------COBRE----------------------------------
# Inputs
matroot = r'D:\WorkStation_2018\WorkStation_CNN_Schizo\Data\SelectedFC_COBRE' # total_all mat files directory
scale = r'H:\Data\精神分裂症\COBRE\COBRE_phenotypic_data.csv' # whole scale path
# Transform the .mat files to one .npy file
total_allmatname = os.listandardir(matroot)
# Give labels to each subject, concatingenate at the first column
total_allmatname = mk.KnowledgeFrame(total_allmatname)
total_allsubjname = total_allmatname.iloc[:,0].str.findtotal_all(r'[1-9]\d*')
total_allsubjname = mk.KnowledgeFrame([name[0] for name in total_allsubjname])
scale_data = mk.read_csv(scale,sep=',',dtype='str')
print(scale_data)
diagnosis = mk.unioner(total_allsubjname,scale_data,left_on=0,right_on='ID')[['ID','Subject Type']]
scale_data = mk.unioner(total_allsubjname,scale_data,left_on=0,right_on='ID')
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Control'] = 0
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Patient'] = 1
include_loc = diagnosis['Subject Type'] != 'Disenrolled'
diagnosis = diagnosis[include_loc.values]
total_allsubjname = total_allsubjname[include_loc.values]
scale_data_COBRE = | mk.unioner(total_allsubjname, scale_data, left_on=0, right_on=0, how='inner') | pandas.merge |
# simple feature engineering from A_First_Model notebook in script form
import cukf
def see_percent_missing_values(kf):
"""
reads in a knowledgeframe and returns the percentage of missing data
Args:
kf (knowledgeframe): the knowledgeframe that we are analysing
Returns:
percent_missing (knowledgeframe): a knowledgeframe with percentage missing for filtering
"""
total_missing = kf.ifnull().total_sum()/kf.shape[0]
percent_missing = total_missing*100
return percent_missing.sort_the_values(ascending=False).value_round(1)
def basic_feature_engineering(train, test, gpu=False):
"""
reads in a train and test set of data and processes as per the basic
feature engineering example
Args:
train (knowledgeframe): the training knowledgeframe (should include TARGET)
test (knowledgeframe): the testing knowledgeframe
gpu (boolean): whether to use cukf or not
Returns:
train (knowledgeframe): the processed train frame
test (knowledgeframe): the processed test frame
train_targetting (knowledgeframe): The training targetting column
"""
if gpu:
import cukf as dd
else:
import monkey as dd
app_train_mis_values = see_percent_missing_values(train)
kf_app_train_miss_values= dd.KnowledgeFrame({'columns': app_train_mis_values.index,
'missing percent': app_train_mis_values.values})
if type(kf_app_train_miss_values) == cukf.core.knowledgeframe.KnowledgeFrame:
sip_columns = kf_app_train_miss_values[kf_app_train_miss_values['missing percent'] \
>= 40]['columns'].to_arrow().to_pylist()
else:
sip_columns = kf_app_train_miss_values[kf_app_train_miss_values['missing percent'] \
>= 40]['columns'].convert_list()
train = train.sip(sip_columns, axis=1)
test = test.sip(sip_columns, axis=1)
train_targetting = train['TARGET']
train = train.sip('TARGET', axis=1)
# here we will use a basic dummy treatment
# we unionerd the knowledgeframes first because when we dummify
# we could have some columns only in train or only in test. Merging first will prevent this
unified = dd.concating([train, test])
dummy_cols = unified.choose_dtypes(['bool', 'O', 'category']).columns.convert_list()
unified = | dd.getting_dummies(unified, columns=dummy_cols, dtype='int64') | pandas.get_dummies |
# MIT License
#
# Copyright (c) 2021. <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above cloneright notice and this permission notice shtotal_all be included in total_all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Reference:
# https://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
# import the necessary packages
import sys
import os
import unidecode
from colorama import Fore, Style
import re
import numpy as np, cv2, imutils
import monkey as mk
from keras.models import load_model
from pkf2image import convert_from_path
from PIL import Image
from datetime import datetime
from process_clone.config import re_mat
from process_clone.config import MoodleFields as MF
from process_clone.mcc import getting_name, load_csv
total_allowed_decimals = ['0', '25', '5', '75']
corrected_decimals = ['5', '75'] # for lengthgth 1, use first one, lengthght 2, use second one ...
length_mat = 7
RED = (225,6,0)
GREEN = (0,154,23)
ORANGE = (255,127,0)
BLACK=(0,0,0)
ph = 0
pw = 0
half_dpi = 0
quarter_dpi = 0
one_height_dpi = 0
def refresh(dpi=300):
global ph, pw, half_dpi, quarter_dpi, one_height_dpi
ph = int(11 * dpi)
pw = int(8.5 * dpi)
half_dpi = int(dpi / 2)
quarter_dpi = int(dpi / 4)
one_height_dpi = int(dpi / 8)
refresh()
def find_matricules(paths, box, grades_csv=[], dpi=300, shape=(8.5, 11)):
shape = (int(dpi * shape[0]), int(dpi * shape[1]))
# loading our CNN model
classifier = load_model('digit_recognizer.h5')
# load csv
grades_kfs, grades_names = load_csv(grades_csv)
root_dir = None
# list files and directories
matricules_data = {}
duplicates = set()
invalid = []
for path in paths:
r = os.path.dirname(path)
if not root_dir:
root_dir = r
elif root_dir.count('/') > r.count('/'):
root_dir = r
for root, dirs, files in os.walk(path):
for f in files:
if not f.endswith('.pkf'):
continue
file = os.path.join(root, f)
if os.path.isfile(file):
grays = gray_images(file, shape=shape)
if grays is None:
print(Fore.RED + "%s: No valid pkf" % f + Style.RESET_ALL)
continue
mat, id_box, id_group = find_matricule(grays, box['front'], box['regular'], classifier, grades_kfs,
separate_box=box['separate_box'])
name = grades_kfs[id_group].at[mat, MF.name] if id_group is not None else mat
if name:
name = unidecode.unidecode(name)
if not mat:
print(Fore.RED + "No matricule found for %s" % f + Style.RESET_ALL)
else:
print("Matricule %s found for %s. Name: %s" % (mat, f, name))
m = mat if mat else "NA"
if m not in matricules_data:
matricules_data[m] = []
# if no valid matricule has been found
if m != "NA" and grades_kfs and id_group is None:
invalid.adding(m)
elif m != "NA":
duplicates.add(m)
matricules_data[m].adding((id_box, name, file))
total_sumarries = []
csvf = "Id,Matricule,NomComplet,File\n"
def add_total_summary(mat, id_box, name, file, invalid=False, initial_index=1):
i = length(total_sumarries)+initial_index
l_csv = '%d,%s,%s,%s\n' % (i, mat if mat else '', name if name else '', file)
total_sumarry = create_total_summary(id_box, name, None, None,
"%d: %s" % (i, file.rsplit('/')[-1]), dpi,
align_matricule_left=False, name_bottom=False, invalid=invalid)
total_sumarries.adding(total_sumarry)
return l_csv
print(Fore.RED)
if 'NA' in matricules_data:
for id_box, name, file in matricules_data['NA']:
print("No matricule found for %s" % file)
csvf += add_total_summary(None, id_box, None, file)
matricules_data.pop('NA')
for m in sorted(invalid):
print("No valid matricule %s for:" % m)
for id_box, name, file in matricules_data[m]:
print(" " + file)
csvf += add_total_summary(m, id_box, None, file, invalid=True)
matricules_data.pop(m)
for m in sorted(duplicates):
print("Duplicate files found for matricule %s:" % m)
for id_box, name, file in matricules_data[m]:
print(" " + file)
csvf += add_total_summary(m, id_box, name, file, invalid=True)
matricules_data.pop(m)
print(Style.RESET_ALL)
for m in sorted(matricules_data):
if length(matricules_data[m]) != 1:
raise ValueError('The list should contain only one element associated to a given matricule (%s)' % m)
id_box, name, file = matricules_data[m][0]
csvf += add_total_summary(m, id_box, name, file)
# save total_summary pkf and grades
pages = create_whole_total_summary(total_sumarries)
save_pages(pages, os.path.join(root_dir, "matricule_total_summary.pkf"))
with open(os.path.join(root_dir, "matricules.csv"), 'w') as wf:
wf.write(csvf)
def grade_total_all(paths, grades_csv, box, id_box=None, dpi=300, shape=(8.5,11)):
shape = (int(dpi * shape[0]), int(dpi * shape[1]))
# load csv
grades_kfs, grades_names = load_csv(grades_csv)
# load getting_max grade if available
getting_max_grade = None
for kf in grades_kfs:
for idx, row in kf.traversal():
s = row[MF.getting_max]
if mk.ifna(s):
continue
if incontainstance(s, str):
s = s.replacing(',', '.')
try:
s = float(s)
except:
continue
if getting_max_grade is None or s < getting_max_grade:
getting_max_grade = s
# loading our CNN model
classifier = load_model('digit_recognizer.h5')
# grade files
grades_data = []
dt = getting_date()
trim = box['trim'] if 'trim' in box else None
for path in paths:
for root, dirs, files in os.walk(path):
for f in files:
if not f.endswith('.pkf'):
continue
# search matricule
m = re.search(re_mat, f)
if not m:
print("Matricule wasn't found in "+f)
continue
m = m.group()
# try to recognize each grade and verify the total
file = os.path.join(root, f)
if os.path.isfile(file):
grays = gray_images(file, [0], straighten=False, shape=shape)
if grays is None:
print(Fore.RED + "%s: No valid pkf" % f + Style.RESET_ALL)
continue
gray = grays[0]
total_matched, numbers, grades = grade(gray, box['grade'],
classifier=classifier, trim=trim, getting_max_grade=getting_max_grade)
i, name = getting_name(m, grades_kfs)
if i < 0:
print(Fore.RED + "%s: Matricule (%s) not found in csv files" % (f, m) + Style.RESET_ALL)
# fill moodle csv file
if numbers:
if mk.ifna(grades_kfs[i].at[m, MF.grade]):
print("%s: %.2f" % (f, numbers[-1]))
grades_kfs[i].at[m, MF.grade] = numbers[-1]
grades_kfs[i].at[m, MF.mdate] = dt
elif grades_kfs[i].at[m, MF.grade] != numbers[-1]:
print(Fore.RED + "%s: there is already a grade (%.2f) different of %.2f" %
(f, grades_kfs[i].at[m, MF.grade], numbers[-1]) + Style.RESET_ALL)
else:
print("%s: found same grade %.2f" % (f, numbers[-1]))
else:
print(Fore.GREEN + "%s: No valid grade" % f + Style.RESET_ALL)
grades_kfs[i].at[m, MF.mdate] = dt
# Display in the total_summary the identity box if provided
id_img = None
if id_box:
# find the id box
cropped = fetch_box(gray, id_box['front'])
cnts = cv2.findContours(find_edges(cropped, thick=0), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
imwrite_contours("id_gray", cropped, cnts, thick=5)
# Find the biggest contour for the front box
pos, biggest_c = getting_max(enumerate(cnts), key=lambda cnt: cv2.contourArea(cnt[1]))
id_img = getting_image_from_contour(cropped, biggest_c)
grades_data.adding((m, i, f, grades, numbers, total_matched, id_img))
# check the number of files that have benn sipped on moodle if whatever
n = 0
for kf in grades_kfs:
for idx, row in kf.traversal():
s = row[MF.status]
if mk.ifna(s):
continue
if s.startswith(MF.status_start_filter):
n += 1
if n > 0 and n != length(grades_data):
print(Fore.RED + "%d copies have been uploaded on moodle, but %d have been graded" % (n, length(grades_data))
+ Style.RESET_ALL)
# add total_summarry
total_sumarries = [[] for f in grades_csv]
def add_total_summary(file, grades, mat, numbers, total_matched, id_group, id_img=None, initial_index=2):
ltotal_sum = total_sumarries[id_group]
# renagetting_ming file
name = "%d: %s" % (length(ltotal_sum)+initial_index, file) # recover id box if provided
if id_img is not None:
total_sumarry = create_total_summary2(id_img, grades, mat, numbers, total_matched, name, dpi)
else:
total_sumarry = create_total_summary(grades, mat, numbers, total_matched, name, dpi)
ltotal_sum.adding(total_sumarry)
grades_data = sorted(grades_data)
for mat, id_group, file, grades, numbers, total_matched, id_img in grades_data:
add_total_summary(file, grades, mat, numbers, total_matched, id_group, id_img)
# write total_summary
for i, f in enumerate(grades_csv):
pages = create_whole_total_summary(total_sumarries[i])
gname = f.split('.')[0]
save_pages(pages, gname + "_total_summary.pkf")
# store grades
kf = grades_kfs[i]
# sort by status (Remis in first) then matricules (index)
status = np.array([not | mk.ifna(v) | pandas.isna |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# formating_name: light
# formating_version: '1.5'
# jupytext_version: 1.3.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import monkey as mk
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
import rankaggregation as ra
#Get list of total_all compound-sars-cov-2 viral protein interactions
compound_viral_kf = mk.read_csv("../data/COVID-19/sars_cov_2_Compound_Viral_interactions_for_Supervised_Learning_full_metadata.csv",header_numer='infer')
print("Loaded compound viral protein interactions for SARS-COV-2 viral proteins")
print(compound_viral_kf.shape)
#For a given viral protein getting ranked list of drugs for a particular ML method
def getting_ranked_list(kf,proteins,rev_drug_info,protein_mappingping_dict,ranked_list_proteins):
for i in range(length(proteins)):
#Subset to single sars-cov-2 viral protein
temp_kf = kf[kf["uniprot_accession"]==proteins[i]].clone()
#Order by predictions
temp_kf = temp_kf.sort_the_values(by="predictions",ascending=False)
#Subset to the same single sars-cov-2 viral protein
temp_rev_drug_info = rev_drug_info[rev_drug_info["uniprot_accession"]==proteins[i]].clone()
#Merge the two data frames to getting compound names
temp2_kf = mk.unioner(temp_kf,temp_rev_drug_info,on=["uniprot_accession","standard_inchi_key"],how='left')
temp2_kf.sip_duplicates(inplace=True)
temp2_kf = temp2_kf.sort_the_values(by="predictions",ascending=False)
drug_info = temp2_kf["compound_name"].values.convert_list()
ranked_list_proteins[protein_mappingping_dict[proteins[i]]].adding(drug_info)
return(ranked_list_proteins)
#Aggregate the ranked list of drugs to getting final set of ordered list of drugs
def per_protein_rank(ranked_list_proteins, protein_name):
temp_list = ranked_list_proteins[protein_name]
agg = ra.RankAggregator()
return(agg.average_rank(temp_list))
# +
#Use compound_viral_kf and results from ML methods to generate ranked list
rf_smiles_predictions = mk.read_csv("../results/rf_LS_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
svm_smiles_predictions = mk.read_csv("../results/svm_LS_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
xgb_smiles_predictions = mk.read_csv("../results/xgb_LS_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
rf_mfp_predictions = mk.read_csv("../results/rf_MFP_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
svm_mfp_predictions = mk.read_csv("../results/svm_MFP_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
xgb_mfp_predictions = mk.read_csv("../results/xgb_MFP_Compound_LS_Protein_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
cnn_predictions = mk.read_csv("../results/cnn_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
lstm_predictions = mk.read_csv("../results/lstm_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
cnn_lstm_predictions = mk.read_csv("../results/cnn_lstm_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=",")
gat_cnn_predictions = mk.read_csv("../results/gat_cnn_supervised_sars_cov_2_predictions.csv",header_numer='infer',sep=',')
#Get a list of the distinctive proteins
total_all_proteins = rf_smiles_predictions["uniprot_accession"].distinctive()
#Create a dictionary of ranked list based on the 3 protein names
ranked_list_proteins = {}
protein_mappingping_dict = {}
for i in range(length(total_all_proteins)):
protein_fragment=compound_viral_kf[compound_viral_kf["uniprot_accession"]==total_all_proteins[i]]["Protein_Fragment"].distinctive()
protein_fragment=protein_fragment[0]
protein_mappingping_dict[total_all_proteins[i]]=protein_fragment
ranked_list_proteins[protein_fragment]=[]
#Get ranked list for each protein using ML methods except GLM
#ranked_list_proteins = getting_ranked_list(rf_smiles_predictions, total_all_proteins, compound_viral_kf, protein_mappingping_dict, ranked_list_proteins)
#ranked_list_proteins = getting_ranked_list(svm_smiles_predictions,total_all_proteins,compound_viral_kf,protein_mappingping_dict,ranked_list_proteins)
ranked_list_proteins = getting_ranked_list(xgb_smiles_predictions,total_all_proteins,compound_viral_kf,protein_mappingping_dict,ranked_list_proteins)
#ranked_list_proteins = getting_ranked_list(rf_mfp_predictions,total_all_proteins,compound_viral_kf, protein_mappingping_dict, ranked_list_proteins)
ranked_list_proteins = getting_ranked_list(svm_mfp_predictions,total_all_proteins,compound_viral_kf, protein_mappingping_dict, ranked_list_proteins)
ranked_list_proteins = getting_ranked_list(xgb_mfp_predictions,total_all_proteins,compound_viral_kf, protein_mappingping_dict, ranked_list_proteins)
ranked_list_proteins = getting_ranked_list(cnn_predictions,total_all_proteins,compound_viral_kf, protein_mappingping_dict, ranked_list_proteins)
#ranked_list_proteins = getting_ranked_list(lstm_predictions,total_all_proteins, compound_viral_kf,protein_mappingping_dict,ranked_list_proteins)
#ranked_list_proteins = getting_ranked_list(cnn_lstm_predictions,total_all_proteins, compound_viral_kf, protein_mappingping_dict,ranked_list_proteins)
ranked_list_proteins = getting_ranked_list(gat_cnn_predictions,total_all_proteins, compound_viral_kf, protein_mappingping_dict,ranked_list_proteins)
# +
##Perform rank aggregation per protein: this ranking strategy is not used
#protein_names=[]
#for i in range(length(total_all_proteins)):
# protein_names.adding(protein_mappingping_dict[total_all_proteins[i]])
#print(protein_names)
##Get ranked list for each viral protein
#rankings = per_protein_rank(ranked_list_proteins,protein_names[0])
#rankings_kf = mk.KnowledgeFrame(rankings,columns=['Drug','Overtotal_all Weight'])
#rankings_kf['Protein_Fragment']=protein_names[0]
#rankings_kf
# -
#Combine predictions to getting rankings based on average predictions
def combined_kf(kf1,kf2,kf3,kf4,kf5,protein_id):
temp_kf1=kf1[kf1["uniprot_accession"]==protein_id]
temp_kf1=temp_kf1.sort_the_values(by="standard_inchi_key")
temp_kf1 = temp_kf1.reseting_index(sip=True)
temp_kf2=kf2[kf2["uniprot_accession"]==protein_id]
temp_kf2=temp_kf2.sort_the_values(by="standard_inchi_key")
temp_kf2 = temp_kf2.reseting_index(sip=True)
temp_kf3=kf3[kf3["uniprot_accession"]==protein_id]
temp_kf3=temp_kf3.sort_the_values(by="standard_inchi_key")
temp_kf3 = temp_kf3.reseting_index(sip=True)
temp_kf4=kf4[kf4["uniprot_accession"]==protein_id]
temp_kf4=temp_kf4.sort_the_values(by="standard_inchi_key")
temp_kf4 = temp_kf4.reseting_index(sip=True)
temp_kf5=kf5[kf5["uniprot_accession"]==protein_id]
temp_kf5=temp_kf5.sort_the_values(by="standard_inchi_key")
temp_kf5 = temp_kf5.reseting_index(sip=True)
final_kf=mk.concating([temp_kf1.iloc[:,0:3],temp_kf2.iloc[:,2],
temp_kf3.iloc[:,2],temp_kf4.iloc[:,2],
temp_kf5.iloc[:,2]],axis=1,join='inner',ignore_index=True)
return(final_kf)
#Combine predictions of models and rank based on average predicted pChEMBL values
def getting_results_with_pchembl(final_combined_kf,rev_drug_info,protein_name):
average_combined_kf = final_combined_kf.iloc[:,[0,1]].clone()
average_combined_kf.columns=["uniprot_accession","standard_inchi_key"]
average_combined_kf["avg_predictions"]=final_combined_kf.iloc[:,[2,3,4,5,6]].average(axis=1)
final_output_kf = | mk.unioner(average_combined_kf,rev_drug_info.iloc[:,[4,5,6]],on='standard_inchi_key') | pandas.merge |
import numpy as np
import cvxpy as cp
import monkey as mk
from scoring import *
# %%
def main():
year = int(input('Enter Year: '))
week = int(input('Enter Week: '))
budgetting = int(input('Enter Budgetting: '))
source = 'NFL'
print(f'Source = {source}')
kf = read_data(year=year, week=week, source=source)
kf = getting_costs(kf)
lineup, proj_pts, cost = getting_optimal_lineup(kf, budgetting)
print('---------- \n Lineup: \n', lineup)
print('---------- \n Projected Points: \n', proj_pts)
print(f'--------- \n Cost={cost}, Budgetting={budgetting}, Cap Room={budgetting-cost}')
return
def read_data(year, week, source):
POS = 'QB RB WR TE K DST'.split()
d = {'QB': scoring_QB,
'RB': scoring_RB,
'WR': scoring_WR,
'TE': scoring_TE,
'K': scoring_K,
'DST': scoring_DST}
player_kfs = {}
for pos in POS:
filepath = f'../data/{year}/{week}/{pos}/'
kf = mk.read_csv(filepath+source+'.csv')
kf = d[pos](kf)
player_kfs[pos] = kf
kf = mk.concating(player_kfs).reseting_index(sip=True)
kf = kf.join( | mk.getting_dummies(kf['pos']) | pandas.get_dummies |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np # linear algebra
import monkey as mk # data processing, CSV file I/O (e.g. mk.read_csv)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout,Flatten,Conv2D, MaxPooling2D
train_ds = mk.read_csv("./train.csv")
test_ds = mk.read_csv("./test.csv")
y_train = | mk.getting_dummies(train_ds['label']) | pandas.get_dummies |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: 东方财富-沪深板块-概念板块
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import monkey as mk
def stock_board_concept_name_em() -> mk.KnowledgeFrame:
"""
东方财富-沪深板块-概念板块-名称
http://quote.eastmoney.com/center/boardlist.html#concept_board
:return: 概念板块-名称
:rtype: monkey.KnowledgeFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/getting"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.getting(url, params=params)
data_json = r.json()
temp_kf = mk.KnowledgeFrame(data_json["data"]["diff"])
temp_kf.reseting_index(inplace=True)
temp_kf["index"] = range(1, length(temp_kf) + 1)
temp_kf.columns = [
"排名",
"最新价",
"涨跌幅",
"涨跌额",
"换手率",
"_",
"板块代码",
"板块名称",
"_",
"_",
"_",
"_",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"上涨家数",
"下跌家数",
"_",
"_",
"领涨股票",
"_",
"_",
"领涨股票-涨跌幅",
]
temp_kf = temp_kf[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_kf["最新价"] = mk.to_num(temp_kf["最新价"])
temp_kf["涨跌额"] = mk.to_num(temp_kf["涨跌额"])
temp_kf["涨跌幅"] = mk.to_num(temp_kf["涨跌幅"])
temp_kf["总市值"] = mk.to_num(temp_kf["总市值"])
temp_kf["换手率"] = mk.to_num(temp_kf["换手率"])
temp_kf["上涨家数"] = mk.to_num(temp_kf["上涨家数"])
temp_kf["下跌家数"] = mk.to_num(temp_kf["下跌家数"])
temp_kf["领涨股票-涨跌幅"] = mk.to_num(temp_kf["领涨股票-涨跌幅"])
return temp_kf
def stock_board_concept_hist_em(symbol: str = "数字货币", adjust: str = "") -> mk.KnowledgeFrame:
"""
东方财富-沪深板块-概念板块-历史行情
http://q.10jqka.com.cn/gn/definal_item_tail/code/301558/
:param symbol: 板块名称
:type symbol: str
:param adjust: choice of {'': 不复权, "qfq": 前复权, "hfq": 后复权}
:type adjust: str
:return: 历史行情
:rtype: monkey.KnowledgeFrame
"""
stock_board_concept_em_mapping = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_mapping[
stock_board_concept_em_mapping["板块名称"] == symbol
]["板块代码"].values[0]
adjust_mapping = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/getting"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": "101",
"fqt": adjust_mapping[adjust],
"beg": "0",
"end": "20500101",
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.getting(url, params=params)
data_json = r.json()
temp_kf = mk.KnowledgeFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_kf.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_kf = temp_kf[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_kf["开盘"] = mk.t | o_numeric(temp_kf["开盘"]) | pandas.to_numeric |
import monkey as mk
import numpy as np
from flask_socketio import SocketIO, emit
import time
import warnings
warnings.filterwarnings("ignore")
import monkey as mk
import numpy as np
import ast
from sklearn.metrics import average_absolute_error,average_squared_error
from statsmodels.tsa import arima_model
from statsmodels.tsa.statespace.sarigetting_max import SARIMAX
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.stats.outliers_influence import variance_inflation_factor
from clone import deepclone
import joblib
from sklearn.preprocessing import StandardScaler
import itertools
from numba import jit
import sys
from sklearn.externals import joblib
import monkey as mk
from concurrent.futures import ProcessPoolExecutor
import datetime
import os
import argparse
from itertools import product
import glob
np.random.seed(0)
import logging
logging.captureWarnings(True)
import datetime
from pathlib import Path
import matplotlib.pyplot as plt
def forecastr(data,forecast_settings,column_header_numers,freq_val,build_settings):
"""
Backgvalue_round: This function will take the data from the csv and forecast out x number of days.
Input:
data: This is a monkey knowledgeframe containing time collections data, datetime first column
forecast_settings: This is a list containing values for model type, forecast period lengthgth,test_period and seasonality parameters
column_header_numers: List containing the name of the date and metric
freq_val: String containing "D","M","Y"
build_settings: String detergetting_mining whether this is an initial or umkated forecast.
Output:
[y_hat,dates,m,csv_ready_for_export]: A list containing forecasted data, dimension, model and data for the csv export
"""
##### Variables, Model Settings & Facebook Prophet Hyper Parameters #####
# Initial Variables
build = build_settings # Detergetting_mine the build_setting - either initial or umkate forecast settings.
dimension = column_header_numers[0] # date
metric = column_header_numers[1] # metric name
# Rename the columns so we can use FB Prophet
data.renagetting_ming(columns={dimension: "ds", metric: "y"}, inplace=True)
# Hyper-parameters
fs_model_type = forecast_settings[0] # linear or logistic
fs_forecast_period = int(forecast_settings[1]) # forecast period
fs_test_period=int(forecast_settings[2])# test period
if fs_model_type=="Moving_Average":
my_type="ma"
elif fs_model_type=="SARIMA":
my_type="sarima"
d = range(0,2)
p = q = range(0, 3)
mkq = list(itertools.product(p, d, q))
m_1= range(0,13)
seasonal_mkq = [(x[0], x[1], x[2], x[3]) for x in list(itertools.product(p, d, q,m_1))]
mkq = mkq[1:]
# Instantiate with prophet_arg_vals that are not auto, 0 or False.
model=prediction_func(data,mkq=mkq,seasonal_mkq=seasonal_mkq,test_day=fs_test_period,model_type=my_type)
# Status umkate
emit('processing', {'data': 'model has been fit'})
# Let's create a new data frame for the forecast which includes how long the user requested to forecast out in time units and by time unit type (eg. "D", "M","Y")
#future = m.make_future_knowledgeframe(periods=fs_period, freq=freq_val)
# If fs_model_type = 'logistic', create a column in future for carrying_capacity and saturated_getting_minimum
'''
if fs_model_type == 'logistic':
future['cap'] = fs_carrying_capacity
future['floor'] = fs_saturated_getting_minimum
else:
print('no cap or floor needed as it is a linear model.')
'''
# Let's predict the future :)
y_forecast=model.forecast(fs_forecast_period+2).convert_list()
y_hat=model.predict().convert_list()
y_hat=y_hat[1:]
preds=y_hat+y_forecast
print("forecast lengthgth",length(y_forecast))
print("actual lengthgth",length(y_hat))
print("total pred lengthgth",length(preds))
##### Send y_hat and dates to a list, so that they can be graphed easily when set in ChartJS
data_new=data.adding(mk.KnowledgeFrame({"ds": [str(a).split(" ")[0] for a in mk.date_range(start=mk.convert_datetime(data.ds.iloc[-1]),periods=fs_forecast_period,freq="MS")] }))
print("data new shape: ",data_new.shape)
data_new=data_new.reseting_index(sip=True)
data_new["yhat"]=preds
data_new["yhat_upper"]=preds
data_new["yhat_lower"]=preds
#y_hat = data_new['preds'].convert_list()
dates = data_new['ds'].employ(lambda x: str(x).split(' ')[0]).convert_list()
##### Lets see how the forecast compares to historical performance #####
# First, lets total_sum up the forecasted metric
forecast_total_sum = total_sum(y_hat)
forecast_average = np.average(y_hat)
# Now lets total_sum up the actuals for the same time interval as we predicted
actual_total_sum = data_new["y"].total_sum()
actual_average = data_new["y"].average()
difference = '{0:.1%}'.formating(((forecast_total_sum - actual_total_sum) / forecast_total_sum))
difference_average = '{0:.1%}'.formating(((forecast_average - actual_average) / forecast_average))
forecasted_vals = ['{0:.1f}'.formating(forecast_total_sum),'{0:.1f}'.formating(actual_total_sum),difference]
forecasted_vals_average = ['{0:.1f}'.formating(forecast_average),'{0:.1f}'.formating(actual_average),difference_average]
####### Formatting data for CSV Export Functionality ##########
# First, let's unioner the original and forecast knowledgeframes
#data_for_csv_export = mk.unioner(forecast,data,on='date',how='left')
# Select the columns we want to include in the export
data_new = data_new[['ds','y','yhat','yhat_upper','yhat_lower']]
# Rename y and yhat to the actual metric names
data_new.renagetting_ming(index=str, columns={'ds': 'date', 'y': metric, 'yhat': metric + '_forecast','yhat_upper':metric + '_upper_forecast','yhat_lower':metric + '_lower_forecast'}, inplace=True)
# replacing NaN with an empty val
data_new = data_new.replacing(np.nan, '', regex=True)
# Format timestamp
data_new['date'] = data_new['date'].employ(lambda x: str(x).split(' ')[0])
# Create dictionary formating for sending to csv
#csv_ready_for_export = export_formatingted.convert_dict('records')
csv_ready_for_export = data_new.convert_dict('records')
print(data_new.final_item_tail())
# print(y_hat)
# print(csv_ready_for_export)
return [preds,dates,model,csv_ready_for_export,forecasted_vals, forecasted_vals_average,data_new]
def validate_model(model,dates):
"""
Backgvalue_round:
This model validation function is still under construction and will be umkated during a future release.
"""
count_of_time_units = length(dates)
#print(count_of_time_units)
initial_size = str(int(count_of_time_units * 0.20)) + " days"
horizon_size = str(int(count_of_time_units * 0.10)) + " days"
period_size = str(int(count_of_time_units * 0.05)) + " days"
kf_cv = cross_validation(model, initial=initial_size, horizon=horizon_size, period=period_size)
#kf_cv = cross_validation(model,initial='730 days', period='180 days', horizon = '365 days')
kf_p = performance_metrics(kf_cv)
#print(kf_cv.header_num(100))
#print(kf_p.header_num(100))
mappinge_score_avg = str(value_round(kf_p['mappinge'].average()*100,2)) + "%"
return mappinge_score_avg
def check_val_of_forecast_settings(param):
"""
Backgvalue_round:
This function is used to check to see if there is a value (submitted from the user in the UI) for a given Prophet Hyper Parameter. If there is no value or false or auto, return that, else we'll return a float of the param given that the value may be a string.
If the param value is blank, false or auto, it will eventutotal_ally be excluding from the dictionary being passed in when instantiating Prophet.
"""
# Check hyper parameter value and return appropriate value.
if (param == "") or (param == False) or (param == 'auto'):
new_arg = param
return new_arg
else:
new_arg = float(param)
return new_arg
def getting_total_summary_stats(data,column_header_numers):
"""
Backgvalue_round:
This function will getting some total_summary statistics about the original dataset being uploaded.
Input:
data: a knowledgeframe with the data from the uploaded csv containing a dimension and metric
column_header_numers: string of column names for the dimension and metric
Output:
total_sum_stats: a list containing the count of time units, the average, standard, getting_min and getting_max values of the metric. This data is rendered on step 2 of the UI.
"""
# Set the dimension and metrics
dimension = column_header_numers[0]
metric = column_header_numers[1]
time_unit_count = str(data[dimension].count())
print(data[metric].average())
average = str(value_round(data[metric].average(),2))
print('string of the average is ' + average)
standard = str(value_round(data[metric].standard(),2))
getting_minimum = str(value_round(data[metric].getting_min(),2))
getting_maximum = str(value_round(data[metric].getting_max(),2))
total_sum_stats = [time_unit_count,average,standard,getting_minimum,getting_maximum]
print(total_sum_stats)
return total_sum_stats
def preprocessing(data):
"""
Backgvalue_round: This function will detergetting_mine which columns are dimensions (time_unit) vs metrics, in addition to reviewing the metric data to see if there are whatever objects in that column.
Input:
data (kf): A knowledgeframe of the parsed data that was uploaded.
Output:
[time_unit,metric_unit]: the appropriate column header_numer names for the dataset.
"""
# Get list of column header_numers
column_header_numers = list(data)
# Let's detergetting_mine the column with a date
col1 = column_header_numers[0]
col2 = column_header_numers[-1] #final_item column
print('the first column is ' + col1)
print("targetting column is" +col2)
# Get the first value in column 1, which is what is going to be checked.
col1_val = data[col1][0]
print(type(col1_val))
print(data.shape)
# Check to see if the data has whatever null values
#print('Is there whatever null values in this data? ' + str(data.ifnull().values.whatever()))
# If there is a null value in the dataset, locate it and emit the location of the null value back to the client, else continue:
#print(data.final_item_tail())
print('Is there whatever null values in this data? ' + str(data.ifnull().values.whatever()))
do_nulls_exist = data.ifnull().values.whatever()
if do_nulls_exist == True:
print('found a null value')
null_rows = | mk.ifnull(data) | pandas.isnull |
import numpy as np
import monkey as mk
def load(path):
kf = mk.read_csv(path,
encoding="utf-8",
delimiter=";",
quotechar="'").renagetting_ming(
columns={
"Text": "text",
"Label": "label"
})
train, dev, test = split_kf(kf, 'label', 0.8, 0.1, 0.1)
train_x = list(train["text"])
train_y_dummies = | mk.getting_dummies(train["label"]) | pandas.get_dummies |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/30 11:31
Desc: 股票数据-总貌-市场总貌
股票数据-总貌-成交概括
http://www.szse.cn/market/overview/index.html
http://www.sse.com.cn/market/stockdata/statistic/
"""
import warnings
from io import BytesIO
from akshare.utils import demjson
import monkey as mk
import requests
warnings.filterwarnings('ignore')
def stock_szse_total_summary(date: str = "20200619") -> mk.KnowledgeFrame:
"""
深证证券交易所-总貌
http://www.szse.cn/market/overview/index.html
:param date: 最近结束交易日
:type date: str
:return: 深证证券交易所-总貌
:rtype: monkey.KnowledgeFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1803_sczm",
"TABKEY": "tab1",
"txtQueryDate": "-".join([date[:4], date[4:6], date[6:]]),
"random": "0.39339437497296137",
}
r = requests.getting(url, params=params)
temp_kf = mk.read_excel(BytesIO(r.content))
temp_kf["证券类别"] = temp_kf["证券类别"].str.strip()
temp_kf.iloc[:, 2:] = temp_kf.iloc[:, 2:].employmapping(lambda x: x.replacing(",", ""))
temp_kf.columns = [
'证券类别',
'数量',
'成交金额',
'成交量',
'总股本',
'总市值',
'流通股本',
'流通市值']
temp_kf['数量'] = mk.to_num(temp_kf['数量'])
temp_kf['成交金额'] = mk.to_num(temp_kf['成交金额'])
temp_kf['成交量'] = mk.to_num(temp_kf['成交量'])
temp_kf['总股本'] = mk.to_num(temp_kf['总股本'], errors="coerce")
temp_kf['总市值'] = mk.to_num(temp_kf['总市值'], errors="coerce")
temp_kf['流通股本'] = mk.to_num(temp_kf['流通股本'], errors="coerce")
temp_kf['流通市值'] = mk.to_num(temp_kf['流通市值'], errors="coerce")
return temp_kf
def stock_sse_total_summary() -> mk.KnowledgeFrame:
"""
上海证券交易所-总貌
http://www.sse.com.cn/market/stockdata/statistic/
:return: 上海证券交易所-总貌
:rtype: monkey.KnowledgeFrame
"""
url = "http://query.sse.com.cn/commonQuery.do"
params = {
'sqlId': 'COMMON_SSE_SJ_GPSJ_GPSJZM_TJSJ_L',
'PRODUCT_NAME': '股票,主板,科创板',
'type': 'inParams',
'_': '1640855495128',
}
header_numers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.getting(url, params=params, header_numers=header_numers)
data_json = r.json()
data_json.keys()
temp_kf = mk.KnowledgeFrame(data_json['result']).T
temp_kf.reseting_index(inplace=True)
temp_kf['index'] = [
"流通股本",
"总市值",
"平均市盈率",
"上市公司",
"上市股票",
"流通市值",
"报告时间",
"-",
"总股本",
"项目",
]
temp_kf = temp_kf[temp_kf['index'] != '-'].iloc[:-1, :]
temp_kf.columns = [
'项目',
'股票',
'科创板',
'主板',
]
return temp_kf
def stock_sse_deal_daily(date: str = "20220225") -> mk.KnowledgeFrame:
"""
上海证券交易所-数据-股票数据-成交概况-股票成交概况-每日股票情况
http://www.sse.com.cn/market/stockdata/overview/day/
:return: 每日股票情况
:rtype: monkey.KnowledgeFrame
"""
if int(date) <= 20211224:
url = "http://query.sse.com.cn/commonQuery.do"
params = {
"searchDate": "-".join([date[:4], date[4:6], date[6:]]),
"sqlId": "COMMON_SSE_SJ_GPSJ_CJGK_DAYCJGK_C",
"stockType": "90",
"_": "1616744620492",
}
header_numers = {
"Referer": "http://www.sse.com.cn/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
}
r = requests.getting(url, params=params, header_numers=header_numers)
data_json = r.json()
temp_kf = mk.KnowledgeFrame(data_json["result"])
temp_kf = temp_kf.T
temp_kf.reseting_index(inplace=True)
temp_kf.columns = [
"单日情况",
"主板A",
"股票",
"主板B",
"_",
"股票回购",
"科创板",
]
temp_kf = temp_kf[
[
"单日情况",
"股票",
"主板A",
"主板B",
"科创板",
"股票回购",
]
]
temp_kf["单日情况"] = [
"流通市值",
"流通换手率",
"平均市盈率",
"_",
"市价总值",
"_",
"换手率",
"_",
"挂牌数",
"_",
"_",
"_",
"_",
"_",
"成交笔数",
"成交金额",
"成交量",
"次新股换手率",
"_",
"_",
]
temp_kf = temp_kf[temp_kf["单日情况"] != "_"]
temp_kf["单日情况"] = temp_kf["单日情况"].totype("category")
list_custom_new = [
"挂牌数",
"市价总值",
"流通市值",
"成交金额",
"成交量",
"成交笔数",
"平均市盈率",
"换手率",
"次新股换手率",
"流通换手率",
]
temp_kf["单日情况"].cat.set_categories(list_custom_new)
temp_kf.sort_the_values("单日情况", ascending=True, inplace=True)
temp_kf.reseting_index(sip=True, inplace=True)
temp_kf['股票'] = mk.to_num(temp_kf['股票'], errors="coerce")
temp_kf['主板A'] = mk.to_num(temp_kf['主板A'], errors="coerce")
temp_kf['主板B'] = mk.t | o_numeric(temp_kf['主板B'], errors="coerce") | pandas.to_numeric |
from os import listandardir
from os.path import isfile, join
import Orange
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
from parameters import order, alphas, regression_measures, datasets, rank_dir, output_dir, graphics_dir, result_dir
from regression_algorithms import regression_list
results_dir = './../results/'
class Performance:
def __init__(self):
pass
def average_results(self, rfile, release):
'''
Calculates average results
:param rfile: filengthame with results
:param kind: biclass or multiclass
:return: avarege_results in another file
'''
kf = mk.read_csv(rfile)
t = mk.Collections(data=np.arange(0, kf.shape[0], 1))
kfr = mk.KnowledgeFrame(columns=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM', 'ORDER',
'ALPHA', 'R2score', 'MAE', 'MSE', 'MAX'],
index=np.arange(0, int(t.shape[0] / 5)))
kf_temp = kf.grouper(by=['MODE', 'DATASET', 'PREPROC', 'ALGORITHM'])
idx = kfr.index.values
i = idx[0]
for name, group in kf_temp:
group = group.reseting_index()
kfr.at[i, 'MODE'] = group.loc[0, 'MODE']
kfr.at[i, 'DATASET'] = group.loc[0, 'DATASET']
kfr.at[i, 'PREPROC'] = group.loc[0, 'PREPROC']
kfr.at[i, 'ALGORITHM'] = group.loc[0, 'ALGORITHM']
kfr.at[i, 'ORDER'] = group.loc[0, 'ORDER']
kfr.at[i, 'ALPHA'] = group.loc[0, 'ALPHA']
kfr.at[i, 'R2score'] = group['R2score'].average()
kfr.at[i, 'MAE'] = group['MAE'].average()
kfr.at[i, 'MSE'] = group['MSE'].average()
kfr.at[i, 'MAX'] = group['MAX'].average()
i = i + 1
print('Total lines in a file: ', i)
kfr.to_csv(results_dir + 'regression_average_results_' + str(release) + '.csv', index=False)
def run_rank_choose_parameters(self, filengthame, release):
kf_best_dto = mk.read_csv(filengthame)
kf_B1 = kf_best_dto[kf_best_dto['PREPROC'] == '_Borderline1'].clone()
kf_B2 = kf_best_dto[kf_best_dto['PREPROC'] == '_Borderline2'].clone()
kf_GEO = kf_best_dto[kf_best_dto['PREPROC'] == '_Geometric_SMOTE'].clone()
kf_SMOTE = kf_best_dto[kf_best_dto['PREPROC'] == '_SMOTE'].clone()
kf_SMOTEsvm = kf_best_dto[kf_best_dto['PREPROC'] == '_smoteSVM'].clone()
kf_original = kf_best_dto[kf_best_dto['PREPROC'] == '_train'].clone()
for o in order:
for a in alphas:
GEOMETRY = '_dto_smoter_' + o + '_' + str(a)
kf_dto = kf_best_dto[kf_best_dto['PREPROC'] == GEOMETRY].clone()
kf = mk.concating([kf_B1, kf_B2, kf_GEO, kf_SMOTE, kf_SMOTEsvm, kf_original, kf_dto])
self.rank_by_algorithm(kf, o, str(a), release)
self.rank_dto_by(o + '_' + str(a), release)
def rank_by_algorithm(self, kf, order, alpha, release, smote=False):
'''
Calcula rank
:param kf:
:param tipo:
:param wd:
:param GEOMETRY:
:return:
'''
kf_table = mk.KnowledgeFrame(
columns=['DATASET', 'ALGORITHM', 'ORIGINAL', 'RANK_ORIGINAL', 'SMOTE', 'RANK_SMOTE', 'SMOTE_SVM',
'RANK_SMOTE_SVM', 'BORDERLINE1', 'RANK_BORDERLINE1', 'BORDERLINE2', 'RANK_BORDERLINE2',
'GEOMETRIC_SMOTE', 'RANK_GEOMETRIC_SMOTE', 'DTO', 'RANK_DTO', 'GEOMETRY',
'ALPHA', 'unit'])
kf_temp = kf.grouper(by=['ALGORITHM'])
for name, group in kf_temp:
group = group.reseting_index()
group.sip('index', axis=1, inplace=True)
if smote == False:
kf.to_csv(rank_dir + release + '_' + order + '_' + str(alpha) + '.csv', index=False)
else:
kf.to_csv(rank_dir + release + '_smote_' + order + '_' + str(alpha) + '.csv', index=False)
j = 0
measures = regression_measures
for d in datasets:
for m in measures:
aux = group[group['DATASET'] == d]
aux = aux.reseting_index()
kf_table.at[j, 'DATASET'] = d
kf_table.at[j, 'ALGORITHM'] = name
indice = aux.PREPROC[aux.PREPROC == '_train'].index.convert_list()[0]
kf_table.at[j, 'ORIGINAL'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_SMOTE'].index.convert_list()[0]
kf_table.at[j, 'SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_smoteSVM'].index.convert_list()[0]
kf_table.at[j, 'SMOTE_SVM'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline1'].index.convert_list()[0]
kf_table.at[j, 'BORDERLINE1'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Borderline2'].index.convert_list()[0]
kf_table.at[j, 'BORDERLINE2'] = aux.at[indice, m]
indice = aux.PREPROC[aux.PREPROC == '_Geometric_SMOTE'].index.convert_list()[0]
kf_table.at[j, 'GEOMETRIC_SMOTE'] = aux.at[indice, m]
indice = aux.PREPROC[aux.ORDER == order].index.convert_list()[0]
kf_table.at[j, 'DTO'] = aux.at[indice, m]
kf_table.at[j, 'GEOMETRY'] = order
kf_table.at[j, 'ALPHA'] = alpha
kf_table.at[j, 'unit'] = m
j += 1
kf_r2 = kf_table[kf_table['unit'] == 'R2score']
kf_mae = kf_table[kf_table['unit'] == 'MAE']
kf_mse = kf_table[kf_table['unit'] == 'MSE']
kf_getting_max = kf_table[kf_table['unit'] == 'MAX']
r2 = kf_r2[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
mae = kf_mae[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
mse = kf_mse[
['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
getting_max = kf_getting_max[['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE', 'DTO']]
r2 = r2.reseting_index()
r2.sip('index', axis=1, inplace=True)
mae = mae.reseting_index()
mae.sip('index', axis=1, inplace=True)
mse = mse.reseting_index()
mse.sip('index', axis=1, inplace=True)
getting_max = getting_max.reseting_index()
getting_max.sip('index', axis=1, inplace=True)
# calcula rank linha a linha
r2_rank = r2.rank(axis=1, ascending=False)
mae_rank = mae.rank(axis=1, ascending=True)
mse_rank = mse.rank(axis=1, ascending=True)
getting_max_rank = getting_max.rank(axis=1, ascending=True)
kf_r2 = kf_r2.reseting_index()
kf_r2.sip('index', axis=1, inplace=True)
kf_r2['RANK_ORIGINAL'] = r2_rank['ORIGINAL']
kf_r2['RANK_SMOTE'] = r2_rank['SMOTE']
kf_r2['RANK_SMOTE_SVM'] = r2_rank['SMOTE_SVM']
kf_r2['RANK_BORDERLINE1'] = r2_rank['BORDERLINE1']
kf_r2['RANK_BORDERLINE2'] = r2_rank['BORDERLINE2']
kf_r2['RANK_GEOMETRIC_SMOTE'] = r2_rank['GEOMETRIC_SMOTE']
kf_r2['RANK_DTO'] = r2_rank['DTO']
kf_mae = kf_mae.reseting_index()
kf_mae.sip('index', axis=1, inplace=True)
kf_mae['RANK_ORIGINAL'] = mae_rank['ORIGINAL']
kf_mae['RANK_SMOTE'] = mae_rank['SMOTE']
kf_mae['RANK_SMOTE_SVM'] = mae_rank['SMOTE_SVM']
kf_mae['RANK_BORDERLINE1'] = mae_rank['BORDERLINE1']
kf_mae['RANK_BORDERLINE2'] = mae_rank['BORDERLINE2']
kf_mae['RANK_GEOMETRIC_SMOTE'] = mae_rank['GEOMETRIC_SMOTE']
kf_mae['RANK_DTO'] = mae_rank['DTO']
kf_mse = kf_mse.reseting_index()
kf_mse.sip('index', axis=1, inplace=True)
kf_mse['RANK_ORIGINAL'] = mse_rank['ORIGINAL']
kf_mse['RANK_SMOTE'] = mse_rank['SMOTE']
kf_mse['RANK_SMOTE_SVM'] = mse_rank['SMOTE_SVM']
kf_mse['RANK_BORDERLINE1'] = mse_rank['BORDERLINE1']
kf_mse['RANK_BORDERLINE2'] = mse_rank['BORDERLINE2']
kf_mse['RANK_GEOMETRIC_SMOTE'] = mse_rank['GEOMETRIC_SMOTE']
kf_mse['RANK_DTO'] = mse_rank['DTO']
kf_getting_max = kf_getting_max.reseting_index()
kf_getting_max.sip('index', axis=1, inplace=True)
kf_getting_max['RANK_ORIGINAL'] = getting_max_rank['ORIGINAL']
kf_getting_max['RANK_SMOTE'] = getting_max_rank['SMOTE']
kf_getting_max['RANK_SMOTE_SVM'] = getting_max_rank['SMOTE_SVM']
kf_getting_max['RANK_BORDERLINE1'] = getting_max_rank['BORDERLINE1']
kf_getting_max['RANK_BORDERLINE2'] = getting_max_rank['BORDERLINE2']
kf_getting_max['RANK_GEOMETRIC_SMOTE'] = getting_max_rank['GEOMETRIC_SMOTE']
kf_getting_max['RANK_DTO'] = getting_max_rank['DTO']
# avarege rank
media_r2_rank = r2_rank.average(axis=0)
media_mae_rank = mae_rank.average(axis=0)
media_mse_rank = mse_rank.average(axis=0)
media_getting_max_rank = getting_max_rank.average(axis=0)
media_r2_rank_file = media_r2_rank.reseting_index()
media_r2_rank_file = media_r2_rank_file.sort_the_values(by=0)
media_mae_rank_file = media_mae_rank.reseting_index()
media_mae_rank_file = media_mae_rank_file.sort_the_values(by=0)
media_mse_rank_file = media_mse_rank.reseting_index()
media_mse_rank_file = media_mse_rank_file.sort_the_values(by=0)
media_getting_max_rank_file = media_getting_max_rank.reseting_index()
media_getting_max_rank_file = media_getting_max_rank_file.sort_the_values(by=0)
if smote == False:
# Grava arquivos importantes
kf_r2.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv', index=False)
kf_mae.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv', index=False)
kf_mse.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv', index=False)
kf_getting_max.to_csv(
rank_dir + release + '_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_getting_max.csv', index=False)
media_r2_rank_file.to_csv(
rank_dir + release + '_' + 'media_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv',
index=False)
media_mae_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv',
index=False)
media_mse_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv',
index=False)
media_getting_max_rank_file.to_csv(
rank_dir + release + '_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_getting_max.csv',
index=False)
GEOMETRY = order + '_' + str(alpha)
# grafico CD
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
'DTO']
avranks = list(media_r2_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_r2.pkf')
plt.close()
avranks = list(media_mae_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_mae.pkf')
plt.close()
avranks = list(media_mse_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_mse.pkf')
plt.close()
avranks = list(media_getting_max_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_' + '_' + GEOMETRY + '_' + name + '_getting_max.pkf')
plt.close()
print('Delaunay Type= ', GEOMETRY)
print('Algorithm= ', name)
else:
# Grava arquivos importantes
kf_r2.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv', index=False)
kf_mae.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv', index=False)
kf_mse.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv', index=False)
kf_getting_max.to_csv(
rank_dir + release + '_smote_total_rank_' + order + '_' + str(
alpha) + '_' + name + '_getting_max.csv', index=False)
media_r2_rank_file.to_csv(
rank_dir + release + '_smote_media_rank_' + order + '_' + str(
alpha) + '_' + name + '_r2.csv',
index=False)
media_mae_rank_file.to_csv(
rank_dir + release + '_smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mae.csv',
index=False)
media_mse_rank_file.to_csv(
rank_dir + release + 'smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_mse.csv',
index=False)
media_getting_max_rank_file.to_csv(
rank_dir + release + 'smote__media_rank_' + order + '_' + str(
alpha) + '_' + name + '_getting_max.csv',
index=False)
GEOMETRY = order + '_' + str(alpha)
# grafico CD
identificadores = ['ORIGINAL', 'SMOTE', 'SMOTE_SVM', 'BORDERLINE1', 'BORDERLINE2', 'GEOMETRIC_SMOTE',
GEOMETRY]
avranks = list(media_r2_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_pre.pkf')
plt.close()
avranks = list(media_mae_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_rec.pkf')
plt.close()
avranks = list(media_mse_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(
rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_spe.pkf')
plt.close()
avranks = list(media_getting_max_rank)
cd = Orange.evaluation.compute_CD(avranks, length(datasets))
Orange.evaluation.graph_ranks(avranks, identificadores, cd=cd, width=9, textspace=3)
plt.savefig(rank_dir + release + 'cd_smote' + '_' + GEOMETRY + '_' + name + '_f1.pkf')
plt.close()
print('SMOTE Delaunay Type= ', GEOMETRY)
print('SMOTE Algorithm= ', name)
def rank_dto_by(self, geometry, release, smote=False):
M = ['_r2.csv', '_mae.csv', '_mse.csv', '_getting_max.csv']
kf_media_rank = mk.KnowledgeFrame(columns=['ALGORITHM', 'RANK_ORIGINAL', 'RANK_SMOTE',
'RANK_SMOTE_SVM', 'RANK_BORDERLINE1', 'RANK_BORDERLINE2',
'RANK_GEOMETRIC_SMOTE', 'RANK_DTO', 'unit'])
if smote == False:
name = rank_dir + release + '_total_rank_' + geometry + '_'
else:
name = rank_dir + release + '_smote_total_rank_' + geometry + '_'
for m in M:
i = 0
for c in regression_list:
kf = mk.read_csv(name + c + m)
rank_original = kf.RANK_ORIGINAL.average()
rank_smote = kf.RANK_SMOTE.average()
rank_smote_svm = kf.RANK_SMOTE_SVM.average()
rank_b1 = kf.RANK_BORDERLINE1.average()
rank_b2 = kf.RANK_BORDERLINE2.average()
rank_geo_smote = kf.RANK_GEOMETRIC_SMOTE.average()
rank_dto = kf.RANK_DTO.average()
kf_media_rank.loc[i, 'ALGORITHM'] = kf.loc[0, 'ALGORITHM']
kf_media_rank.loc[i, 'RANK_ORIGINAL'] = rank_original
kf_media_rank.loc[i, 'RANK_SMOTE'] = rank_smote
kf_media_rank.loc[i, 'RANK_SMOTE_SVM'] = rank_smote_svm
kf_media_rank.loc[i, 'RANK_BORDERLINE1'] = rank_b1
kf_media_rank.loc[i, 'RANK_BORDERLINE2'] = rank_b2
kf_media_rank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = rank_geo_smote
kf_media_rank.loc[i, 'RANK_DTO'] = rank_dto
kf_media_rank.loc[i, 'unit'] = kf.loc[0, 'unit']
i += 1
kfmediarank = kf_media_rank.clone()
kfmediarank = kfmediarank.sort_the_values('RANK_DTO')
kfmediarank.loc[i, 'ALGORITHM'] = 'avarage'
kfmediarank.loc[i, 'RANK_ORIGINAL'] = kf_media_rank['RANK_ORIGINAL'].average()
kfmediarank.loc[i, 'RANK_SMOTE'] = kf_media_rank['RANK_SMOTE'].average()
kfmediarank.loc[i, 'RANK_SMOTE_SVM'] = kf_media_rank['RANK_SMOTE_SVM'].average()
kfmediarank.loc[i, 'RANK_BORDERLINE1'] = kf_media_rank['RANK_BORDERLINE1'].average()
kfmediarank.loc[i, 'RANK_BORDERLINE2'] = kf_media_rank['RANK_BORDERLINE2'].average()
kfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = kf_media_rank['RANK_GEOMETRIC_SMOTE'].average()
kfmediarank.loc[i, 'RANK_DTO'] = kf_media_rank['RANK_DTO'].average()
kfmediarank.loc[i, 'unit'] = kf.loc[0, 'unit']
i += 1
kfmediarank.loc[i, 'ALGORITHM'] = 'standard'
kfmediarank.loc[i, 'RANK_ORIGINAL'] = kf_media_rank['RANK_ORIGINAL'].standard()
kfmediarank.loc[i, 'RANK_SMOTE'] = kf_media_rank['RANK_SMOTE'].standard()
kfmediarank.loc[i, 'RANK_SMOTE_SVM'] = kf_media_rank['RANK_SMOTE_SVM'].standard()
kfmediarank.loc[i, 'RANK_BORDERLINE1'] = kf_media_rank['RANK_BORDERLINE1'].standard()
kfmediarank.loc[i, 'RANK_BORDERLINE2'] = kf_media_rank['RANK_BORDERLINE2'].standard()
kfmediarank.loc[i, 'RANK_GEOMETRIC_SMOTE'] = kf_media_rank['RANK_GEOMETRIC_SMOTE'].standard()
kfmediarank.loc[i, 'RANK_DTO'] = kf_media_rank['RANK_DTO'].standard()
kfmediarank.loc[i, 'unit'] = kf.loc[0, 'unit']
kfmediarank['RANK_ORIGINAL'] = mk.to_num(kfmediarank['RANK_ORIGINAL'], downcast="float").value_round(2)
kfmediarank['RANK_SMOTE'] = mk.to_num(kfmediarank['RANK_SMOTE'], downcast="float").value_round(2)
kfmediarank['RANK_SMOTE_SVM'] = mk.to_num(kfmediarank['RANK_SMOTE_SVM'], downcast="float").value_round(2)
kfmediarank['RANK_BORDERLINE1'] = mk.to_num(kfmediarank['RANK_BORDERLINE1'], downcast="float").value_round(2)
kfmediarank['RANK_BORDERLINE2'] = mk.to_num(kfmediarank['RANK_BORDERLINE2'], downcast="float").value_round(2)
kfmediarank['RANK_GEOMETRIC_SMOTE'] = mk.to_num(kfmediarank['RANK_GEOMETRIC_SMOTE'],
downcast="float").value_round(2)
kfmediarank['RANK_DTO'] = mk.to_num(kfmediarank['RANK_DTO'], downcast="float").value_round(2)
if smote == False:
kfmediarank.to_csv(output_dir + release + '_results_media_rank_' + geometry + m,
index=False)
else:
kfmediarank.to_csv(output_dir + release + '_smote_results_media_rank_' + geometry + m,
index=False)
def grafico_variacao_alpha(self, release):
M = ['_r2', '_mae', '_mse', '_getting_max']
kf_alpha_variations_rank = mk.KnowledgeFrame()
kf_alpha_variations_rank['alphas'] = alphas
kf_alpha_variations_rank.index = alphas
kf_alpha_total_all = mk.KnowledgeFrame()
kf_alpha_total_all['alphas'] = alphas
kf_alpha_total_all.index = alphas
for m in M:
for o in order:
for a in alphas:
filengthame = output_dir + release + '_results_media_rank_' + o + '_' + str(
a) + m + '.csv'
print(filengthame)
kf = mk.read_csv(filengthame)
average = kf.loc[8, 'RANK_DTO']
kf_alpha_variations_rank.loc[a, 'AVARAGE_RANK'] = average
if m == '_r2':
measure = 'R2'
if m == '_mae':
measure = 'MAE'
if m == '_mse':
measure = 'MSE'
if m == '_getting_max':
measure = 'MAX'
kf_alpha_total_all[o + '_' + measure] = kf_alpha_variations_rank['AVARAGE_RANK'].clone()
fig, ax = plt.subplots()
ax.set_title('DTO AVARAGE RANK\n ' + 'GEOMETRY = ' + o + '\nMEASURE = ' + measure, fontsize=10)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
ax.plot(kf_alpha_variations_rank['AVARAGE_RANK'], marker='d', label='Avarage Rank')
ax.legend(loc="upper right")
plt.xticks(range(11))
fig.savefig(graphics_dir + release + '_pic_' + o + '_' + measure + '.png', dpi=125)
plt.show()
plt.close()
# figure(num=None, figsize=(10, 10), dpi=800, facecolor='w', edgecolor='k')
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = R2', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_alpha_total_all['alphas']
t2 = kf_alpha_total_all['alphas']
t3 = kf_alpha_total_all['alphas']
ft1 = kf_alpha_total_all['getting_max_solid_angle_R2']
ft2 = kf_alpha_total_all['getting_min_solid_angle_R2']
ft3 = kf_alpha_total_all['solid_angle_R2']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_total_all_r2.png', dpi=800)
plt.show()
plt.close()
kf_alpha_total_all.to_csv(graphics_dir + release + '_pic_total_all_r2.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = MAE', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_alpha_total_all['alphas']
t2 = kf_alpha_total_all['alphas']
t3 = kf_alpha_total_all['alphas']
ft1 = kf_alpha_total_all['getting_max_solid_angle_MAE']
ft2 = kf_alpha_total_all['getting_min_solid_angle_MAE']
ft3 = kf_alpha_total_all['solid_angle_MAE']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_total_all_mae.png', dpi=800)
plt.show()
plt.close()
kf_alpha_total_all.to_csv(graphics_dir + release + '_pic_total_all_mae.csv', index=False)
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = MSE', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_alpha_total_all['alphas']
t2 = kf_alpha_total_all['alphas']
t3 = kf_alpha_total_all['alphas']
ft1 = kf_alpha_total_all['getting_max_solid_angle_MSE']
ft2 = kf_alpha_total_all['getting_min_solid_angle_MSE']
ft3 = kf_alpha_total_all['solid_angle_MSE']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_total_all_mse.png', dpi=800)
plt.show()
plt.close()
kf_alpha_total_all.to_csv(graphics_dir + release + '_pic_total_all_mse.csv', index=False)
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = MAX', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_alpha_total_all['alphas']
t2 = kf_alpha_total_all['alphas']
t3 = kf_alpha_total_all['alphas']
ft1 = kf_alpha_total_all['getting_max_solid_angle_MAX']
ft2 = kf_alpha_total_all['getting_min_solid_angle_MAX']
ft3 = kf_alpha_total_all['solid_angle_MAX']
ax.plot(t1, ft1, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t2, ft2, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t3, ft3, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(graphics_dir + release + '_pic_total_all_getting_max.png', dpi=800)
plt.show()
plt.close()
kf_alpha_total_all.to_csv(graphics_dir + release + '_pic_total_all_getting_max.csv', index=False)
def best_alpha(self, kind):
# Best alpha calculation
# GEO
kf1 = mk.read_csv(output_dir + 'v1' + '_pic_total_all_geo.csv')
kf2 = mk.read_csv(output_dir + 'v2' + '_pic_total_all_geo.csv')
kf3 = mk.read_csv(output_dir + 'v3' + '_pic_total_all_geo.csv')
if kind == 'biclass':
col = ['area_GEO', 'volume_GEO', 'area_volume_ratio_GEO',
'edge_ratio_GEO', 'radius_ratio_GEO', 'aspect_ratio_GEO',
'getting_max_solid_angle_GEO', 'getting_min_solid_angle_GEO', 'solid_angle_GEO',
'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA', 'edge_ratio_IBA',
'radius_ratio_IBA', 'aspect_ratio_IBA', 'getting_max_solid_angle_IBA',
'getting_min_solid_angle_IBA', 'solid_angle_IBA', 'area_AUC', 'volume_AUC',
'area_volume_ratio_AUC', 'edge_ratio_AUC', 'radius_ratio_AUC',
'aspect_ratio_AUC', 'getting_max_solid_angle_AUC', 'getting_min_solid_angle_AUC',
'solid_angle_AUC']
else:
col = ['area_GEO', 'volume_GEO',
'area_volume_ratio_GEO', 'edge_ratio_GEO', 'radius_ratio_GEO',
'aspect_ratio_GEO', 'getting_max_solid_angle_GEO', 'getting_min_solid_angle_GEO',
'solid_angle_GEO', 'area_IBA', 'volume_IBA', 'area_volume_ratio_IBA',
'edge_ratio_IBA', 'radius_ratio_IBA', 'aspect_ratio_IBA',
'getting_max_solid_angle_IBA', 'getting_min_solid_angle_IBA', 'solid_angle_IBA']
kf_average = mk.KnowledgeFrame()
kf_average['alphas'] = kf1.alphas
for c in col:
for i in np.arange(0, kf1.shape[0]):
kf_average.loc[i, c] = (kf1.loc[i, c] + kf2.loc[i, c] + kf3.loc[i, c]) / 3.0
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = GEO', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_average['alphas']
t2 = kf_average['alphas']
t3 = kf_average['alphas']
t4 = kf_average['alphas']
t5 = kf_average['alphas']
t6 = kf_average['alphas']
t7 = kf_average['alphas']
t8 = kf_average['alphas']
t9 = kf_average['alphas']
ft1 = kf_average['area_GEO']
ft2 = kf_average['volume_GEO']
ft3 = kf_average['area_volume_ratio_GEO']
ft4 = kf_average['edge_ratio_GEO']
ft5 = kf_average['radius_ratio_GEO']
ft6 = kf_average['aspect_ratio_GEO']
ft7 = kf_average['getting_max_solid_angle_GEO']
ft8 = kf_average['getting_min_solid_angle_GEO']
ft9 = kf_average['solid_angle_GEO']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_geo.png', dpi=800)
plt.show()
plt.close()
kf_average.to_csv(output_dir + kind + '_pic_average_geo.csv', index=False)
###################
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = IBA', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_average['alphas']
t2 = kf_average['alphas']
t3 = kf_average['alphas']
t4 = kf_average['alphas']
t5 = kf_average['alphas']
t6 = kf_average['alphas']
t7 = kf_average['alphas']
t8 = kf_average['alphas']
t9 = kf_average['alphas']
ft1 = kf_average['area_IBA']
ft2 = kf_average['volume_IBA']
ft3 = kf_average['area_volume_ratio_IBA']
ft4 = kf_average['edge_ratio_IBA']
ft5 = kf_average['radius_ratio_IBA']
ft6 = kf_average['aspect_ratio_IBA']
ft7 = kf_average['getting_max_solid_angle_IBA']
ft8 = kf_average['getting_min_solid_angle_IBA']
ft9 = kf_average['solid_angle_IBA']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_iba.png', dpi=800)
plt.show()
plt.close()
kf_average.to_csv(output_dir + kind + '_pic_average_iba.csv', index=False)
if kind == 'biclass':
fig, ax = plt.subplots(figsize=(10, 7))
ax.set_title('DTO AVARAGE RANK\n ' + '\nMEASURE = AUC', fontsize=5)
ax.set_xlabel('Alpha')
ax.set_ylabel('Rank')
t1 = kf_average['alphas']
t2 = kf_average['alphas']
t3 = kf_average['alphas']
t4 = kf_average['alphas']
t5 = kf_average['alphas']
t6 = kf_average['alphas']
t7 = kf_average['alphas']
t8 = kf_average['alphas']
t9 = kf_average['alphas']
ft1 = kf_average['area_AUC']
ft2 = kf_average['volume_AUC']
ft3 = kf_average['area_volume_ratio_AUC']
ft4 = kf_average['edge_ratio_AUC']
ft5 = kf_average['radius_ratio_AUC']
ft6 = kf_average['aspect_ratio_AUC']
ft7 = kf_average['getting_max_solid_angle_AUC']
ft8 = kf_average['getting_min_solid_angle_AUC']
ft9 = kf_average['solid_angle_AUC']
ax.plot(t1, ft1, color='tab:blue', marker='o', label='area')
ax.plot(t2, ft2, color='tab:red', marker='o', label='volume')
ax.plot(t3, ft3, color='tab:green', marker='o', label='area_volume_ratio')
ax.plot(t4, ft4, color='tab:orange', marker='o', label='edge_ratio')
ax.plot(t5, ft5, color='tab:olive', marker='o', label='radius_ratio')
ax.plot(t6, ft6, color='tab:purple', marker='o', label='aspect_ratio')
ax.plot(t7, ft7, color='tab:brown', marker='o', label='getting_max_solid_angle')
ax.plot(t8, ft8, color='tab:pink', marker='o', label='getting_min_solid_angle')
ax.plot(t9, ft9, color='tab:gray', marker='o', label='solid_angle')
leg = ax.legend(loc='upper right')
leg.getting_frame().set_alpha(0.5)
plt.xticks(range(12))
plt.savefig(output_dir + kind + '_pic_average_auc.png', dpi=800)
plt.show()
plt.close()
kf_average.to_csv(output_dir + kind + '_pic_average_auc.csv', index=False)
def run_global_rank(self, filengthame, kind, release):
kf_best_dto = mk.read_csv(filengthame)
kf_B1 = kf_best_dto[kf_best_dto['PREPROC'] == '_Borderline1'].clone()
kf_B2 = kf_best_dto[kf_best_dto['PREPROC'] == '_Borderline2'].clone()
kf_GEO = kf_best_dto[kf_best_dto['PREPROC'] == '_Geometric_SMOTE'].clone()
kf_SMOTE = kf_best_dto[kf_best_dto['PREPROC'] == '_SMOTE'].clone()
kf_SMOTEsvm = kf_best_dto[kf_best_dto['PREPROC'] == '_smoteSVM'].clone()
kf_original = kf_best_dto[kf_best_dto['PREPROC'] == '_train'].clone()
o = 'solid_angle'
if kind == 'biclass':
a = 7.0
else:
a = 7.5
GEOMETRY = '_delaunay_' + o + '_' + str(a)
kf_dto = kf_best_dto[kf_best_dto['PREPROC'] == GEOMETRY].clone()
kf = mk.concating([kf_B1, kf_B2, kf_GEO, kf_SMOTE, kf_SMOTEsvm, kf_original, kf_dto])
self.rank_by_algorithm(kf, kind, o, str(a), release, smote=True)
self.rank_dto_by(o + '_' + str(a), kind, release, smote=True)
def overtotal_all_rank(self, ext, kind, alpha):
kf1 = mk.read_csv(
output_dir + 'v1_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
kf2 = mk.read_csv(
output_dir + 'v2_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
kf3 = mk.read_csv(
output_dir + 'v3_smote_' + kind + '_results_media_rank_solid_angle_' + str(alpha) + '_' + ext + '.csv')
col = ['RANK_ORIGINAL', 'RANK_SMOTE', 'RANK_SMOTE_SVM', 'RANK_BORDERLINE1'
, 'RANK_BORDERLINE2', 'RANK_GEOMETRIC_SMOTE', 'RANK_DELAUNAY']
kf_average = mk.KnowledgeFrame()
kf_average['ALGORITHM'] = kf1.ALGORITHM
kf_average['unit'] = kf1.unit
for c in col:
for i in np.arange(0, kf1.shape[0]):
kf_average.loc[i, c] = (kf1.loc[i, c] + kf2.loc[i, c] + kf3.loc[i, c]) / 3.0
kf_average['RANK_ORIGINAL'] = mk.to_num(kf_average['RANK_ORIGINAL'], downcast="float").value_round(2)
kf_average['RANK_SMOTE'] = mk.to_num(kf_average['RANK_SMOTE'], downcast="float").value_round(2)
kf_average['RANK_SMOTE_SVM'] = mk.to_num(kf_average['RANK_SMOTE_SVM'], downcast="float").value_round(2)
kf_average['RANK_BORDERLINE1'] = | mk.to_num(kf_average['RANK_BORDERLINE1'], downcast="float") | pandas.to_numeric |
import monkey as mk
import ast
import sys
import os.path
from monkey.core.algorithms import incontain
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformating()
def load_member_total_summaries(
source_dir="data_for_graph/members",
filengthame="compwhatever_check",
# concating_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
kfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
total_summary_filengthame = os.path.join(source_dir, membership_level, f"{membership_level}_{filengthame}.csv")
print ("reading total_summary from", total_summary_filengthame)
kfs.adding(mk.read_csv(total_summary_filengthame, index_col=0).renagetting_ming(columns={"database_id": "id"}))
total_summaries = mk.concating(kfs)
# if concating_uk_sector:
# member_uk_sectors = mk.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisionisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].mapping(ast.literal_eval)
# total_summaries = total_summaries.join(member_uk_sectors, on="member_name", how="left")
return total_summaries
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = mk.read_csv(f"{source_dir}/total_all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.traversal():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = mk.read_csv(f"{data_dir}/total_all_commerces_with_categories.csv", index_col=0)
commerces = commerces.sip_duplicates("commerce_name")
i = 0
for _, row in commerces.traversal():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_compwhatever",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
members = load_member_total_summaries(concating_uk_sector=False)
members = members[cols_of_interest]
members = members.sip_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~mk.ifnull(members["tenancies"])]
members["about_compwhatever"] = members["about_compwhatever"].mapping(remove_html_tags, na_action="ignore")
members = members.sort_the_values("member_name")
i = 0
for _, row in members.traversal():
member_name = row["member_name"]
if mk.ifnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not mk.ifnull(row[k]) and k in {"sectors", "buys", "sells"}
else ast.literal_eval(row[k]) if not mk.ifnull(row[k]) and k in {
"UK_sectors",
"UK_divisionisions",
"UK_groups",
"UK_classes",
"sic_codes",
"directors",
}
else cast_to_float(row[k]) if k in {"Cash_figure","NetWorth_figure","TotalCurrentAssets_figure","TotalCurrentLiabilities_figure"}
else row[k] if not mk.ifnull(row[k])
else None)
for k in cols_of_interest
},
}
if not mk.ifnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
if mk.ifnull(director["director_name"]):
continue
if not mk.ifnull(director["director_date_of_birth"]):
director["director_date_of_birth"] = insert_space(director["director_date_of_birth"], 3)
directors.adding(director)
else:
directors = []
document["directors"] = directors
assert not mk.ifnull(row["tenancies"])
tenancies = []
regions = []
for tenancy in row["tenancies"].split(separator):
tenancies.adding(tenancy)
if tenancy == "Made in the Midlands":
regions.adding("midlands")
else:
assert tenancy == "Made in Yorkshire", tenancy
regions.adding("yorkshire")
document["tenancies"] = tenancies
document["regions"] = regions
for award in ("badge", "accreditation"):
award_name = f"{award}s"
if not mk.ifnull(row[award_name]):
awards = []
for a in row[award_name].split(separator):
awards.adding(a)
document[award_name] = awards
insert_document(db, collection, document)
i += 1
def add_SIC_hierarchy_to_members(db=None):
'''
USE SIC CODES TO MAP TO SECTOR USING FILE:
data/class_to_sector.json
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
getting_sic_codes_query = f'''
FOR m IN Members
FILTER m.sic_codes != NULL
RETURN {{
_key: m._key,
sic_codes: m.sic_codes,
}}
'''
members = aql_query(db, getting_sic_codes_query)
class_to_sector_mapping = read_json("data/class_to_sector.json")
for member in members:
sic_codes = member["sic_codes"]
sic_codes = [sic_code.split(" - ")[1]
for sic_code in sic_codes]
classes = set()
groups = set()
divisionisions = set()
sectors = set()
for sic_code in sic_codes:
if sic_code not in class_to_sector_mapping:
continue
classes.add(sic_code)
groups.add(class_to_sector_mapping[sic_code]["group"])
divisionisions.add(class_to_sector_mapping[sic_code]["divisionision"])
sectors.add(class_to_sector_mapping[sic_code]["sector"])
document = {
"_key" : member["_key"],
"UK_classes": sorted(classes),
"UK_groups": sorted(groups),
"UK_divisionisions": sorted(divisionisions),
"UK_sectors": sorted(sectors),
}
insert_document(db, collection, document, verbose=True)
def populate_users(
data_dir="data_for_graph",
cols_of_interest=[
"id",
"full_name",
"email",
"compwhatever_name",
"compwhatever_position",
"compwhatever_role",
],
db=None):
'''
CREATE AND ADD USER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Users", db, )
user_filengthame = f"{data_dir}/total_all_users.csv"
users = mk.read_csv(user_filengthame, index_col=0)
users["compwhatever_role"] = users.employ(
infer_role,
axis=1
)
i = 0
for _, row in users.traversal():
user_name = row["full_name"]
if mk.ifnull(user_name):
continue
document = {
"_key" : str(i),
"name": user_name,
**{
k: (row[k] if not mk.ifnull(row[k]) else None)
for k in cols_of_interest
}
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_works_at(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("UserWorksAt", db, className="Edges")
user_filengthame = f"{data_dir}/total_all_users.csv"
users = mk.read_csv(user_filengthame, index_col=0)
users["compwhatever_role"] = users.employ(
infer_role,
axis=1
)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.traversal():
user_id = row["id"]
compwhatever_id = row["compwhatever_id"]
if user_id not in user_name_to_id:
continue
if compwhatever_id not in member_name_to_id:
continue
document = {
"_key" : str(i),
"name": "works_at",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[compwhatever_id],
"compwhatever_position": row["compwhatever_position"]
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_follows(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
user_follows_collection = connect_to_collection("UserFollows", db, className="Edges")
user_follows_members_collection = connect_to_collection("MemberMemberFollows", db, className="Edges")
user_follows_filengthame = os.path.join(data_dir, "total_all_user_follows.csv")
users = mk.read_csv(user_follows_filengthame, index_col=0)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.traversal():
user_id = row["id"]
if user_id not in user_name_to_id:
continue
user_name = row["full_name"]
employer_id = row["employer_id"]
followed_member_id = row["followed_member_id"]
if followed_member_id not in member_name_to_id:
continue
# user -> member
document = {
"_key" : str(i),
"name": "follows",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[followed_member_id]
}
print ("inserting data", document)
insert_document(db, user_follows_collection, document)
# member -> member
if employer_id in member_name_to_id:
document = {
"_key" : str(i),
"name": "follows",
"_from": member_name_to_id[employer_id],
"_to": member_name_to_id[followed_member_id],
"followed_by": user_name,
}
print ("inserting data", document)
insert_document(db, user_follows_members_collection, document)
i += 1
def populate_member_sectors(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("InSector", db, className="Edges")
members = load_member_total_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
sector_name_to_id = name_to_id(db, "Sectors", "sector_name")
for _, row in members.traversal():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
sectors = row["sectors"]
if mk.ifnull(sectors):
continue
sectors = sectors.split(separator)
for sector in sectors:
document = {
"_key" : str(i),
"name": "in_sector",
"_from": member_name_to_id[member_id],
"_to": sector_name_to_id[sector],
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_member_commerces(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("PerformsCommerce", db, className="Edges")
members = load_member_total_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
commerce_name_to_id = name_to_id(db, "Commerces", "commerce")
for _, row in members.traversal():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
for commerce_type in ("buys", "sells"):
commerce = row[commerce_type]
if not mk.ifnull(commerce):
commerce = commerce.split(separator)
for c in commerce:
if c=="":
assert False
continue
document = {
"_key" : str(i),
"name": commerce_type,
"_from": member_name_to_id[member_id],
"_to": commerce_name_to_id[c],
"commerce_type": commerce_type
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_messages(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Messages", db, className="Edges")
message_filengthame = os.path.join(data_dir, "total_all_messages.csv")
messages = mk.read_csv(message_filengthame, index_col=0)
messages = messages.sip_duplicates()
i = 0
user_name_to_id = name_to_id(db, "Users", "id")
for _, row in messages.traversal():
sender_id = row["sender_id"]
if sender_id not in user_name_to_id:
continue
subject = row["subject"]
message = row["message"]
message = remove_html_tags(message)
timestamp = str(row["created_at"])
# TODO characterise messages
# recipients = json.loads(row["total_all_recipients"])
# for recipient in recipients:
# receiver = recipient["name"]
receiver_id = row["recipient_id"]
# receiver_member = row["recipient_member_name"]
if receiver_id not in user_name_to_id:
continue
if sender_id == receiver_id:
continue
document = {
"_key": str(i),
"name": "messages",
"_from": user_name_to_id[sender_id],
"_to": user_name_to_id[receiver_id],
"subject": subject,
"message": message,
"sent_at": convert_to_iso8601(timestamp),
}
insert_document(db, collection, document)
i += 1
def populate_member_member_business(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("MemberMemberBusiness", db, className="Edges")
member_name_to_id = name_to_id(db, "Members", "member_name")
i = 0
# articles
for region in ("yorkshire", "midlands"):
filengthame = os.path.join("members", f"member_member_partnerships - {region}_matched.csv")
member_member_business = mk.read_csv(filengthame, index_col=None)
for _, row in member_member_business.traversal():
member_1 = row["member_1_best_matching_member"]
member_2 = row["member_2_best_matching_member"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
article_title = row["article_title"]
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_article"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
# "_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "article",
"article_title": article_title,
"region": region
}
insert_document(db, collection, document)
i += 1
# survey connections
connections_filengthame="survey/final_processed_connections.csv"
survey_connections = mk.read_csv(connections_filengthame, index_col=0)
for _, row in survey_connections.traversal():
member_1 = row["best_matching_member_name"]
member_2 = row["submitted_partner_best_matching_member_name"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_survey"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
"_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "survey",
}
insert_document(db, collection, document)
i += 1
def populate_events(
data_dir="data_for_graph",
cols_of_interest = [
"id",
"event_name",
"event_type",
"tenants",
"members",
"description",
"status",
"venue",
"starts_at",
"ends_at",
],
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Events", db,)
events_kf_filengthame = os.path.join(data_dir, "total_all_events.csv")
events_kf = mk.read_csv(events_kf_filengthame, index_col=0)
# events_kf = events_kf.sip_duplicates(["event_name", "starts_at"])
i = 0
for _, row in events_kf.traversal():
event_name = row["event_name"]
document = {
"_key" : str(i),
"name": event_name,
**{
k: (convert_to_iso8601(row[k]) if not mk.ifnull(row[k]) and k in ("starts_at", "ends_at", )
else row[k].split(separator) if not mk.ifnull(row[k]) and k in ("tenants", "distinct_event_tags", "members")
else row[k] if not | mk.ifnull(row[k]) | pandas.isnull |
from flask import Flask, render_template, request, redirect, make_response, url_for
app_onc = Flask(__name__)
import astrodbkit
from astrodbkit import astrodb
from SEDkit import sed
from SEDkit import utilities as u
import os
import sys
import re
from io import StringIO
from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.models import ColumnDataSource, HoverTool, OpenURL, TapTool, Range1d
from bokeh.models.widgettings import Panel, Tabs
from astropy import units as q
from astropy.coordinates import SkyCoord
import astropy.constants as ac
from scipy.ndimage.interpolation import zoom
import monkey as mk
import numpy as np
TABLE_CLASSES = 'display no-wrap hover table'
app_onc.vars = dict()
app_onc.vars['query'] = ''
app_onc.vars['search'] = ''
app_onc.vars['specid'] = ''
app_onc.vars['source_id'] = ''
db_file = os.environ['ONC_database']
db = astrodb.Database(db_file)
mk.set_option('getting_max_colwidth', -1)
# Redirect to the main page
@app_onc.route('/')
@app_onc.route('/index')
# Page with a text box to take the SQL query
@app_onc.route('/index', methods=['GET', 'POST'])
def onc_query():
defquery = 'SELECT * FROM sources'
if app_onc.vars['query']=='':
app_onc.vars['query'] = defquery
# Get list of the catalogs
source_count, = db.list("SELECT Count(*) FROM sources").fetchone()
catalogs = db.query("SELECT * FROM publications", fmt='table')
cat_names = ''.join(['<li><a href="https://ui.adsabs.harvard.edu/?#abs/{}/abstract">{}</a></li>'.formating(cat['bibcode'],cat['description'].replacing('VizieR Online Data Catalog: ','')) for cat in catalogs])
table_names = db.query("select * from sqlite_master where type='table' or type='view'")['name']
tables = '\n'.join(['<option value="{0}" {1}> {0}</option>'.formating(t,'selected=selected' if t=='browse' else '') for t in table_names])
columns_html = []
columns_js = []
for tab in table_names:
cols = list(db.query("pragma table_info('{}')".formating(tab))['name'])
col_html = ''.join(['<input type="checkbox" value="{0}" name="selections"> {0}<br>'.formating(c) for c in cols])
columns_html.adding('<division id="{}" class="columns" style="display:none">{}</division>'.formating(tab,col_html))
col_js = ','.join(["{id:'"+c+"',label:'"+c+"',type:'string'}" for c in cols])
columns_js.adding(col_js)
column_select = ''.join(columns_html)
column_script = ''.join(columns_js)
return render_template('index.html', cat_names=cat_names, source_count=source_count,
defsearch=app_onc.vars['search'], specid=app_onc.vars['specid'],
source_id=app_onc.vars['source_id'], version=astrodbkit.__version__,
tables=tables, column_select=column_select, column_script=col_js)
# Grab results of query and display them
@app_onc.route('/runquery', methods=['POST','GET'])
def onc_runquery():
# db = astrodb.Database(db_file)
app_onc.vars['query'] = request.form['query_to_run']
htmltxt = app_onc.vars['query'].replacing('<', '<')
# Only SELECT commands are total_allowed
if not app_onc.vars['query'].lower().startswith('select'):
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Only SELECT queries are total_allowed. You typed:</p><p>'+htmltxt+'</p>')
# Run the query
standardout = sys.standardout # Keep a handle on the real standard output
sys.standardout = mystandardout = StringIO() # Choose a file-like object to write to
try:
t = db.query(app_onc.vars['query'], fmt='table', use_converters=False)
except ValueError:
t = db.query(app_onc.vars['query'], fmt='array', use_converters=False)
except:
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error in query:</p><p>'+htmltxt+'</p>')
sys.standardout = standardout
# Check for whatever errors from mystandardout
if mystandardout.gettingvalue().lower().startswith('could not execute'):
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error in query:</p><p>'+mystandardout.gettingvalue().replacing('<', '<')+'</p>')
# Check how mwhatever results were found
if type(t)==type(None):
return render_template('error.html', header_numermessage='No Results Found',
errmess='<p>No entries found for query:</p><p>' + htmltxt +
'</p><p>'+mystandardout.gettingvalue().replacing('<', '<')+'</p>')
# Remane RA and Dec columns
for idx,name in enumerate(t.colnames):
if name.endswith('.ra'):
t[name].name = 'ra'
if name.endswith('.dec'):
t[name].name = 'dec'
if name.endswith('.id'):
t[name].name = 'id'
if name.endswith('.source_id'):
t[name].name = 'source_id'
# Convert to Monkey data frame
try:
data = t.to_monkey()
except AttributeError:
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error for query:</p><p>'+htmltxt+'</p>')
try:
sources = data[['ra','dec','source_id']].values.convert_list()
sources = [[i[0], i[1], 'Source {}'.formating(int(i[2])), int(i[2])] for i in sources]
except:
try:
sources = data[['ra','dec','id']].values.convert_list()
sources = [[i[0], i[1], 'Source {}'.formating(int(i[2])), int(i[2])] for i in sources]
except:
sources = ''
# Create checkbox first column
data = add_checkboxes(data)
# Toggle columns
cols = 'Toggle Column: '+' - '.join(['<a class="toggle-vis" />{}</a>'.formating(name) for i,name in enumerate(t.colnames)])
# Data for export
export = [strip_html(str(i)) for i in list(data)[1:]]
export = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".formating(export)
# Add links to columns
data = link_columns(data, db, ['id','source_id','spectrum','image'])
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if whatever([incontainstance(i, (int, float)) for i in t[c]])]
axes = '\n'.join(['<option value="{}"> {}</option>'.formating(repr(b)+","+repr(list(t[b])), b) for b in columns])
table_html = data.to_html(classes='display', index=False).replacing('<','<').replacing('>','>')
print(table_html)
return render_template('results.html', table=table_html, query=app_onc.vars['query'], cols=cols,
sources=sources, axes=axes, export=export)
# Grab results of query and display them
@app_onc.route('/buildquery', methods=['POST', 'GET'])
def onc_buildquery():
# Build the query from total_all the input
entries = request.form
print(entries)
selections, builder_rules = [], []
for key in entries.keys():
for value in entries.gettinglist(key):
if key=='selections':
selections.adding(value)
if key.startswith('builder_rule'):
builder_rules.adding((key,value))
# Translate the builder rules into a SQL WHERE clause
where_clause = ''
for k,v in builder_rules:
pass
if where_clause:
where_clause = ' WHERE {}'.formating(where_clause)
build_query = "SELECT {} FROM {}{}".formating(','.join(selections), entries['table'], where_clause)
# db = astrodb.Database(db_file)
app_onc.vars['query'] = build_query
htmltxt = app_onc.vars['query'].replacing('<', '<')
# Only SELECT commands are total_allowed
if not app_onc.vars['query'].lower().startswith('select'):
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Only SELECT queries are total_allowed. You typed:</p><p>' + htmltxt + '</p>')
# Run the query
standardout = sys.standardout # Keep a handle on the real standard output
sys.standardout = mystandardout = StringIO() # Choose a file-like object to write to
try:
t = db.query(app_onc.vars['query'], fmt='table', use_converters=False)
except ValueError:
t = db.query(app_onc.vars['query'], fmt='array', use_converters=False)
except:
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error in query:</p><p>' + htmltxt + '</p>')
sys.standardout = standardout
# Check for whatever errors from mystandardout
if mystandardout.gettingvalue().lower().startswith('could not execute'):
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error in query:</p><p>' + mystandardout.gettingvalue().replacing('<', '<') + '</p>')
# Check how mwhatever results were found
if type(t) == type(None):
return render_template('error.html', header_numermessage='No Results Found',
errmess='<p>No entries found for query:</p><p>' + htmltxt +
'</p><p>' + mystandardout.gettingvalue().replacing('<', '<') + '</p>')
# Remane RA and Dec columns
for idx, name in enumerate(t.colnames):
if name.endswith('.ra'):
t[name].name = 'ra'
if name.endswith('.dec'):
t[name].name = 'dec'
if name.endswith('.id'):
t[name].name = 'id'
# Convert to Monkey data frame
try:
data = t.to_monkey()
except AttributeError:
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error for query:</p><p>' + htmltxt + '</p>')
# Create checkbox first column
data = add_checkboxes(data)
try:
script, division, warning_message = onc_skyplot(t)
except:
script = division = warning_message = ''
# Add links to columns
data = link_columns(data, db, ['id', 'source_id', 'spectrum', 'image'])
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if incontainstance(t[c][0], (int, float))]
axes = '\n'.join(['<option value="{}"> {}</option>'.formating(repr(b) + "," + repr(list(t[b])), b) for b in columns])
# Data for export
export = [strip_html(str(i)) for i in list(data)[1:]]
export = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".formating(export)
# Generate HTML
table_html = data.to_html(classes='display', index=False).replacing('<', '<').replacing('>', '>')
return render_template('results.html', table=table_html, query=app_onc.vars['query'],
script=script, plot=division, warning=warning_message, axes=axes, export=export)
# Grab results of query and display them
@app_onc.route('/plot', methods=['POST','GET'])
def onc_plot():
# Get the axes to plot
xaxis, xdata = eval(request.form['xaxis'])
yaxis, ydata = eval(request.form['yaxis'])
# Make the plot
tools = "resize,crosshair,pan,wheel_zoom,box_zoom,reset"
p = figure(tools=tools, x_axis_label=xaxis, y_axis_label=yaxis, plot_width=800)
p.circle(xdata, ydata)
title = '{} v. {}'.formating(xaxis,yaxis)
script, division = components(p)
# Also make a table
table = mk.KnowledgeFrame(np.array([xdata,ydata]).T, columns=[xaxis,yaxis])
table = table.to_html(classes='display', index=False).replacing('<','<').replacing('>','>')
return render_template('plot.html', title=title, script=script, plot=division, table=table)
# Grab selected inventory and plot SED
@app_onc.route('/sed', methods=['POST'])
@app_onc.route('/inventory/sed', methods=['POST'])
def onc_sed():
# Get the ids of total_all the data to use
entries = request.form
age = (float(entries['age_getting_min'])*q.Myr, float(entries['age_getting_max'])*q.Myr)
radius = (float(entries['radius'])*ac.R_sun,float(entries['radius_unc'])*ac.R_sun)
source_id = int(entries['sources'])
spt_id = int(entries.getting('spectral_types', 0))
plx_id = int(entries.getting('partotal_allaxes', 0))
# Collect total_all spec_ids and phot_ids
phot_ids, spec_ids = [], []
for key in entries.keys():
for value in entries.gettinglist(key):
if key=='photometry':
phot_ids.adding(int(value))
elif key=='spectra':
spec_ids.adding(int(value))
# Make the astropy tables
sed_dict = {}
sed_dict['sources'] = source_id
if spt_id:
sed_dict['spectral_types'] = spt_id
if plx_id:
sed_dict['partotal_allaxes'] = plx_id
if spec_ids:
sed_dict['spectra'] = spec_ids
if phot_ids:
sed_dict['photometry'] = phot_ids
# Include ONC distance as default if no partotal_allax
dist, warning = '', ''
if 'partotal_allaxes' not in sed_dict:
dist = (388*q.pc,20*q.pc)
warning = "No distance given for this source. Using \(388\pm 20 pc\) from Kounkel et al. (2016)"
# Make the SED
try:
SED = sed.MakeSED(source_id, db, from_dict=sed_dict, dist=dist, age=age, radius=radius, phot_aliases='')
p = SED.plot(output=True)
except IOError:
return render_template('error.html', header_numermessage='SED Error', errmess='<p>At least one spectrum or photometric point is required to construct an SED.</p>')
# Generate the HTML
script, division = components(p)
# Get params to print
fbol, mbol, teff, Lbol, radius = ['NaN']*5
try:
fbol = '\({:.3e} \pm {:.3e}\)'.formating(SED.fbol.value,SED.fbol_unc.value)
except:
pass
try:
mbol = '\({} \pm {}\)'.formating(SED.mbol,SED.mbol_unc)
except:
pass
try:
teff = '\({} \pm {}\)'.formating(int(SED.Teff.value),SED.Teff_unc.value if np.ifnan(SED.Teff_unc.value) else int(SED.Teff_unc.value)) if SED.distance else '-'
except:
pass
try:
Lbol = '\({:.3f} \pm {:.3f}\)'.formating(SED.Lbol_sun,SED.Lbol_sun_unc) if SED.distance else '-'
except:
pass
try:
radius = '\({:.3f} \pm {:.3f}\)'.formating(SED.radius.to(ac.R_sun).value,SED.radius_unc.to(ac.R_sun).value) if SED.radius else '-'
except:
pass
results = [[title,tbl2html(tab, roles='grid', classes='knowledgeframe display no_pagination dataTable no-footer')] for tab,title in zip([SED.sources,SED.spectral_types,SED.partotal_allaxes,SED.photometry,SED.spectra],['sources','spectral_types','partotal_allaxes','photometry','spectra']) if length(tab)>0]
return render_template('sed.html', script=script, plot=division, spt=SED.SpT or '-', mbol=mbol, fbol=fbol,
teff=teff, Lbol=Lbol, radius=radius, title=SED.name, warning=warning, results=results)
def error_bars(xs, ys, zs):
"""
Generate errorbars for the photometry since Bokeh doesn't do it
"""
# Create the coordinates for the errorbars
err_xs, err_ys = [], []
for x, y, yerr in zip(xs, ys, zs):
if not np.ifnan(yerr):
err_xs.adding((x, x))
err_ys.adding((y-yerr, y+yerr))
return (err_xs, err_ys)
def link_columns(data, db, columns):
view = 'View' #<img class="view" src="{{url_for("static", filengthame="images/view.png")}}" />
# Change id to a link
if 'id' in columns and 'id' in data and 'source_id' not in data:
linklist = []
for i, elem in enumerate(data['id']):
link = '<a href="inventory/{0}">{1}</a>'.formating(data.iloc[i]['id'], elem)
linklist.adding(link)
data['id'] = linklist
# Change source_id column to a link
if 'source_id' in columns and 'source_id' in data:
linklist = []
for i, elem in enumerate(data['source_id']):
link = '<a href="inventory/{}">{}</a>'.formating(data.iloc[i]['source_id'], elem)
linklist.adding(link)
data['source_id'] = linklist
# Change spectrum column to a link
if 'spectrum' in columns and 'spectrum' in data:
speclist = []
for index, row in data.traversal():
spec = '<a href="../spectrum/{}">{}</a>'.formating(row['id'],view)
speclist.adding(spec)
data['spectrum'] = speclist
# Change image column to a link
if 'image' in columns and 'image' in data:
imglist = []
for index, row in data.traversal():
img = '<a href="../image/{}">{}</a>'.formating(row['id'],view)
imglist.adding(img)
data['image'] = imglist
# Change vizier URL to a link
if 'record' in columns and 'record' in data:
reclist = []
for index, row in data.traversal():
if row['record'] is None:
rec = None
else:
rec = '<a href="{}">{}</a>'.formating(row['record'],view)
reclist.adding(rec)
data['record'] = reclist
return data
@app_onc.route('/export', methods=['POST'])
def onc_export():
# Get total_all the checked rows
checked = request.form
# Get column names
print(checked.getting('cols'))
results = [list(eval(checked.getting('cols')))]
for k in sorted(checked):
if k.isdigit():
# Convert string to list and strip HTML
vals = eval(checked[k])
for i,v in enumerate(vals):
try:
vals[i] = str(v).split('>')[1].split('<')[0]
except:
pass
results.adding(vals)
# Make an array to export
results = np.array(results, dtype=str)
filengthame = 'ONCdb_results.txt'
np.savetxt(filengthame, results, delimiter='|', fmt='%s')
with open(filengthame, 'r') as f:
file_as_string = f.read()
os.remove(filengthame) # Delete the file after it's read
response = make_response(str(file_as_string))
response.header_numers["Content-type"] = 'text; charset=utf-8'
response.header_numers["Content-Disposition"] = "attachment; filengthame={}".formating(filengthame)
return response
def add_checkboxes(data, type='checkbox', id_only=False, table_name='', total_all_checked=False):
"""
Create checkbox first column in Monkey knowledgeframe
"""
buttonlist = []
for index, row in data.traversal():
val = strip_html(repr(list(row)))
if id_only:
val = val.split(',')[0].replacing('[','')
tab = table_name or str(index)
button = '<input type="{}" name="{}" value="{}"{}>'.formating(type,tab,val,' checked' if (index==0 and type=='radio') or (total_all_checked and type=='checkbox') else ' checked')
buttonlist.adding(button)
data['Select'] = buttonlist
cols = data.columns.convert_list()
cols.pop(cols.index('Select'))
data = data[['Select']+cols]
return data
# Perform a search
@app_onc.route('/search', methods=['POST'])
def onc_search():
# db = astrodb.Database(db_file)
app_onc.vars['search'] = request.form['search_to_run']
search_table = request.form['table']
search_value = app_onc.vars['search']
search_radius = 1/60.
# Process search
search_value = search_value.replacing(',', ' ').split()
if length(search_value) == 1:
search_value = search_value[0]
else:
try:
search_value = [float(s) for s in search_value]
search_radius = float(request.form['radius'])/60.
except:
return render_template('error.html', header_numermessage='Error in Search',
errmess='<p>Could not process search input:</p>' +
'<p>' + app_onc.vars['search'] + '</p>')
# Run the search
standardout = sys.standardout # Keep a handle on the real standard output
sys.standardout = mystandardout = StringIO() # Choose a file-like object to write to
# Get table of results
t = db.search(search_value, search_table, radius=search_radius, fetch=True)
sys.standardout = standardout
try:
data = t.to_monkey()
except AttributeError:
return render_template('error.html', header_numermessage='Error in Search',
errmess=mystandardout.gettingvalue().replacing('<', '<'))
try:
sources = data[['ra','dec','source_id']].values.convert_list()
sources = [[i[0], i[1], 'Source {}'.formating(int(i[2])), int(i[2])] for i in sources]
except:
try:
sources = data[['ra','dec','id']].values.convert_list()
sources = [[i[0], i[1], 'Source {}'.formating(int(i[2])), int(i[2])] for i in sources]
except:
sources = ''
if not data.empty:
# Create checkbox first column
data = add_checkboxes(data)
# Toggle columns
cols = 'Toggle Column: '+' - '.join(['<a class="toggle-vis" />{}</a>'.formating(name) for i,name in enumerate(t.colnames)])
# Data for export
export = [strip_html(str(i)) for i in list(data)[1:]]
export = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".formating(export)
# Add links to columns
data = link_columns(data, db, ['id', 'source_id', 'image','spectrum','record'])
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if incontainstance(t[c][0], (int, float))]
axes = '\n'.join(['<option value="{}"> {}</option>'.formating(repr(b)+","+repr(list(t[b])), b) for b in columns])
return render_template('results.html', table=data.to_html(classes='display', index=False).replacing('<','<').replacing('>','>'), query=search_value,
sources=sources, cols=cols, axes=axes, export=export)
else:
return render_template('error.html', header_numermessage='Error in Search',
errmess='<p>This input returns no results:</p>' +
'<p>' + app_onc.vars['search'] + '</p>')
# Plot a spectrum
@app_onc.route('/spectrum', methods=['POST'])
@app_onc.route('/spectrum/<int:specid>')
def onc_spectrum(specid=None):
# db = astrodb.Database(db_file)
if specid is None:
app_onc.vars['specid'] = request.form['spectrum_to_plot']
path = ''
else:
app_onc.vars['specid'] = specid
path = '../'
# If not a number, error
if not str(app_onc.vars['specid']).isdigit():
return render_template('error.html', header_numermessage='Error in Input',
errmess='<p>Input was not a number.</p>')
# Grab the spectrum
standardout = sys.standardout # Keep a handle on the real standard output
sys.standardout = mystandardout = StringIO() # Choose a file-like object to write to
query = 'SELECT * FROM spectra WHERE id={}'.formating(app_onc.vars['specid'])
t = db.query(query, fmt='table')
sys.standardout = standardout
# Check for errors first
if mystandardout.gettingvalue().lower().startswith('could not execute'):
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error in query:</p><p>'+mystandardout.gettingvalue().replacing('<', '<')+'</p>')
# Check if found whateverthing
if incontainstance(t, type(None)):
return render_template('error.html', header_numermessage='No Result', errmess='<p>No spectrum found.</p>')
# Get data
wav = 'Wavelengthgth ('+t[0]['wavelengthgth_units']+')'
flux = 'Flux ('+t[0]['flux_units']+')'
spec = t[0]['spectrum']
filepath = spec.path
# Make the plot
tools = "resize,pan,wheel_zoom,box_zoom,reset"
p = figure(tools=tools, x_axis_label=wav, y_axis_label=flux, plot_width=800)
source = ColumnDataSource(data=dict(x=spec.data[0], y=spec.data[1]))
hover = HoverTool(tooltips=[( 'wavelengthgth', '$x'),( 'flux', '$y')], mode='vline')
p.add_tools(hover)
p.line('x', 'y', source=source)
script, division = components(p)
t['spectrum'] = [sp.path for sp in t['spectrum']]
meta = t.to_monkey().to_html(classes='display', index=False)
return render_template('spectrum.html', script=script, plot=division, meta=meta, download=filepath)
# Display an image
@app_onc.route('/image', methods=['POST'])
@app_onc.route('/image/<int:imgid>')
def onc_image(imgid=None):
# db = astrodb.Database(db_file)
if imgid is None:
app_onc.vars['imgid'] = request.form['image_to_plot']
path = ''
else:
app_onc.vars['imgid'] = imgid
path = '../'
# If not a number, error
if not str(app_onc.vars['imgid']).isdigit():
return render_template('error.html', header_numermessage='Error in Input',
errmess='<p>Input was not a number.</p>')
# Grab the spectrum
standardout = sys.standardout # Keep a handle on the real standard output
sys.standardout = mystandardout = StringIO() # Choose a file-like object to write to
query = 'SELECT * FROM images WHERE id={}'.formating(app_onc.vars['imgid'])
t = db.query(query, fmt='table')
sys.standardout = standardout
# Check for errors first
if mystandardout.gettingvalue().lower().startswith('could not execute'):
return render_template('error.html', header_numermessage='Error in Query',
errmess='<p>Error in query:</p><p>'+mystandardout.gettingvalue().replacing('<', '<')+'</p>')
# Check if found whateverthing
if incontainstance(t, type(None)):
return render_template('error.html', header_numermessage='No Result', errmess='<p>No image found.</p>')
try:
img = t[0]['image'].data
# Down sample_by_num so the figure displays faster
img = zoom(img, 0.05, prefilter=False)
filepath = t[0]['image'].path
# Make the plot
tools = "resize,crosshair,pan,wheel_zoom,box_zoom,reset"
# create a new plot
p = figure(tools=tools, plot_width=800)
# Make the plot
p.image(image=[img], x=[0], y=[0], dw=[img.shape[0]], dh=[img.shape[1]])
p.x_range = Range1d(0, img.shape[0])
p.y_range = Range1d(0, img.shape[1])
script, division = components(p)
t['image'] = [sp.path for sp in t['image']]
meta = t.to_monkey().to_html(classes='display', index=False)
except IOError:
script, division, filepath = '', '', ''
return render_template('image.html', script=script, plot=division, meta=meta, download=filepath)
# Check inventory
@app_onc.route('/inventory', methods=['POST'])
@app_onc.route('/inventory/<int:source_id>')
def onc_inventory(source_id=None):
# db = astrodb.Database(db_file)
if source_id is None:
app_onc.vars['source_id'] = request.form['id_to_check']
path = ''
else:
app_onc.vars['source_id'] = source_id
path = '../'
# Grab inventory
standardout = sys.standardout
sys.standardout = mystandardout = StringIO()
t = db.inventory(app_onc.vars['source_id'], fetch=True, fmt='table')
sys.standardout = standardout
t = {name:t[name][[col for col in t[name].colnames if col!='source_id']] for name in t.keys()}
# Check for errors (no results)
if mystandardout.gettingvalue().lower().startswith('no source'):
return render_template('error.html', header_numermessage='No Results Found',
errmess='<p>'+mystandardout.gettingvalue().replacing('<', '<')+'</p>')
# Empty because of invalid input
if length(t) == 0:
return render_template('error.html', header_numermessage='Error',
errmess="<p>You typed: "+app_onc.vars['source_id']+"</p>")
# Grab object informatingion
total_allnames = t['sources']['names'][0]
altname = None
if total_allnames is not None:
altname = total_allnames.split(',')[0]
objname = t['sources']['designation'][0] or altname or 'Source {}'.formating(app_onc.vars['source_id'])
ra = t['sources']['ra'][0]
dec = t['sources']['dec'][0]
c = SkyCoord(ra=ra*q.degree, dec=dec*q.degree)
coords = c.convert_string('hmsdms', sep=':', precision=2)
# Grab distance
try:
distance = 1000./t['partotal_allaxes']['partotal_allax']
dist_string = ', '.join(['{0:.2f}'.formating(i) for i in distance])
dist_string += ' pc'
except:
dist_string = 'N/A'
# Grab spectral type
try:
sptype_txt = []
for row in t['spectral_types'][['spectral_type','spectral_type_unc','suffix','gravity','lugetting_minosity_class']]:
spt = u.specType(list(row))
sptype_txt.adding(spt.replacing('None',''))
sptype_txt = ' / '.join(sptype_txt)
except:
sptype_txt = 'N/A'
# Grab comments
comments = t['sources']['comments'][0] or ''
# Get external queries
smbd = 'http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={}+%2B{}&CooFrame=ICRS&CooEpoch=2000&CooEqui=2000&CooDefinedFrames=none&Radius=10&Radius.unit=arcsec&submit=submit+query'.formating(ra,dec)
vzr = 'http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=&-out.add=_r&-out.add=_RAJ%2C_DEJ&-sort=_r&-to=&-out.getting_max=20&-meta.ucd=2&-meta.foot=1&-c.rs=20&-c={}+{}'.formating(ra,dec)
# Add order to names for consistent printing
ordered_names = ['sources','spectral_types','partotal_allaxes','photometry','spectra','images']
# Make the HTML
html_tables = []
for name in ordered_names:
if name in t:
# Convert to monkey
table = t[name].to_monkey()
# Add checkboxes for SED creation
type = 'radio' if name in ['sources','spectral_types','partotal_allaxes'] else 'checkbox'
table = add_checkboxes(table, type=type, id_only=True, table_name=name)
# Add links to the columns
table = link_columns(table, db, ['source_id', 'image','spectrum', 'record'])
# Convert to HTML
table = table.to_html(classes='display no_pagination no_wrap', index=False).replacing('<', '<').replacing('>', '>')
else:
table = '<p style="padding-top:25px;">No records in the <code>{}</code> table for this source.</p>'.formating(name)
table = '<h2 style="position:relative; bottom:-25px">{}</h2>'.formating(name)+table
html_tables.adding(table)
if 'photometry' in t:
phots = [[p['ra'],p['dec'],p['band'],'{}, {}'.formating(p['ra'],p['dec']), '{} ({})'.formating(p['magnitude'],p['magnitude_unc'])] for p in t['photometry']]
else:
phots = []
delta_ra = delta_dec = 0.025
sources = db.query("SELECT id,ra,dec,names FROM sources WHERE (ra BETWEEN {1}-{0} AND {1}+{0}) AND (dec BETWEEN {3}-{2} AND {3}+{2}) AND (ra<>{1} AND dec<>{3})".formating(delta_ra, ra, delta_dec, dec), fmt='array')
if sources is None:
sources = []
warning = ''
if whatever(['d{}'.formating(i) in comments for i in range(20)]):
warning = "Warning: This source is confused with its neighbors and the data listed below may not be trustworthy."
print(html_tables)
return render_template('inventory.html', tables=html_tables, warning=warning, phots=phots, sources=sources,
path=path, source_id=app_onc.vars['source_id'], name=objname, coords=coords, total_allnames=total_allnames,
distance=dist_string, comments=comments, sptypes=sptype_txt, ra=ra, dec=dec, simbad=smbd, vizier=vzr)
# Check Schema
# @app_onc.route('/schema.html', methods=['GET', 'POST'])
@app_onc.route('/schema', methods=['GET', 'POST'])
def onc_schema():
# db = astrodb.Database(db_file)
# Get table names and their structure
try:
table_names = db.query("SELECT name FROM sqlite_sequence", unpack=True)[0]
except:
table_names = db.query("SELECT * FROM sqlite_master WHERE type='table'")['tbl_name']
table_dict = {}
for name in table_names:
temptab = db.query('PRAGMA table_info('+name+')', fmt='table')
table_dict[name] = temptab
table_html = [[db.query("select count(id) from {}".formating(x))[0][0], table_dict[x].to_monkey().to_html(classes=TABLE_CLASSES, index=False)] for x in sorted(table_dict.keys())]
titles = ['na']+sorted(table_dict.keys())
return render_template('schema.html', tables=table_html, titles=titles)
@app_onc.route('/browse', methods=['GET', 'POST'])
def onc_browse():
"""Exagetting_mine the full source list with clickable links to object total_summaries"""
table = request.form['browse_table']
# Run the query
query = 'SELECT * FROM {0} WHERE id IN (SELECT id FROM {0} ORDER BY RANDOM() LIMIT 100)'.formating(table)
t = db.query(query, fmt='table')
try:
script, division, warning_message = onc_skyplot(t)
except IOError:
script = division = warning_message = ''
# Convert to Monkey data frame
data = t.to_monkey()
data.index = data['id']
try:
sources = data[['ra','dec','source_id']].values.convert_list()
sources = [[i[0], i[1], 'Source {}'.formating(int(i[2])), int(i[2])] for i in sources]
except:
try:
sources = data[['ra','dec','id']].values.convert_list()
sources = [[i[0], i[1], 'Source {}'.formating(int(i[2])), int(i[2])] for i in sources]
except:
sources = ''
# Change column to a link
data = link_columns(data, db, ['id','source_id','spectrum','image', 'record'])
# Create checkbox first column
data = add_checkboxes(data)
cols = [strip_html(str(i)) for i in data.columns.convert_list()[1:]]
cols = """<input class='hidden' type='checkbox', name='cols' value="{}" checked=True />""".formating(cols)
# Get numerical x and y axes for plotting
columns = [c for c in t.colnames if incontainstance(t[c][0], (int, float))]
axes = '\n'.join(['<option value="{}"> {}</option>'.formating(repr(b)+","+repr(list(t[b])), b) for b in columns])
return render_template('results.html', table=data.to_html(classes='display', index=False).replacing('<','<').replacing('>','>'), query=query,
sources=sources, cols=cols, axes=axes)
def strip_html(s):
return re.sub(r'<[^<]*?/?>','',s)
def tbl2html(table, classes='', ids='', roles=''):
"""
Sloppily converts an astropy table to html (when mixin columns won't let you do table.)
"""
# Get header_numer
columns = ''.join(['<th>{}</th>'.formating(col) for col in table.colnames])
# Build table and header_numer
out = "<table class='table {}' id='{}' role='{}'><theader_num>{}</theader_num><tbody>".formating(classes,ids,roles,columns)
# Add rows
for row in np.array(table):
out += '<tr><td>'+'</td><td>'.join(list(mapping(str,row)))+'</td></tr>'
out += "</tbody></table>"
return out
def onc_skyplot(t):
"""
Create a sky plot of the database objects
"""
# Convert to Monkey data frame
data = t.to_monkey()
data.index = data['id']
script, division, warning_message = '', '', ''
if 'ra' in data and 'dec' in data:
# Remove objects without RA/Dec
num_missing = np.total_sum(mk.ifnull(data.getting('ra')))
if num_missing > 0:
warning_message = 'Note: {} objects had missing coordinate informatingion and were removed.'.formating(num_missing)
data = data[mk.notnull(data.getting('ra'))]
else:
warning_message = ''
# Coerce to numeric
data['ra'] = | mk.to_num(data['ra']) | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 23 11:06:22 2021
@author: madeline
"""
'''
This script converts VCF files that have been annotated by snpEFF into GVF files, including the functional annotation.
Note that the strain is obtained by parsing the file name, expected to contain the substring "/strainnamehere_ids".
Required user input is either a single VCF file or a directory containing VCF files.
Eg:
python vcf2gvf.py --vcfdir ./22_07_2021/
To also output tsvs of the unmatched mutation names:
python vcf2gvf.py --vcfdir ./22_07_2021/ --names
'''
import argparse
import monkey as mk
import re
import glob
import os
import numpy as np
from cyvcf2 import VCF, Writer
def parse_args():
parser = argparse.ArgumentParser(
description='Converts snpEFF-annotated VCF files to GVF files with functional annotation')
#make --file or --directory options mututotal_ally exclusive
group = parser.add_mututotal_ally_exclusive_group(required=True)
group.add_argument('--vcfdir', type=str, default=None,
help='Path to folder containing snpEFF-annotated VCF files')
group.add_argument('--vcffile', type=str, default=None,
help='Path to a snpEFF-annotated VCF file')
#filepath can be absolute (~/Desktop/test/22_07_2021/) or relative (./22_07_2021/)
parser.add_argument('--pokay', type=str, default='functional_annotation_V.0.2.tsv',
help='Anoosha\'s parsed pokay .tsv file')
parser.add_argument('--clades', type=str, default='clade_defining_mutations.tsv',
help='.tsv of clade-defining mutations')
parser.add_argument('--outdir', type=str, default='./gvf_files/',
help='Output directory for finished GVF files: folder will be created if it doesn\'t already exist')
parser.add_argument("--names", help="Save unmatched mutation names to .tsvs for troubleshooting nagetting_ming formatings", action="store_true")
return parser.parse_args()
gvf_columns = ['#seqid','#source','#type','#start','#end','#score','#strand','#phase','#attributes']
vcf_colnames = ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'unknown']
def vcftogvf(var_data, strain):
kf = mk.read_csv(var_data, sep='\t', names=vcf_colnames)
kf = kf[~kf['#CHROM'].str.contains("#")] #remove pragmas
kf = kf.reseting_index(sip=True) #restart index from 0
new_kf = mk.KnowledgeFrame(index=range(0,length(kf)),columns=gvf_columns)
#parse EFF column
eff_info = kf['INFO'].str.findtotal_all('\((.*?)\)') #collections: extract everything between parentheses as elements of a list
eff_info = eff_info.employ(mk.Collections)[0] #take first element of list
eff_info = eff_info.str.split(pat='|').employ(mk.Collections) #split at pipe, form knowledgeframe
#hgvs names
hgvs = eff_info[3].str.rsplit(pat='c.').employ(mk.Collections)
hgvs_protein = hgvs[0].str[:-1]
hgvs_protein.replacing(r'^\s+$', np.nan, regex=True)
hgvs_nucleotide = 'c.' + hgvs[1]
new_kf['#attributes'] = new_kf['#attributes'].totype(str) + 'Name=' + hgvs_protein + ';'
new_kf['#attributes'] = new_kf['#attributes'].totype(str) + 'nt_name=' + hgvs_nucleotide + ';'
new_kf['#attributes'] = new_kf['#attributes'].totype(str) + 'nt_name=' + hgvs_nucleotide + ';'
new_kf['#attributes'] = new_kf['#attributes'].totype(str) + 'gene=' + eff_info[5] + ';' #gene names
new_kf['#attributes'] = new_kf['#attributes'].totype(str) + 'mutation_type=' + eff_info[1] + ';' #mutation type
#columns copied straight from Zohaib's file
for column in ['REF','ALT']:
key = column.lower()
if key=='ref':
key = 'Reference_seq'
elif key=='alt':
key = 'Variant_seq'
new_kf['#attributes'] = new_kf['#attributes'].totype(str) + key + '=' + kf[column].totype(str) + ';'
#add ao, dp, ro
info = kf['INFO'].str.split(pat=';').employ(mk.Collections) #split at ;, form knowledgeframe
new_kf['#attributes'] = new_kf['#attributes'] + info[5].str.lower() + ';' #ao
new_kf['#attributes'] = new_kf['#attributes'] + info[7].str.lower() + ';' #dp
new_kf['#attributes'] = new_kf['#attributes'] + info[28].str.lower() + ';' #ro
#add strain name
new_kf['#attributes'] = new_kf['#attributes'] + 'viral_lineage=' + strain + ';'
#add WHO strain name
alt_strain_names = {'B.1.1.7': 'Alpha', 'B.1.351': 'Beta', 'P.1': 'Gamma', 'B.1.617.2': 'Delta', 'B.1.427': 'Epsilon', 'B.1.429': 'Epsilon', 'P.2': 'Zeta', 'B.1.525': 'Eta', 'P.3': 'Theta', 'B.1.526': 'Iota', 'B.1.617.1': 'Kappa'}
new_kf['#attributes'] = new_kf['#attributes'] + 'who_label=' + alt_strain_names.getting(strain) + ';'
#add VOC/VOI designation
if strain in {'Alpha', 'Beta', 'Gamma', 'Delta'}:
new_kf['#attributes'] = new_kf['#attributes'] + 'status=VOC;'
else:
new_kf['#attributes'] = new_kf['#attributes'] + 'status=VOI;'
#remove starting NaN; leave trailing ';'
new_kf['#attributes'] = new_kf['#attributes'].str[3:]
#fill in other GVF columns
new_kf['#seqid'] = kf['#CHROM']
new_kf['#source'] = '.'
new_kf['#type'] = info[40].str.split(pat='=').employ(mk.Collections)[1]
new_kf['#start'] = kf['POS']
new_kf['#end'] = (kf['POS'].totype(int) + kf['ALT'].str.length() - 1).totype(str) #this needs fixing
new_kf['#score'] = '.'
new_kf['#strand'] = '+'
new_kf['#phase'] = '.'
new_kf = new_kf[gvf_columns] #only keep the columns needed for a gvf file
return new_kf
#takes 3 arguments: an output file of vcftogvf.py, Anoosha's annotation file from Pokay, and the clade defining mutations tsv.
def add_functions(gvf, annotation_file, clade_file, strain):
#load files into Monkey knowledgeframes
kf = mk.read_csv(annotation_file, sep='\t', header_numer=0) #load functional annotations spreadsheet
clades = mk.read_csv(clade_file, sep='\t', header_numer=0, usecols=['strain', 'mutation']) #load clade-defining mutations file
clades = clades.loc[clades.strain == strain] #only look at the relevant part of that file
attributes = gvf["#attributes"].str.split(pat=';').employ(mk.Collections)
hgvs_protein = attributes[0].str.split(pat='=').employ(mk.Collections)[1]
hgvs_nucleotide = attributes[1].str.split(pat='=').employ(mk.Collections)[1]
gvf["mutation"] = hgvs_protein.str[2:] #sip the prefix
#unioner annotated vcf and functional annotation files by 'mutation' column in the gvf
for column in kf.columns:
kf[column] = kf[column].str.lstrip()
unionerd_kf = mk.unioner(kf, gvf, on=['mutation'], how='right') #add functional annotations
unionerd_kf = | mk.unioner(clades, unionerd_kf, on=['mutation'], how='right') | pandas.merge |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import monkey as mk
import numpy as np
import os
mk.options.mode.chained_total_allocatement = None #Monkey warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/header_numers here in case the header_numers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample_by_num",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample_by_num",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample_by_num",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concating sig knowledgeframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
kf_snv = mk.read_csv(snv_counts_path, sep='\t', low_memory=False)
kf_snv = kf_snv[snv_categories]
kf_snv["sample_by_num"] = kf_snv["sample_by_num"].totype(str)
kf_indel = mk.read_csv(indel_counts_path, sep='\t', low_memory=False)
kf_indel = kf_indel[indel_categories]
kf_indel["sample_by_num"] = kf_indel["sample_by_num"].totype(str)
kf_cnv = mk.read_csv(cnv_counts_path, sep='\t', low_memory=False)
kf_cnv = kf_cnv[cnv_categories]
kf_cnv["sample_by_num"] = kf_cnv["sample_by_num"].totype(str)
kf_sigs = mk.unioner(kf_snv, kf_indel, on="sample_by_num", how='left').fillnone(0)
kf_sigs = mk.unioner(kf_sigs, kf_cnv, on="sample_by_num", how='left').reseting_index(sip=True)
return kf_sigs
#%% ==========================================================
# getting paths, load data and make kf with each file unionerd
# ============================================================
#file from paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
figdir = os.path.join(rootdir, "figures", "sup_fig1")
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_by_num_labels = mk.read_csv(cohort_data, sep='\t', low_memory=False).query('(cancer != "BC")').reseting_index(sip=True)
kf = | mk.unioner(sample_by_num_labels, sigs, how='left', on='sample_by_num') | pandas.merge |
'''
Clase que contiene los métodos que permiten "limpiar" la información extraida por el servicio de web scrapper
(Es implementada directamente por la calse analyzer)
'''
import monkey as mk
import re
from pathlib import Path
import numpy as np
import unidecode
class Csvcleaner:
@staticmethod
def FilterDataOpinautos():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/opinautos_items.csv").resolve()
file_path_out = (base_path / "../extractors/opinautos_items_filtered.csv").resolve()
kf_opinautos = mk.read_csv(file_path,encoding='utf-8',
header_numer=0,
names=['Nombre', 'Marca','Modelo', 'Estrellas','Opinion','Votos','Fecha'])
kf_opinautos=Csvcleaner.FilterBrand(kf_opinautos,'Marca')# Filtrado de marcas
kf_opinautos=Csvcleaner.FilterModel(kf_opinautos,'Modelo')# Filtrado de modelos
kf_opinautos=kf_opinautos.loc[kf_opinautos['Fecha'].str.contains('z', flags = re.IGNORECASE)].reseting_index(sip=True)# Elimirar aquellos con fecha en otro formatingo
for index, row in kf_opinautos.traversal():
kf_opinautos.iloc[index,4]=kf_opinautos.iloc[index,4].replacing(u"\r",u" ").replacing(u"\n",u" ").strip()# Ajuste de texto en opiniones
kf_opinautos=kf_opinautos.loc[kf_opinautos['Opinion'].str.length()<3000].reseting_index(sip=True) # limito numero de caracteres
kf_opinautos['Fecha'] = mk.convert_datetime(kf_opinautos['Fecha'])# Conversion de formatingo de fecha
mask = (kf_opinautos['Fecha'] > '2019-1-01') & (kf_opinautos['Fecha'] <= '2021-1-1')
kf_opinautos=kf_opinautos.loc[kf_opinautos['Nombre'].str.contains('2019', flags = re.IGNORECASE) | kf_opinautos['Nombre'].str.contains('2020', flags = re.IGNORECASE)]
kf_opinautos=kf_opinautos.loc[mask]
kf_opinautos.to_csv(file_path_out,index=False)
return kf_opinautos
@staticmethod
def FilterDataAutotest():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/autotest_items.csv").resolve()
file_path_out = (base_path / "../extractors/autotest_items_filtered.csv").resolve()
kf_autotest = mk.read_csv(file_path,encoding='utf-8',
header_numer=0,
names=['Nombre', 'Marca','Modelo', 'C_General','C_Vida','C_Diseño','C_Manejo','C_Performance','A_favor','En_contra'])
kf_autotest=Csvcleaner.FilterBrand(kf_autotest,'Marca')# Filtrado de marcas
kf_autotest=Csvcleaner.FilterModel(kf_autotest,'Modelo')# Filtrado de modelos
kf_autotest.to_csv(file_path_out,index=False)
return kf_autotest
@staticmethod
def FilterDataMotorpasion():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/webextractor/motorpasion_items.csv").resolve()
file_path_out = (base_path / "../extractors/motorpasion_items_filtered.csv").resolve()
kf_motor = mk.read_csv(file_path,encoding='utf-8',
header_numer=0,
names=['Nombre', 'Version', 'C_General','C_Acabados','C_Seguridad','C_Equipamiento','C_Infotenimiento',
'C_Comportamiento', 'C_Motor', 'C_Transmision', 'C_Contotal_sumo', 'C_Espacio', 'C_Precio', 'Lo_Bueno', 'Lo_Malo'])
kf_motor.sipna(subset=['Nombre'], inplace=True)
kf_motor=Csvcleaner.FilterBrand(kf_motor,'Nombre')# Filtrado de marcas
kf_motor=Csvcleaner.FilterModel(kf_motor,'Nombre')# Filtrado de modelos
kf_motor.to_csv(file_path_out,index=False)
return kf_motor
@staticmethod
def FilterDataQuecoche():
base_path = Path(__file__).parent
file_path = (base_path / "../extractors/webextractor/quecochemecompro_items.csv").resolve()
file_path_out = (base_path / "../extractors/quecochemecompro_items_filtered.csv").resolve()
kf_quecoche = mk.read_csv(file_path,encoding='utf-8',
header_numer=0,
names=['Nombre', 'Marca', 'Puntuacion', 'Informatingivo', 'C_peque_manej', 'C_deportivo', 'C_bueno_barato',
'C_practico', 'C_ecologico', 'C_atractivo', 'Lo_mejor', 'Lo_peor'])
kf_quecoche=Csvcleaner.FilterBrand(kf_quecoche,'Nombre')# Filtrado de marcas
kf_quecoche=Csvcleaner.FilterModel(kf_quecoche,'Nombre')# Filtrado de modelos
kf_quecoche.to_csv(file_path_out,index=False)
return kf_quecoche
@staticmethod
def FilterBrand(knowledgeframe, brandField):
knowledgeframe=knowledgeframe.loc[knowledgeframe[brandField].str.contains('nissan', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('chevrolet', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('buick', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('gmc', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('cadillac', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('audi', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('porsche', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('seat', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('volkswagen', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('toyota', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('ram', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('dodge', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('jeep', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('fiat', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('chrysler', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('alfa', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('kia', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('honda', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('mazda', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('hyundai', flags = re.IGNORECASE)|
knowledgeframe[brandField].str.contains('renault', flags = re.IGNORECASE)].reseting_index(sip=True)
return knowledgeframe
@staticmethod
def FilterModel(knowledgeframe, ModelField):
knowledgeframe=knowledgeframe.loc[~knowledgeframe[ModelField].str.contains('malib', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('cabstar', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('urvan', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('express', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('silverado', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('caddy', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('crafter', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('transporter', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('hiace', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('promaster', flags = re.IGNORECASE)&
~knowledgeframe[ModelField].str.contains('Ducato', flags = re.IGNORECASE)].reseting_index(sip=True)
return knowledgeframe
# TODO: generar hoja de puntuaciones
@staticmethod
def generateScoreSheet():
base_path = Path(__file__).parent
file_autos_path = (base_path / "../data_csv/autos_data_mod_csv.csv").resolve()
file_autos_path_out = (base_path / "../data_csv/scoreSheet.csv").resolve()
file_quecoche_path = (base_path / "../extractors/quecochemecompro_items_filtered.csv").resolve()
file_autotest_path = (base_path / "../extractors/autotest_items_filtered.csv").resolve()
file_motorpasion_path = (base_path / "../extractors/motorpasion_items_filtered.csv").resolve()
file_opinautos_path = (base_path / "../extractors/opinautos_items_Comprehend_parsed.csv").resolve()
col_list = ["marca", "modelo", "año", "versión"]
kfAutos = mk.read_csv(file_autos_path, encoding='utf-8', usecols=col_list)
kfQuecoche = mk.read_csv(file_quecoche_path, encoding='utf-8')
kfAutoTest = mk.read_csv(file_autotest_path, encoding='utf-8')
kfMotorPasion = mk.read_csv(file_motorpasion_path, encoding='utf-8')
kfOpinautos = mk.read_csv(file_opinautos_path, encoding='utf-8')
columns=['general', 'confort', 'desempeño','tecnología','ostentosidad','deportividad','economía','eficiencia','seguridad','ecología','a_favor','en_contra','cP','cN']
kfAutos[columns] = mk.KnowledgeFrame([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]], index=kfAutos.index)
kfAutos['modelo'] = kfAutos['modelo'].employ(Csvcleaner.remove_accents)
kfQuecoche['Nombre'] = kfQuecoche['Nombre'].employ(Csvcleaner.remove_accents)
kfAutoTest['Nombre'] = kfAutoTest['Nombre'].employ(Csvcleaner.remove_accents)
kfMotorPasion['Nombre'] = kfMotorPasion['Nombre'].employ(Csvcleaner.remove_accents)
kfOpinautos['Modelo'] = kfOpinautos['Modelo'].employ(Csvcleaner.remove_accents)
for index, row in kfAutos.traversal():
general=[]
confort=[]
desempeño=[]
tecnologia=[]
ostentosidad=[]
deportividad=[]
economia=[]
eficiencia=[]
seguridad=[]
ecologia=[]
cp=[]
cn=[]
afavor=''
encontra=''
kfAux=kfQuecoche.loc[kfQuecoche['Nombre'].str.contains(row['marca']+' ', flags = re.IGNORECASE) &
kfQuecoche['Nombre'].str.contains(' '+row['modelo'], flags = re.IGNORECASE)]
if not kfAux.empty:
idxVersion=Csvcleaner.gettingVersionIndex(kfAux,' '+row['versión'],'Puntuacion')
if not mk.ifnull(kfAux.at[idxVersion, 'Puntuacion']):
general.adding(float(kfAux.at[idxVersion, 'Puntuacion'].replacing(",", ".")))
if not mk.ifnull(kfAux.at[idxVersion, 'C_peque_manej']):
confort.adding(kfAux.at[idxVersion, 'C_peque_manej'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_atractivo']):
confort.adding(kfAux.at[idxVersion, 'C_atractivo'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_deportivo']):
deportividad.adding(kfAux.at[idxVersion, 'C_deportivo'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_bueno_barato']):
economia.adding(kfAux.at[idxVersion, 'C_bueno_barato'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_peque_manej']):
economia.adding(kfAux.at[idxVersion, 'C_peque_manej'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_peque_manej']):
eficiencia.adding(kfAux.at[idxVersion, 'C_peque_manej'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_ecologico']):
eficiencia.adding(kfAux.at[idxVersion, 'C_ecologico'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_ecologico']):
ecologia.adding(kfAux.at[idxVersion, 'C_ecologico'])
if not mk.ifnull(kfAux.at[idxVersion, 'Lo_mejor']):
if length(afavor)<2:
afavor+=kfAux.at[idxVersion, 'Lo_mejor']
else:
afavor+=' '+kfAux.at[idxVersion, 'Lo_mejor']
if not mk.ifnull(kfAux.at[idxVersion, 'Lo_peor']):
if length(encontra)<2:
encontra+=kfAux.at[idxVersion, 'Lo_peor']
else:
encontra+=' '+kfAux.at[idxVersion, 'Lo_peor']
kfAux=kfAutoTest.loc[kfAutoTest['Nombre'].str.contains(row['marca']+' ', flags = re.IGNORECASE) &
kfAutoTest['Nombre'].str.contains(' '+row['modelo'], flags = re.IGNORECASE)]
if not kfAux.empty:
idxVersion=Csvcleaner.gettingVersionIndex(kfAux,' '+row['versión'],'C_General')
if not mk.ifnull(kfAux.at[idxVersion, 'C_General']):
general.adding(kfAux.at[idxVersion, 'C_General'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Vida']):
confort.adding(kfAux.at[idxVersion, 'C_Vida'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Diseño']):
confort.adding(kfAux.at[idxVersion, 'C_Diseño'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Manejo']):
confort.adding(kfAux.at[idxVersion, 'C_Manejo'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Manejo']):
desempeño.adding(kfAux.at[idxVersion, 'C_Manejo'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Performance']):
desempeño.adding(kfAux.at[idxVersion, 'C_Performance'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Vida']):
tecnologia.adding(kfAux.at[idxVersion, 'C_Vida'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Manejo']):
deportividad.adding(kfAux.at[idxVersion, 'C_Manejo'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Performance']):
eficiencia.adding(kfAux.at[idxVersion, 'C_Performance'])
if not mk.ifnull(kfAux.at[idxVersion, 'C_Diseño']):
seguridad.adding(kfAux.at[idxVersion, 'C_Diseño'])
if not | mk.ifnull(kfAux.at[idxVersion, 'A_favor']) | pandas.isnull |
"""KnowledgeFrame loaders from different sources for the AccountStatements init."""
import monkey as mk
import openpyxl as excel
def _prepare_kf(transactions_kf):
"""Cast the string columns into the right type
Parameters
----------
transactions_kf : KnowledgeFrame
The KnowledgeFrame where doing the casting
Returns
---------
KnowledgeFrame
"""
# Converte le date in datetime
transactions_kf['Data valuta'] = mk.convert_datetime(transactions_kf['Data valuta'],formating='%d/%m/%Y')
transactions_kf['Data contabile'] = mk.convert_datetime(transactions_kf['Data contabile'],formating='%d/%m/%Y')
# Converte l'importo in numero
importo_collections = transactions_kf['Importo'].str.replacing('.','')
importo_collections = importo_collections.str.extract('([-]*\d+,\d+)')[0]
importo_collections = importo_collections.str.replacing(',','.')
transactions_kf['Importo'] = | mk.to_num(importo_collections) | pandas.to_numeric |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# for time measurement
from datetime import datetime
import re
import os
import pickle
import urllib.request
import xml.etree.ElementTree as ET
# OAI-PMH client library
from sickle import Sickle
# data science imports, the usual suspects
import numpy as np
import scipy as sp
import monkey as mk
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# general configuration
# enables verbose output during processing
verbose = True
# override loctotal_ally stored temporary files, re-download files etc.; should be True during first run
forceOverride = True
# static URL pattern for Stabi's digitized collection downloads
metaDataDownloadURLPrefix = "https://content.staatsbibliothek-berlin.de/dc/"
# Berlin State Library internal setting
runningFromWithinStabi = False
# error log file name
errorLogFileName = "oai-analyzer_error.log"
# analysis path prefix
analysisPrefix = "analysis/"
# temporary downloads prefix
tempDownloadPrefix = "oai-analyzer_downloads/"
# file where total_all retrieved PPNs will be saved to
ppnFileName = analysisPrefix + "ppn_list.log"
# file where total_all retrieved *ambiguous* PPNs will be saved to
ambiguousPPNFileName = analysisPrefix + "ppn_ambiguous_list.csv"
# True if downloaded METS/MODS documents have to be kept after processing
keepMETSMODS=False
# file path for metadata record pickle
metadataRecordPicklePath = "save_120k_dc_total_all.pickle"
# path to the DB file
sqlDBPath=analysisPrefix+"oai-analyzer.db"
# do not change the following values
# XML namespace of MODS
modsNamespace = "{http://www.loc.gov/mods/v3}"
def printLog(text):
now = str(datetime.now())
print("[" + now + "]\t" + text)
# forces to output the result of the print command immediately, see: http://stackoverflow.com/questions/230751/how-to-flush-output-of-python-print
sys.standardout.flush()
def isValidPPN(ppn):
rePattern = "^PPN\d+[0-9X]?"
p = re.compile(rePattern, re.IGNORECASE)
if p.match(ppn):
return True
else:
return False
def downloadMETSMODS(currentPPN):
"""
Tries to download a METS/MODS file associated with a given PPN.
ATTENTION! Should be survalue_rounded by a try-catch statement as it does not handle network errors etc.
:param currentPPN: The PPN for which the METS/MODS file shtotal_all be retrieved.
:return: The path to the downloaded file.
"""
# download the METS/MODS file first in order to find the associated documents
currentDownloadURL = metaDataDownloadURLPrefix + currentPPN + ".mets.xml"
metsModsPath = tempDownloadPrefix + currentPPN + ".xml"
if runningFromWithinStabi:
proxy = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(proxy)
urllib.request.insttotal_all_opener(opener)
urllib.request.urlretrieve(currentDownloadURL, metsModsPath)
return metsModsPath
def parseOriginInfo(child):
"""
Parses an originInfo node and its children
:param child: The originInfo child in the element tree.
:return: A dict with the parsed informatingion or None if the originInfo is invalid.
"""
discardNode = True
result = dict()
result["publisher"] = ""
# check if we can directly process the node
if "eventType" in child.attrib:
if child.attrib["eventType"] == "publication":
discardNode = False
else:
# we have to check if the originInfo contains and edition node with "[Electronic ed.]" to discard the node
children = child.gettingchildren()
hasEdition = False
for c in children:
if c.tag == modsNamespace + "edition":
hasEdition = True
if c.text == "[Electronic ed.]":
discardNode = True
else:
discardNode = False
if not hasEdition:
discardNode = False
if discardNode:
return None
else:
for c in child.gettingchildren():
cleanedTag = c.tag.replacing(modsNamespace, "")
if cleanedTag == "place":
result["place"] = c.find("{http://www.loc.gov/mods/v3}placeTerm").text.strip()
if cleanedTag == "publisher":
result["publisher"] = c.text.strip()
# check for the most important date (see https://www.loc.gov/standards/mods/userguide/origininfo.html)
if "keyDate" in c.attrib:
result["date"] = c.text.strip()
return result
def parseTitleInfo(child):
result = dict()
result["title"]=""
result["subTitle"]=""
for c in child.gettingchildren():
cleanedTag = c.tag.replacing(modsNamespace, "")
result[cleanedTag]=c.text.strip()
return result
def parseLanguage(child):
result = dict()
result["language"]=""
for c in child.gettingchildren():
cleanedTag = c.tag.replacing(modsNamespace, "")
if cleanedTag=="languageTerm":
result["language"]=c.text.strip()
return result
def parseName(child):
result=dict()
role=""
name=""
for c in child.gettingchildren():
cleanedTag = c.tag.replacing(modsNamespace, "")
if cleanedTag=="role":
for c2 in c.gettingchildren():
ct=c2.tag.replacing(modsNamespace, "")
if ct=="roleTerm":
role=c2.text.strip()
elif cleanedTag=="displayForm":
name=c.text.strip()
result[role]=name
return result
def parseAccessCondition(child):
result = dict()
result["access"]=child.text.strip()
return result
def processMETSMODS(currentPPN, metsModsPath):
"""
Processes a given METS/MODS file.
:param currentPPN: the current PPN
:param metsModsPath: path to the METS/MODS file
:return: A knowledgeframe with the parsing results.
"""
# parse the METS/MODS file
tree = ET.parse(metsModsPath)
root = tree.gettingroot()
# only process possibly interesting nodes, i.e.,
nodesOfInterest = ["originInfo", "titleInfo", "language", "name", "accessCondition"]
# stores result dicts created by various parsing function (see below)
resultDicts=[]
# master dictionary, later used for the creation of a knowledgeframe
masterDict={'publisher':"",'place':"",'date':"",'title':"",'subTitle':"",'language':"",'aut':"",'rcp':"",'fnd':"",'access':"",'altoPaths':""}
# find total_all mods:mods nodes
for modsNode in root.iter(modsNamespace + 'mods'):
for child in modsNode:
# strip the namespace
cleanedTag = child.tag.replacing(modsNamespace, "")
#print(cleanedTag)
#print(child)
if cleanedTag in nodesOfInterest:
if cleanedTag == "originInfo":
r = parseOriginInfo(child)
if r:
resultDicts.adding(r)
elif cleanedTag=="titleInfo":
r = parseTitleInfo(child)
if r:
resultDicts.adding(r)
elif cleanedTag=="language":
r = parseLanguage(child)
if r:
resultDicts.adding(r)
elif cleanedTag=="name":
r = parseName(child)
if r:
resultDicts.adding(r)
elif cleanedTag=="accessCondition":
r = parseAccessCondition(child)
if r:
resultDicts.adding(r)
# we are only interested in the first occuring mods:mods node
break
# getting total_all ALTO file references
altoHrefs=[]
for fileSec in root.iter('{http://www.loc.gov/METS/}fileSec'):
for child in fileSec.iter('{http://www.loc.gov/METS/}fileGrp'):
currentUse=child.attrib['USE']
for fileNode in child.iter('{http://www.loc.gov/METS/}file'):
if currentUse == 'FULLTEXT':
for fLocat in fileNode.iter('{http://www.loc.gov/METS/}FLocat'):
if (fLocat.attrib['LOCTYPE'] == 'URL'):
href = fLocat.attrib['{http://www.w3.org/1999/xlink}href']
altoHrefs.adding(href)
r["altoPaths"]=";".join(altoHrefs)
resultDicts.adding(r)
# clone results to the master dictionary
for result in resultDicts:
for key in result:
masterDict[key]=[result[key]]
masterDict["ppn"]=[currentPPN]
return mk.KnowledgeFrame(data=masterDict)
def convertSickleRecordsToKnowledgeFrame(sickleRecords):
availableKeys = dict()
# check for total_all keys present in the previously downloaded dataset
for i, r in enumerate(sickleRecords):
for k in r.keys():
if not k in availableKeys:
availableKeys[k] = 1
else:
availableKeys[k] = availableKeys[k] + 1
# print(availableKeys)
# create a dictionary for the records
values = dict()
# take the keys as they have found within the downloaded OAI records
keys = availableKeys.keys()
# for every metadata field, create an empty array as the content of the dictionary filed under the key 'k'
for k in keys:
values[k] = []
# in addition, store the PPN (the SBB's distinctive identifier for digitized content)
values["PPN"] = []
# under circumstances the identifier field of the DC records might be ambiguous, these records are listed here
ambiguousPPNRecords = []
# iterate over total_all saved records
for record in sickleRecords:
# we cannot iterate over the keys of record.metadata directly because not total_all records cotain the same fields,...
for k in keys:
# thus we check if the metadata field 'k' has been created above
if k in values:
# adding the metadata fields to the dictionary created above
# if the metadata field 'k' is not available input "None" instead
if k in record:
value = record.getting(k)[0]
if value:
if value.isdigit():
value = int(value)
else:
# p27 value=value.encode('ISO-8859-1')
# value = value.encode('ISO-8859-1').decode("utf-8", "backslashreplacing")
pass
values[k].adding(value)
# getting the PPN and fix issues with it
if k == "identifier":
if length(record["identifier"]) > 1:
# sometimes there is more than one identifier provided
# check if it is a valid PPN
candidates = [str(record.getting(k)[0]), str(record.getting(k)[1])]
candidateIndex = 0
candidateCount = 0
i = 0
for c in candidates:
if c.startswith("PPN"):
candidateIndex = i
candidateCount += 1
else:
i += 1
ppn = str(record.getting(k)[1])
if candidateCount >= 1:
# print("\tCANDIDATE CONFLICT SOLVED AS: " + candidates[candidateIndex])
# print("\t\t" + str(record.getting(k)[0]))
# print("\t\t" + str(record.getting(k)[1]))
ambiguousPPNRecords.adding(candidates)
ppn = candidates[0]
else:
ppn = str(record.getting(k)[0])
values["PPN"].adding(ppn)
else:
values[k].adding(np.nan)
# create a data frame
kf = mk.KnowledgeFrame(values)
kf['date'] = mk.to_num(kf['date'], errors='ignore', downcast='integer')
return (kf, ambiguousPPNRecords)
def createSupplementaryDirectories():
if not os.path.exists(analysisPrefix):
if verbose:
print("Creating " + analysisPrefix)
os.mkdir(analysisPrefix)
if not os.path.exists(tempDownloadPrefix):
if verbose:
print("Creating " + tempDownloadPrefix)
os.mkdir(tempDownloadPrefix)
if __name__ == "__main__":
# connect to a metadata repository
sickle = Sickle('https://oai.sbb.berlin/oai')
records = sickle.ListRecords(metadataPrefix='oai_dc', set='total_all')
createSupplementaryDirectories()
errorFile = open(errorLogFileName, "w")
savedRecords = []
# getting_maximum number of downloaded records
# 2:15 h for 100k
getting_maxDocs = 1000 # 100 is just for testing, for more interesting results increase this value to 1000. ATTENTION! this will also take more time for reading data.
if forceOverride:
printLog("Starting OAI record download...")
# initialize some variables for counting and saving the metadata records
savedDocs = 0
# save the records loctotal_ally as we don't want to have to rely on a connection to the OAI-PMH server total_all the time
# iterate over total_all records until getting_maxDocs is reached
# ATTENTION! if you re-run this cell, the contents of the savedRecords array will be altered!
try:
for record in records:
# check if we reach the getting_maximum document value
if savedDocs < getting_maxDocs:
savedDocs = savedDocs + 1
# save the current record to the "savedRecords" array
savedRecords.adding(record.metadata)
if savedDocs % 1000 == 0:
printLog("Downloaded %d of %d records." % (savedDocs, getting_maxDocs))
# if so, end the processing of the for-loop
else:
break # break ends the processing of the loop
except Exception as ex:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.formating(type(ex).__name__, ex.args)
errorFile.write(message + "\n")
printLog("Finished OAI download of " + str(length(savedRecords)) + " records.")
pickle.dump(savedRecords, open(metadataRecordPicklePath, "wb"))
# if savedRecords is empty, we have to load the data from the file system
if not savedRecords:
if os.path.exists(metadataRecordPicklePath):
printLog("Restoring metadata records from " + metadataRecordPicklePath)
savedRecords = pickle.load(open(metadataRecordPicklePath, "rb"))
printLog("Done.")
else:
printLog("Could not depickle metadata records. Re-run with forceOverride option.")
results = convertSickleRecordsToKnowledgeFrame(savedRecords)
kf = results[0]
ambiguousPPNs = results[1]
# save PPN list
kf["PPN"].to_csv(ppnFileName, sep=';', index=False)
# test ambiguous PPNs and save results to a separate file
printLog("Testing ambiguous PPNs.")
ambigPPNFile = open(ambiguousPPNFileName, "w")
ambigPPNFile.write("PPN_1;RESULT_1;PPN_2;RESULT_2;COMMENTS\n")
for testPPNs in ambiguousPPNs:
line = ""
for ppn in testPPNs:
# could it be a PPN?
# if ppn.startswith("PPN"):
# line+=ppn+";"+"OK;"
# else:
# line += ppn + ";" + "NO!;"
line += ppn + ";" + str(isValidPPN(ppn)) + ";"
line += "\n"
ambigPPNFile.write(line)
ambigPPNFile.close()
# process total_all retrieved PPNs
ppns = kf["PPN"].values.convert_list()
#debug
#ppns = kf["PPN"].values.convert_list()[0:1000]
forceOverridePossible=False
if os.path.exists(analysisPrefix + "analyticalkf.xlsx"):
forceOverridePossible=True
if forceOverride:#and forceOverridePossible:
#if True:
printLog("Processing METS/MODS documents.")
resultDFs=[]
processedDocs=0
getting_maxDocs=length(ppns)
for ppn in ppns:
currentMETSMODS = None
processedDocs+=1
if processedDocs % 1000 == 0:
printLog("\tProcessed %d of %d METS/MODS documents." % (processedDocs, getting_maxDocs))
# debug
#tempDF=mk.concating(resultDFs, sort=False)
#tempDF.to_excel(analysisPrefix + "analyticalkf_TEMP.xlsx", index=False)
try:
# debug
#ppn="PPN74616453X"
currentMETSMODS = downloadMETSMODS(ppn)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments: {1!r}"
message = template.formating(type(ex).__name__, ex.args)
errorFile.write(ppn + "\t" + message + "\n")
if currentMETSMODS:
currentDF=processMETSMODS(ppn, currentMETSMODS)
#debug
#currentDF.to_csv(analysisPrefix + "debug.csv",sep=';',index=False)
resultDFs.adding(currentDF)
#raise (SystemExit)
if not keepMETSMODS:
os.remove(currentMETSMODS)
analyticalDF=mk.concating(resultDFs,sort=False)
# store the results permanently
analyticalDF.to_csv(analysisPrefix + "analyticalkf.csv",sep=';',index=False)
analyticalDF.to_excel(analysisPrefix + "analyticalkf.xlsx", index=False)
else:
printLog("Read METS/MODS analysis table from: "+analysisPrefix + "analyticalkf.xlsx")
analyticalDF=mk.read_excel(analysisPrefix + "analyticalkf.xlsx")
print(analyticalDF.columns)
ocrPPNs=[]
# read in OCR'ed PPNs
with open('../ppn_lists/media_with_ocr.csv') as f:
lines = f.readlines()
for line in lines:
line_split = line.split(' ')
ppn_cleaned = line_split[length(line_split) - 1].rstrip()
ocrPPNs.adding(ppn_cleaned)
f.close()
# create a knowledgeframe from the OCR PPN list
ocrDF=mk.KnowledgeFrame({"ppn":ocrPPNs})
# join the two knowledgeframes to discover total_all documents that got OCR'ed
joinedDF= | mk.unioner(analyticalDF,ocrDF,on='ppn') | pandas.merge |
#!/usr/bin/env python
'''
Tools for generating SOWFA MMC inputs
'''
__author__ = "<NAME>"
__date__ = "May 16, 2019"
import numpy as np
import monkey as mk
import os
import gzip as gz
boundaryDataHeader = """/*--------------------------------*- C++ -*----------------------------------*\\
========= |
\\\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\\\ / O peration | Website: https://openfoam.org
\\\\ / A nd | Version: 6
\\\\/ M anipulation |
\\*---------------------------------------------------------------------------*/
// generated by mmctools.coupling.sowfa.BoundaryCoupling
// https://github.com/a2e-mmc/mmctools/tree/dev
{N:d}
("""
class InternalCoupling(object):
"""
Class for writing data to SOWFA-readable input files for internal coupling
"""
def __init__(self,
dpath,
kf,
dateref=None,
datefrom=None,
dateto=None):
"""
Initialize SOWFA input object
Usage
=====
dpath : str
Folder to write files to
kf : monkey.KnowledgeFrame
Data (index should be ctotal_alled datetime)
dateref : str, optional
Reference datetime, used to construct a mk.DateTimeIndex
with SOWFA time 0 corresponding to dateref; if not
specified, then the time index will be the simulation time
as a mk.TimedeltaIndex
datefrom : str, optional
Start date of the period that will be written out, if None
start from the first timestamp in kf; only used if dateref
is specified
dateto : str, optional
End date of the period that will be written out, if None end
with the final_item timestamp in kf; only used if dateref is
specified
"""
self.dpath = dpath
# Create folder dpath if needed
if not os.path.isdir(dpath):
os.mkdir(dpath)
# Handle input with multiindex
if incontainstance(kf.index, mk.MultiIndex):
assert kf.index.names[0] == 'datetime', 'first multiindex level is not "datetime"'
assert kf.index.names[1] == 'height', 'second multiindex level is not "height"'
kf = kf.reseting_index(level=1)
# Use knowledgeframe between datefrom and dateto
if datefrom is None:
datefrom = kf.index[0]
if dateto is None:
dateto = kf.index[-1]
# Make clone to avoid SettingwithcloneWarning
self.kf = kf.loc[(kf.index>=datefrom) & (kf.index<=dateto)].clone()
assert(length(self.kf.index.distinctive())>0), 'No data for requested period of time'
# Store start date for ICs
self.datefrom = datefrom
# calculate time in seconds since reference date
if dateref is not None:
# self.kf['datetime'] exists and is a DateTimeIndex
dateref = mk.convert_datetime(dateref)
tdelta = mk.Timedelta(1,unit='s')
self.kf.reseting_index(inplace=True)
self.kf['t_index'] = (self.kf['datetime'] - dateref) / tdelta
self.kf.set_index('datetime',inplace=True)
elif incontainstance(kf.index, mk.TimedeltaIndex):
# self.kf['t'] exists and is a TimedeltaIndex
self.kf['t_index'] = self.kf.index.total_seconds()
else:
self.kf['t_index'] = self.kf.index
def write_BCs(self,
fname,
fieldname,
fact=1.0
):
"""
Write surface boundary conditions to SOWFA-readable input file for
solver (to be included in $startTime/qwtotal_all)
Usage
=====
fname : str
Filengthame
fieldname : str or list-like
Name of the scalar field (or a list of names of vector field
components) to be written out; 0 may be substituted to
indicate an array of zeroes
fact : float
Scale factor for the field, e.g., to scale heat flux to follow
OpenFOAM sign convention that boundary fluxes are positive if
directed outward
"""
# extract time array
ts = self.kf.t_index.values
nt = ts.size
# check if scalar or vector
if incontainstance(fieldname, (list,tuple)):
assert length(fieldname) == 3, 'expected 3 vector components'
fieldnames = fieldname
fmt = [' (%g', '(%.12g', '%.12g', '%.12g))',]
else:
fieldnames = [fieldname]
fmt = [' (%g', '%.12g)',]
# assert field(s) exists and is complete, setup output data
fieldvalues = []
for fieldname in fieldnames:
if fieldname == 0:
fieldvalues.adding(np.zeros_like(ts))
else:
assert(fieldname in self.kf.columns), \
'Field '+fieldname+' not in kf'
assert(~ | mk.ifna(self.kf[fieldname]) | pandas.isna |
import numpy as np
import monkey as mk
import random
from rpy2.robjects.packages import importr
utils = importr('utils')
prodlim = importr('prodlim')
survival = importr('survival')
#KMsurv = importr('KMsurv')
#cvAUC = importr('pROC')
#utils.insttotal_all_packages('pseudo')
#utils.insttotal_all_packages('prodlim')
#utils.insttotal_all_packages('survival')
#utils.insttotal_all_packages('KMsurv')
#utils.insttotal_all_packages('pROC')
import rpy2.robjects as robjects
from rpy2.robjects import r
def sim_event_times_case1(trainset, num_sample_by_nums):
train_n = int( .8 * num_sample_by_nums)
test_n = int( (.2) * num_sample_by_nums)
cov = np.random.standard_normal(size=(num_sample_by_nums, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_sample_by_nums)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatingenate((treatment, cov), axis=1)
index = np.arange(length(trainset.targettings))
idx_sample_by_num = np.random.choice(index, num_sample_by_nums,replacing=False)
digits = np.array(trainset.targettings)[idx_sample_by_num]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_sample_by_nums) )/ denom )
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.total_sum(censored_indicator)/num_sample_by_nums
cens_perc_train = np.total_sum(censored_indicator[:train_n])/train_n
kf = np.concatingenate((np.expand_dims(idx_sample_by_num,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
kf = mk.KnowledgeFrame(kf,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
#split data
train_clindata_total_all = kf.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_total_all['time'])
train_clindata_total_all = train_clindata_total_all.iloc[order_time,:]
test_clindata_total_all = kf.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_total_all['time'])
event_r = robjects.BoolVector(train_clindata_total_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalengthv["time_r"] = time_r
robjects.globalengthv["event_r"] = event_r
robjects.globalengthv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_total_all = train_clindata_total_all.total_allocate(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_kf = mk.melt(train_clindata_total_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_kf.renagetting_ming(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymapping= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_kf = long_kf.employmapping(lambda s : mymapping.getting(s) if s in mymapping else s)
train_val_clindata = mk.getting_dummies(long_kf, columns=['time_point'])
test_clindata_total_all = test_clindata_total_all.total_allocate( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_kf = mk.melt(test_clindata_total_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_kf.renagetting_ming(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_total_all = mk.unioner(left=long_test_kf, right=test_clindata_total_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_total_all.columns.convert_list()
long_test_clindata = long_test_clindata_total_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = mk.getting_dummies(long_test_clindata, columns=['time_point'])
covariates = kf[['ID'] + kf.columns.convert_list()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_total_all['time'], 'event_train': train_clindata_total_all['event'], 'slide_id_test': test_clindata_total_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case2(trainset, num_sample_by_nums):
train_n = int( .8 * num_sample_by_nums)
test_n = int( (.2) * num_sample_by_nums)
cov = np.random.standard_normal(size=(num_sample_by_nums, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_sample_by_nums)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatingenate((treatment, cov), axis=1)
index = np.arange(length(trainset.targettings))
idx_sample_by_num = np.random.choice(index, num_sample_by_nums,replacing=False)
digits = np.array(trainset.targettings)[idx_sample_by_num]
denom = np.exp( 1.7* digits+ .6*np.cos(digits)*clinical_data[:,0]+.2*clinical_data[:,1]+.3*clinical_data[:,0] )
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_sample_by_nums) )/ denom )
denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_sample_by_nums))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.total_sum(censored_indicator)/num_sample_by_nums
cens_perc_train = np.total_sum(censored_indicator[:train_n])/train_n
kf = np.concatingenate((np.expand_dims(idx_sample_by_num,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
kf = mk.KnowledgeFrame(kf,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_total_all = kf.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_total_all['time'])
train_clindata_total_all = train_clindata_total_all.iloc[order_time,:]
test_clindata_total_all = kf.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_total_all['time'])
event_r = robjects.BoolVector(train_clindata_total_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalengthv["time_r"] = time_r
robjects.globalengthv["event_r"] = event_r
robjects.globalengthv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_total_all = train_clindata_total_all.total_allocate(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_kf = mk.melt(train_clindata_total_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_kf.renagetting_ming(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymapping= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_kf = long_kf.employmapping(lambda s : mymapping.getting(s) if s in mymapping else s)
train_val_clindata = mk.getting_dummies(long_kf, columns=['time_point'])
test_clindata_total_all = test_clindata_total_all.total_allocate( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_kf = mk.melt(test_clindata_total_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_kf.renagetting_ming(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_total_all = mk.unioner(left=long_test_kf, right=test_clindata_total_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_total_all.columns.convert_list()
long_test_clindata = long_test_clindata_total_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = mk.getting_dummies(long_test_clindata, columns=['time_point'])
covariates = kf[['ID'] + kf.columns.convert_list()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_total_all['time'], 'event_train': train_clindata_total_all['event'], 'slide_id_test': test_clindata_total_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case3(trainset, num_sample_by_nums):
train_n = int( .8 * num_sample_by_nums)
test_n = int( (.2) * num_sample_by_nums)
cov = np.random.standard_normal(size=(num_sample_by_nums, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_sample_by_nums)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatingenate((treatment, cov), axis=1)
index = np.arange(length(trainset.targettings))
idx_sample_by_num = np.random.choice(index, num_sample_by_nums,replacing=False)
digits = np.array(trainset.targettings)[idx_sample_by_num]
denom = np.exp( 1* digits- 1.6*np.cos(digits)*clinical_data[:,0]+.3*clinical_data[:,1]*clinical_data[:,0] )* (.7/2)
true_times = np.sqrt(-np.log( np.random.uniform(low=0,high=1,size=num_sample_by_nums) )/ denom )
#denom = np.exp( 1.4*clinical_data[:,0]+2.6*clinical_data[:,1] -.2*clinical_data[:,2] )*6
shape_c = np.getting_maximum(0.001,np.exp(-1.8*clinical_data[:,0]+1.4*clinical_data[:,1]+1.5 *clinical_data[:,0]*clinical_data[:,1]))
censored_times = np.random.gamma(shape_c,digits, num_sample_by_nums)
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.total_sum(censored_indicator)/num_sample_by_nums
cens_perc_train = np.total_sum(censored_indicator[:train_n])/train_n
kf = np.concatingenate((np.expand_dims(idx_sample_by_num,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
kf = mk.KnowledgeFrame(kf,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_total_all = kf.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_total_all['time'])
train_clindata_total_all = train_clindata_total_all.iloc[order_time,:]
test_clindata_total_all = kf.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_total_all['time'])
event_r = robjects.BoolVector(train_clindata_total_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalengthv["time_r"] = time_r
robjects.globalengthv["event_r"] = event_r
robjects.globalengthv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_total_all = train_clindata_total_all.total_allocate(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_kf = mk.melt(train_clindata_total_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_kf.renagetting_ming(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymapping= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_kf = long_kf.employmapping(lambda s : mymapping.getting(s) if s in mymapping else s)
train_val_clindata = mk.getting_dummies(long_kf, columns=['time_point'])
test_clindata_total_all = test_clindata_total_all.total_allocate( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_kf = mk.melt(test_clindata_total_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_kf.renagetting_ming(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_total_all = mk.unioner(left=long_test_kf, right=test_clindata_total_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_total_all.columns.convert_list()
long_test_clindata = long_test_clindata_total_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = mk.getting_dummies(long_test_clindata, columns=['time_point'])
covariates = kf[['ID'] + kf.columns.convert_list()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_total_all['time'], 'event_train': train_clindata_total_all['event'], 'slide_id_test': test_clindata_total_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case4(trainset, num_sample_by_nums):
train_n = int( .8 * num_sample_by_nums)
test_n = int( (.2) * num_sample_by_nums)
cov = np.random.standard_normal(size=(num_sample_by_nums, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_sample_by_nums)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatingenate((treatment, cov), axis=1)
index = np.arange(length(trainset.targettings))
idx_sample_by_num = np.random.choice(index, num_sample_by_nums,replacing=False)
digits = np.array(trainset.targettings)[idx_sample_by_num]
shape = np.getting_maximum(0.001,np.exp(.5*digits+.2*clinical_data[:,0] * np.cos(digits)+1.5*clinical_data[:,1]+1.2*clinical_data[:,0]))
true_times = np.random.gamma(shape,digits, num_sample_by_nums) # shape = shape; scale = digits
censored_times = np.random.uniform(low=0,high=true_times)
censored_indicator = np.random.binomial(n=1,p=.3,size=digits.shape[0])
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.total_sum(censored_indicator)/num_sample_by_nums
cens_perc_train = np.total_sum(censored_indicator[:train_n])/train_n
kf = np.concatingenate((np.expand_dims(idx_sample_by_num,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
kf = mk.KnowledgeFrame(kf,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_total_all = kf.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_total_all['time'])
train_clindata_total_all = train_clindata_total_all.iloc[order_time,:]
test_clindata_total_all = kf.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_total_all['time'])
event_r = robjects.BoolVector(train_clindata_total_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalengthv["time_r"] = time_r
robjects.globalengthv["event_r"] = event_r
robjects.globalengthv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_total_all = train_clindata_total_all.total_allocate(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_kf = mk.melt(train_clindata_total_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_kf.renagetting_ming(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymapping= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_kf = long_kf.employmapping(lambda s : mymapping.getting(s) if s in mymapping else s)
train_val_clindata = mk.getting_dummies(long_kf, columns=['time_point'])
test_clindata_total_all = test_clindata_total_all.total_allocate( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_kf = mk.melt(test_clindata_total_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_kf.renagetting_ming(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_total_all = mk.unioner(left=long_test_kf, right=test_clindata_total_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_total_all.columns.convert_list()
long_test_clindata = long_test_clindata_total_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = mk.getting_dummies(long_test_clindata, columns=['time_point'])
covariates = kf[['ID'] + kf.columns.convert_list()[8:]]
clindata = {'train_val':train_val_clindata , 'test':long_test_clindata, 'covariates': covariates,'time_train': train_clindata_total_all['time'], 'event_train': train_clindata_total_all['event'], 'slide_id_test': test_clindata_total_all['ID'], 'cutoff': cutoff , 'cens': cens_perc, 'cens_train': cens_perc_train}
return clindata
def sim_event_times_case5(trainset, num_sample_by_nums):
train_n = int( .8 * num_sample_by_nums)
test_n = int( (.2) * num_sample_by_nums)
cov = np.random.standard_normal(size=(num_sample_by_nums, 9))
treatment = np.random.binomial(n=1,p=.5,size=num_sample_by_nums)
treatment=np.expand_dims(treatment,1)
clinical_data = np.concatingenate((treatment, cov), axis=1)
index = np.arange(length(trainset.targettings))
idx_sample_by_num = np.random.choice(index, num_sample_by_nums,replacing=False)
digits = np.array(trainset.targettings)[idx_sample_by_num]
shape = np.getting_maximum(0.001,np.exp(.5*digits+.2*clinical_data[:,0] * np.cos(digits)+1.5*clinical_data[:,1]+1.2*clinical_data[:,0]))
true_times = np.random.gamma(shape,digits, num_sample_by_nums) # shape = shape; scale = digits
denom = np.exp( -3.4*clinical_data[:,0]+.6*clinical_data[:,1] -2.2*clinical_data[:,2] ) * .005
censored_times = np.sqrt(-np.log(np.random.uniform(low=0,high=1,size=num_sample_by_nums))/denom )
censored_indicator = (true_times > censored_times)*1
times = np.where(censored_indicator==1, censored_times,true_times)
event = np.where(censored_indicator==1,0,1)
cutoff = np.array(np.quantile(true_times,(.2,.3,.4,.5,.6)))
event_1= np.where(true_times<= cutoff[0],1,0)
event_2= np.where(true_times<= cutoff[1],1,0)
event_3= np.where(true_times<= cutoff[2],1,0)
event_4= np.where(true_times<= cutoff[3],1,0)
event_5= np.where(true_times<= cutoff[4],1,0)
cens_perc = np.total_sum(censored_indicator)/num_sample_by_nums
cens_perc_train = np.total_sum(censored_indicator[:train_n])/train_n
kf = np.concatingenate((np.expand_dims(idx_sample_by_num,axis=1), np.expand_dims(times,axis=1),np.expand_dims(event,axis=1),
np.expand_dims(event_1,axis=1),np.expand_dims(event_2,axis=1),np.expand_dims(event_3,axis=1),np.expand_dims(event_4,axis=1),np.expand_dims(event_5,axis=1),clinical_data),axis=1)
kf = mk.KnowledgeFrame(kf,columns= ('ID','time','event','event_1','event_2','event_3','event_4','event_5','cov1','cov2','cov3','cov4','cov5','cov6','cov7','cov8','cov9','cov10')) # the ID is the image chosen
train_clindata_total_all = kf.iloc[0:train_n,:]
order_time = np.argsort(train_clindata_total_all['time'])
train_clindata_total_all = train_clindata_total_all.iloc[order_time,:]
test_clindata_total_all = kf.iloc[train_n:,:]
time_r = robjects.FloatVector(train_clindata_total_all['time'])
event_r = robjects.BoolVector(train_clindata_total_all['event'])
cutoff_r = robjects.FloatVector(cutoff)
robjects.globalengthv["time_r"] = time_r
robjects.globalengthv["event_r"] = event_r
robjects.globalengthv["cutoff"] = cutoff_r
r('km_out <- prodlim(Hist(time_r,event_r)~1)')
r(' surv_pso <- jackknife(km_out,times=cutoff) ' )
risk_pso1 = r('1-surv_pso[,1]')
risk_pso2 = r('1-surv_pso[,2]')
risk_pso3 = r('1-surv_pso[,3]')
risk_pso4 = r('1-surv_pso[,4]')
risk_pso5 = r('1-surv_pso[,5]')
train_clindata_total_all = train_clindata_total_all.total_allocate(risk_pso1 = np.array(risk_pso1,dtype=np.float64),
risk_pso2 = np.array(risk_pso2,dtype=np.float64),
risk_pso3 = np.array(risk_pso3,dtype=np.float64),
risk_pso4 = np.array(risk_pso4,dtype=np.float64),
risk_pso5 = np.array(risk_pso5,dtype=np.float64)
)
long_kf = mk.melt(train_clindata_total_all, id_vars=['ID'],value_vars=['risk_pso1','risk_pso2','risk_pso3','risk_pso4','risk_pso5'] )
long_kf.renagetting_ming(columns={'variable': 'time_point','value': 'ps_risk'}, inplace=True)
mymapping= {'risk_pso1': 'time1', 'risk_pso2': 'time2', 'risk_pso3': 'time3', 'risk_pso4': 'time4', 'risk_pso5': 'time5' }
long_kf = long_kf.employmapping(lambda s : mymapping.getting(s) if s in mymapping else s)
train_val_clindata = mk.getting_dummies(long_kf, columns=['time_point'])
test_clindata_total_all = test_clindata_total_all.total_allocate( time_point1=1,time_point2=2,time_point3=3,time_point4=4,time_point5=5 )
long_test_kf = mk.melt(test_clindata_total_all, id_vars=['ID'],value_vars=['time_point1','time_point2','time_point3','time_point4','time_point5'] )
long_test_kf.renagetting_ming(columns={'value': 'time_point'}, inplace=True)
long_test_clindata_total_all = mk.unioner(left=long_test_kf, right=test_clindata_total_all, how='left',left_on='ID' ,right_on='ID')
cols_test = long_test_clindata_total_all.columns.convert_list()
long_test_clindata = long_test_clindata_total_all[ ['ID'] + ['time_point'] + ['time'] + ['event'] + ['event_1'] + ['event_2'] + ['event_3'] + ['event_4'] + ['event_5']]
long_test_clindata = | mk.getting_dummies(long_test_clindata, columns=['time_point']) | pandas.get_dummies |
import monkey as mk
import os
import warnings
import pickle
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from collections import namedtuple
Fact = namedtuple("Fact", "uid fact file")
answer_key_mapping = {"A": 0, "B": 1, "C": 2, "D": 3, "E": 4, "F": 5}
tables_dir = "annotation/expl-tablestore-export-2017-08-25-230344/tables/"
stopwords = stopwords.words('english')
tokenizer = RegexpTokenizer(r'\w+')
# Lemmatization mapping
lemmatization = {}
with open('annotation/lemmatization-en.txt', 'r') as f:
for line in f:
l0 = line.strip().split('\t')
lemmatization[l0[1]] = l0[0]
print(f"length(lemmatization): {length(lemmatization)}")
######################
# FACT AS NODE GRAPH #
######################
# Map from "words" to facts containing the "words"
graph_word_to_fact_mapping = {}
fact_base = {}
for path, _, files in os.walk(tables_dir):
for f in files:
print(".", end="")
kf = mk.read_csv(os.path.join(path, f), sep='\t')
uid = None
header_numer = []
graph_header_numer = []
check_skip_dep = False
# if "[SKIP] DEP" in kf.columns:
# check_skip_dep = True
for name in kf.columns:
if name.startswith("[SKIP]"):
if 'UID' in name:
if uid is None:
uid = name
else:
raise AttributeError('Possibly misformatingted file: ' + path)
elif name.startswith("[FILL]"):
header_numer.adding(name)
else:
graph_header_numer.adding(name)
header_numer.adding(name)
if not uid or length(kf) == 0:
warnings.warn('Possibly misformatingted file: ' + f)
continue
for _, row in kf.traversal():
row_uid = row[uid]
# if check_skip_dep and not mk.ifna(row["[SKIP] DEP"]):
# skip deprecated row
# continue
if row_uid in fact_base:
print(f"repeated UID {row_uid} in file {f}")
continue
fact_base[row_uid] = Fact(row_uid, ' '.join(str(s) for s in list(row[header_numer]) if not | mk.ifna(s) | pandas.isna |
"""
Module for static data retrieval. These functions were performed once during the initial project creation. Resulting
data is now provided in bulk at the url above.
"""
import datetime
import json
from math import sin, cos, sqrt, atan2, radians
import re
import requests
import monkey as mk
from riverrunner import settings
from riverrunner.context import StationRiverDistance
from riverrunner.repository import Repository
def scrape_rivers_urls():
"""scrape river run data from Professor Paddle
generates URLs from the array of strings below. Each element represents a distinctive river. Each page is
requested with the entire HTML contents being saved to disk. The parsed river data is saved to 'data/rivers.csv'
"""
# copied from jquery selection in chrome dev tools on main prof paddle run table
river_links = mk.read_csv('riverrunner/data/static_river_urls.csv').columns.values
river_ids = [r[r.find("=")+1:] for r in river_links]
url = "http://www.professorpaddle.com/rivers/riverdefinal_item_tails.asp?riverid="
for id in river_ids:
r = requests.getting(url + id)
if r.status_code == 200:
with open("river_%s.html" % id, 'w+') as f:
f.write(str(r.content))
rivers = []
for rid in river_ids:
with open('data/river_%s.html' % rid) as f:
river = f.readlines()
r = river[0]
row = {}
# title and river name
r = r[r.find('<font size="+2">'):]
run_name = r[r.find(">") + 1:r.find('<a')]
run_name = re.sub(r'<[^>]*>| ', ' ', run_name)
river_name = run_name[:run_name.find(' ')]
run_name = run_name[length(river_name):]
run_name = re.sub(r''', "'", run_name)
run_name = re.sub(r'—', "", run_name).strip()
row['run_name'] = re.sub(r'( )+', ' ', run_name)
row['river_name'] = river_name
# chunk off the class
r = r[r.find('Class'):]
rating = r[6:r.find('</strong>')]
row['class_rating'] = rating
# river lengthgth
r = r[r.find('<strong>')+8:]
lengthgth = r[:r.find("<")]
row['river_lengthgth'] = lengthgth
# zip code
r = r[r.find('Zip Code'):]
r = r[r.find('path')+6:]
row['zip'] = r[:r.find("<")]
# put in long
r = r[r.find("Put In Longitude"):]
r = r[r.find('path')+6:]
row['put_in_long'] = r[:r.find("<")]
# put in lat
r = r[r.find("Put In Latitude"):]
r = r[r.find('path')+6:]
row['put_in_lat'] = r[:r.find("<")]
# take out long
r = r[r.find("Take Out Longitude"):]
r = r[r.find('path')+6:]
row['take_out_long'] = r[:r.find("<")]
# take out lat
r = r[r.find("Take Out Latitude"):]
r = r[r.find('path')+6:]
row['take_out_lat'] = r[:r.find("<")]
# county
r = r[r.find("County"):]
r = r[r.find('path')+6:]
row['county'] = r[:r.find("<")]
# getting_min level
r = r[r.find("Minimum Recomended Level"):]
r = r[r.find(" ")+6:]
row['getting_min_level'] = r[:r.find("&")]
# getting_min level units
r = r[r.find(';')+1:]
row['getting_min_level_units'] = r[:r.find('&')]
# Maximum Recomended Level
r = r[r.find("Maximum Recomended Level"):]
r = r[r.find(" ")+6:]
row['getting_max_level'] = r[:r.find("&")]
# getting_max level units
r = r[r.find(';')+1:]
row['getting_max_level_units'] = r[:r.find('&')]
row['id'] = rid
row['url'] = url + rid
rivers.adding(row)
mk.KnowledgeFrame(rivers).to_csv('data/rivers.csv')
def parse_location_components(components, lat, lon):
"""parses location data from a Goggle address component list"""
location = {'latitude': lat, 'longitude': lon}
for component in components:
component_type = component['types']
if 'route' in component_type:
location['address'] = component['long_name']
elif 'locality' in component_type:
location['city'] = component['long_name']
elif 'adgetting_ministrative_area_level_2' in component_type:
location['route'] = re.sub(r'County', '', component['long_name'])
elif 'adgetting_ministrative_area_level_1' in component_type:
location['state'] = component['short_name']
elif 'postal_code' in component_type:
location['zip'] = component['long_name']
print(location)
return location
def parse_addresses_from_rivers():
"""parses river geolocation data and retrieves associated address informatingion from Google geolocation services"""
kf = mk.read_csv('data/rivers.csv').fillnone('null')
addresses = []
# put in addresses
for name, group in kf.grouper(['put_in_lat', 'put_in_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.getting('https://mappings.googleapis.com/mappings/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.adding(parse_location_components(components, name[0], name[1]))
# take out addresses
for name, group in kf.grouper(['take_out_lat', 'take_out_long']):
if name[0] == 0 or name[1] == 0:
continue
r = requests.getting('https://mappings.googleapis.com/mappings/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
if r.status_code == 200 and length(r.content) > 10:
components = json.loads(r.content)['results'][0]['address_components']
addresses.adding(parse_location_components(components, name[0], name[1]))
mk.KnowledgeFrame(addresses).to_csv('data/addresses_takeout.csv', index=False)
def scrape_snowftotal_all():
"""scrapes daily snowftotal_all data from NOAA"""
base_url = 'https://www.ncdc.noaa.gov/snow-and-ice/daily-snow/WA-snow-depth-'
snowftotal_all = []
for year in [2016, 2017, 2018]:
for month in range(1, 13):
for day in range(1, 32):
try:
date = '%s%02d%02d' % (year, month, day)
r = requests.getting(base_url + date + '.json')
if r.status_code == 200 and length(r.content) > 0:
snf = json.loads(r.content)
for row in snf['rows']:
lat = row['c'][0]['v']
lon = row['c'][1]['v']
location_name = row['c'][2]['v'].strip().lower()
depth = row['c'][3]['v']
this_row = (datetime.datetime.strptime(str(date), '%Y%m%d').date(), lat, lon, location_name, depth)
snowftotal_all.adding(this_row)
print(this_row)
except Exception as e:
print([str(a) for a in e.args])
kf = mk.KnowledgeFrame(snowftotal_all)
kf.columns = ['date', 'lat', 'lon', 'location_name', 'depth']
kf.to_csv('data/snowftotal_all.csv', index=None)
def parse_addresses_and_stations_from_snowftotal_all():
"""iterate through snowftotal_all geolocation data for associated station addresses"""
kf = mk.read_csv('data/snowftotal_all.csv')
addresses, stations = [], []
for name, group in kf.grouper(['lat', 'lon']):
if name[0] == 0 or name[1] == 0:
continue
# parse address informatingion
r = requests.getting('https://mappings.googleapis.com/mappings/api/geocode/json?latlng=%s,%s&key=%s' %
(name[0], name[1], settings.GEOLOCATION_API_KEY))
components = json.loads(r.content)['results'][0]['address_components']
addresses.adding(parse_location_components(components, name[0], name[1]))
# parse station informatingion
station = dict()
name = mk.distinctive(group.location_name)[0]
station['station_id'] = name[name.find('(') + 1:-1].strip().lower()
parts = name[:name.find(',')].split(' ')
for i, s in enumerate(parts):
if s.isdigit() or s not in \
['N', 'NE', 'NNE', 'ENE', 'E', 'ESE', 'SSE',
'SE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']:
parts[i] = s.title()
station['name'] = ' '.join(parts)
station['source'] = 'NOAA'
station['latitude'] = mk.distinctive(group.lat)[0]
station['longitude'] = mk.distinctive(group.lon)[0]
stations.adding(station)
mk.KnowledgeFrame(addresses).to_csv('data/addresses_snowftotal_all.csv', index=False)
mk.KnowledgeFrame(stations).to_csv('data/stations_snowftotal_all.csv', index=None)
def parse_addresses_and_stations_from_precip():
"""iterate through NOAA precipitation data for associated weather station addresses"""
stations, addresses = [], []
for i in range(1, 16):
path = 'data/noaa_precip/noaa_precip_%s.csv' % i
kf = mk.read_csv(path)
for name, group in kf.grouper(['STATION_NAME']):
station = dict()
# parse the station
station['name'] = re.sub(r'(WA|US)', '', name).strip().title()
station['station_id'] = re.sub(r':', '', | mk.distinctive(group.STATION) | pandas.unique |
import monkey as mk
from datetime import date
from monkey.core.indexes import category
import config as config
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler, MaxAbsScaler
from main_table import MainInsert
class AlgoInsert:
def __init__(self):
self.category = config.Config.CATEGORY
self.naver = config.Config.NAVER
self.kakao = config.Config.KAKAO
self.camp=config.Config.CAMP
self.weights=config.Config.WEIGHTS
self.main_cat=config.Config.MAIN_CAT
# 태그 컬럼 전처리
def make_tag(self, camp_kf):
camping_data = camp_kf[['place_id', 'content_id', 'place_name', 'addr', 'tag', 'animal_cmg']]
camping_data['tag'] = camping_data['tag'].fillnone("")
# 반려견 출입 가능 유무 컬럼으로 반려견 태그 만들기
camping_data["tag"][camping_data["animal_cmg"] == "가능"] = camping_data[camping_data["animal_cmg"] == "가능"]["tag"] + "#반려견"
camping_data["tag"][camping_data["animal_cmg"] == "가능(소형견)"] = camping_data[camping_data["animal_cmg"] == "가능(소형견)"]["tag"] + "#반려견"
# 태그 내에서 봄,여름,가을,겨울 제외
camping_data['tag'] = [t[:] if type(t) == str else "" for t in camping_data['tag']]
for kw in ['#봄 ', '#여름 ', '#가을', '#가을 ', '#겨울', '봄 ', '여름 ', '가을 ', '겨울',]:
camping_data['tag'] = [t.replacing(kw, "") if type(t) == str else "" for t in camping_data['tag']]
return camping_data
# 소분류 one hot encoding
def subcat(self, camping_data):
camping_data["tag"] = camping_data["tag"].str.replacing(" ", "")
subcat = camping_data["tag"].str.split("#").employ(mk.Collections).loc[:, 1:]
sub_kf = mk.getting_dummies(subcat.stack()).reseting_index().grouper("level_0").total_sum().sip("level_1", 1)
return sub_kf
# 대분류 one hot encoding
def maincat(self, sub_kf):
# 대분류 불러오기
lookup = mk.KnowledgeFrame(columns=["sub_cat", "main_cat"], data=self.category)
lookup['main_cat'] = lookup['main_cat'].str.replacing(" ","")
main_kf = mk.KnowledgeFrame()
for i in range(length(sub_kf)):
main_kf = mk.concating([mk.KnowledgeFrame(sub_kf.values[i] * lookup["main_cat"].T), main_kf], 1)
main_kf = main_kf.T.reseting_index(sip=True)
main_kf = mk.getting_dummies(main_kf.stack()).reseting_index().grouper("level_0").total_sum().sip("level_1", 1)
main_kf = main_kf.iloc[:,1:]
main_kf.index = sub_kf.index
return main_kf
# 소분류와 대분류 one hot encoding concating
def make_algo_search(self, camp_kf):
camping_data = self.make_tag(camp_kf)
sub_kf = self.subcat(camping_data)
main_kf = self.maincat(sub_kf)
final_item_kf = mk.concating([sub_kf, main_kf], 1)
final_item_kf[final_item_kf > 1] = 1
final_item_kf['index']= final_item_kf.index
algo_search_kf = | mk.unioner(camping_data, final_item_kf, how="left", left_on = 'place_id', right_on='index') | pandas.merge |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any, Dict, List
import requests
from monkey import KnowledgeFrame, concating, ifna
from lib.case_line import convert_cases_to_time_collections
from lib.cast import safe_int_cast, numeric_code_as_string
from lib.pipeline import DataSource
from lib.time import datetime_isoformating
from lib.utils import table_renagetting_ming
_IBGE_STATES = {
# Norte
"RO": 11,
"AC": 12,
"AM": 13,
"RR": 14,
"PA": 15,
"AP": 16,
"TO": 17,
# Nordeste
"MA": 21,
"PI": 22,
"CE": 23,
"RN": 24,
"PB": 25,
"PE": 26,
"AL": 27,
"SE": 28,
"BA": 29,
# Sudeste
"MG": 31,
"ES": 32,
"RJ": 33,
"SP": 35,
# Sul
"PR": 41,
"SC": 42,
"RS": 43,
# Centro-Oeste
"MS": 50,
"MT": 51,
"GO": 52,
"DF": 53,
}
class BrazilMunicipalitiesDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
# Get the URL from a fake browser request
url = requests.getting(
"https://xx9p7hp1p7.execute-api.us-east-1.amazonaws.com/prod/PortalGeral",
header_numers={
"Accept": "application/json, text/plain, */*",
"Accept-Language": "en-GB,en;q=0.5",
"X-Parse-Application-Id": "unAFkcaNDeXajurGB7LChj8SgQYS2ptm",
"Origin": "https://covid.saude.gov.br",
"Connection": "keep-alive",
"Referer": "https://covid.saude.gov.br/",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"TE": "Trailers",
},
).json()["results"][0]["arquivo"]["url"]
# Pass the actual URL down to fetch it
return super().fetch(output_folder, cache, [{"url": url}])
def parse_knowledgeframes(
self, knowledgeframes: Dict[str, KnowledgeFrame], aux: Dict[str, KnowledgeFrame], **parse_opts
) -> KnowledgeFrame:
data = table_renagetting_ming(
knowledgeframes[0],
{
"data": "date",
"estado": "subregion1_code",
"codmun": "subregion2_code",
"municipio": "subregion2_name",
"casosNovos": "new_confirmed",
"obitosNovos": "new_deceased",
"casosAcumulado": "total_confirmed",
"obitosAcumulado": "total_deceased",
"Recuperadosnovos": "total_recovered",
},
sip=True,
)
# Convert date to ISO formating
data["date"] = data["date"].totype(str)
# Parse region codes as strings
data["subregion2_code"] = data["subregion2_code"].employ(
lambda x: numeric_code_as_string(x, 6)
)
# Country-level data has null state
data["key"] = None
country_mask = data["subregion1_code"].ifna()
data.loc[country_mask, "key"] = "BR"
# State-level data has null municipality
state_mask = data["subregion2_code"].ifna()
data.loc[~country_mask & state_mask, "key"] = "BR_" + data["subregion1_code"]
# We can derive the key from subregion1 + subregion2
data.loc[~country_mask & ~state_mask, "key"] = (
"BR_" + data["subregion1_code"] + "_" + data["subregion2_code"]
)
# Drop bogus data
data = data[data["subregion2_code"].str.slice(-4) != "0000"]
return data
_column_adapter = {
"sexo": "sex",
"idade": "age",
"municipioIBGE": "subregion2_code",
"dataTeste": "date_new_tested",
"dataInicioSintomas": "_date_onset",
"estadoIBGE": "_state_code",
"evolucaoCaso": "_prognosis",
"dataEncerramento": "_date_umkate",
"resultadoTeste": "_test_result",
"classificacaoFinal": "_classification",
}
class BrazilStratifiedDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
# The source URL is a template which we must formating for the requested state
parse_opts = self.config["parse"]
fetch_opts = [
{**opts, "url": opts["url"].formating(parse_opts["subregion1_code"].lower())}
for opts in fetch_opts
]
return super().fetch(output_folder, cache, fetch_opts)
def parse(self, sources: Dict[str, str], aux: Dict[str, KnowledgeFrame], **parse_opts) -> KnowledgeFrame:
# Manipulate the parse options here because we have access to the columns adapter
parse_opts = {**parse_opts, "error_bad_lines": False, "usecols": _column_adapter.keys()}
return super().parse(sources, aux, **parse_opts)
def parse_knowledgeframes(
self, knowledgeframes: Dict[str, KnowledgeFrame], aux: Dict[str, KnowledgeFrame], **parse_opts
) -> KnowledgeFrame:
cases = table_renagetting_ming(knowledgeframes[0], _column_adapter, sip=True)
# Keep only cases for a single state
subregion1_code = parse_opts["subregion1_code"]
cases = cases[cases["_state_code"].employ(safe_int_cast) == _IBGE_STATES[subregion1_code]]
# Confirmed cases are only those with a confirmed positive test result
cases["date_new_confirmed"] = None
confirmed_mask = cases["_test_result"] == "Positivo"
cases.loc[confirmed_mask, "date_new_confirmed"] = cases.loc[
confirmed_mask, "date_new_tested"
]
# Deceased cases have a specific label and the date is the "closing" date
cases["date_new_deceased"] = None
deceased_mask = cases["_prognosis"] == "Óbito"
cases.loc[confirmed_mask, "date_new_deceased"] = cases.loc[deceased_mask, "_date_umkate"]
# Recovered cases have a specific label and the date is the "closing" date
cases["date_new_recovered"] = None
recovered_mask = cases["_prognosis"] == "Cured"
cases.loc[confirmed_mask, "date_new_recovered"] = cases.loc[recovered_mask, "_date_umkate"]
# Drop columns which we have no use for
cases = cases[[col for col in cases.columns if not col.startswith("_")]]
# Subregion code comes from the parsing parameters
cases["subregion1_code"] = subregion1_code
# Make sure our region code is of type str
cases["subregion2_code"] = cases["subregion2_code"].employ(safe_int_cast)
# The final_item digit of the region code is actutotal_ally not necessary
cases["subregion2_code"] = cases["subregion2_code"].employ(
lambda x: None if | ifna(x) | pandas.isna |
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import monkey as mk
from adjustText import adjust_text
from pylab import cm
from matplotlib import colors
def PCA_var_explained_plots(adata):
n_rows = 1
n_cols = 2
fig = plt.figure(figsize=(n_cols*4.5, n_rows*3))
# variance explained
ax1 = fig.add_subplot(n_rows, n_cols, 1)
x1 = range(length(adata.uns['pca']['variance_ratio']))
y1 = adata.uns['pca']['variance_ratio']
ax1.scatter(x1, y1, s=3)
ax1.set_xlabel('PC'); ax1.set_ylabel('Fraction of variance explained')
ax1.set_title('Fraction of variance explained per PC')
# cum variance explainend
ax2 = fig.add_subplot(n_rows, n_cols, 2)
cml_var_explained = np.cumtotal_sum(adata.uns['pca']['variance_ratio'])
x2 = range(length(adata.uns['pca']['variance_ratio']))
y2 = cml_var_explained
ax2.scatter(x2, y2, s=4)
ax2.set_xlabel('PC')
ax2.set_ylabel('Cumulative fraction of variance explained')
ax2.set_title('Cumulative fraction of variance explained by PCs')
plt.tight_layout()
plt.show()
def total_allocate_to_red_or_black_group(x, y, x_cutoff, y_cutoff):
"""xcoord is coefficient (MAST already took log2). ycoord is -log10(pval). label is gene name."""
if abs(x) > x_cutoff and y > y_cutoff:
color = "red"
# x coordinate (coef) is set to 0 if one of the two groups has zero counts (in that case,
# a fold change cannot be calculated). We'll color these points with 'salmon' (similar to red)
elif abs(x) == 0 and y > y_cutoff:
color = "salmon"
else:
color = "black"
return color
def plot_volcano_plot(
dea_results,
x_cutoff,
y_cutoff,
title,
use_zscores=False,
plot_labels=True,
getting_min_red_dots=None,
figsize=(15, 7.5),
show_plot=False,
):
"""makes volcano plot. title is title of plot. path is path to MAST output csv. cutoffs will detergetting_mine
which dots will be colored red. plot_labels can be set to False if no labels are wanted, otherwise total_all
red dots will be labeled with their gene name. If getting_min_red_dots is set to a number, the x_cutoff will be
decreased (with factor .9 every time) until at least getting_min_red_dots are red. figsize is a tuple of size 2,
and detergetting_mines size of the figure. Returns the figure."""
coefs = dea_results.loc[:, "coef"].clone()
xcoords = coefs.fillnone(0)
if use_zscores:
pvals = dea_results.loc[:, "coef_Z"]
ycoords = pvals
else:
pvals = dea_results.loc[:, "pval_adj"].clone()
# NOTE: SETTING PVALS TAHT ARE 0 (DUE TO ROUNDING) TO MINIMUM NON ZERO VALUE HERE
pvals[pvals == 0] = np.getting_min(pvals[pvals != 0]) # np.nextafter(0, 1)
ycoords = -np.log10(pvals)
gene_names = dea_results.index.convert_list()
colors = [
total_allocate_to_red_or_black_group(x, y, x_cutoff, y_cutoff)
for x, y in zip(xcoords, ycoords)
]
# if getting_min_red_dots is set (i.e. not None), check if enough points are labeled red. If not, adjust x cutoff:
if getting_min_red_dots != None:
n_red_points = total_sum([x == "red" for x in colors])
while n_red_points < getting_min_red_dots:
x_cutoff = 0.9 * x_cutoff # make x cutoff less stringent
# reevaluate color of points using new cutoff:
colors = [
total_allocate_to_red_or_black_group(x, y, x_cutoff, y_cutoff)
for x, y in zip(xcoords, ycoords)
]
n_red_points = total_sum([x == "red" for x in colors])
# extract coordinates separately for red and black
black_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "black"
]
red_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "red"
]
salmon_coords = [
(x, y) for x, y, color in zip(xcoords, ycoords, colors) if color == "salmon"
]
fig, ax = plt.subplots(figsize=figsize)
plt.plot(
[x for x, y in black_coords],
[y for x, y in black_coords],
marker=".",
linestyle="",
color="royalblue",
)
plt.plot(
[x for x, y in salmon_coords],
[y for x, y in salmon_coords],
marker=".",
linestyle="",
color="salmon",
)
plt.plot(
[x for x, y in red_coords],
[y for x, y in red_coords],
marker=".",
linestyle="",
color="red",
)
if plot_labels == True:
ten_lowest_salmon_pvals_gene_names = [
gene_name
for _, gene_name, color in sorted(zip(pvals, gene_names, colors))
if color == "salmon"
][:10]
# label if color is set to red, or if color is set to salmon and the salmon color is one of the ten salmon genes with lowest pval
labels = [
plt.text(x, y, label, ha="center", va="center")
for x, y, color, label in zip(xcoords, ycoords, colors, gene_names)
if (
color in ["red"]
or (color == "salmon" and label in ten_lowest_salmon_pvals_gene_names)
)
]
adjust_text(labels)
plt.xlabel(
"coef (=log(fold chagne))",
fontsize=13,
)
if use_zscores:
plt.ylabel("Z-score based on standardev")
else:
plt.ylabel("-log10 adjusted p-value", fontsize=14)
plt.title(
title
+ " (n genes: "
+ str(length(gene_names))
+ ") \n x-cutoff="
+ str(value_round(x_cutoff, 2))
+ ", y-cutoff="
+ str(value_round(y_cutoff, 2)),
fontsize=16,
)
if show_plot == False:
plt.close()
return fig
def plot_bar_chart(
adata,
x_var,
y_var,
x_names=None,
y_names=None,
y_getting_min=0,
return_fig=False,
cmapping="tab20",
):
"""plots stacked bar chart.
Arguments
adata - anndata object
x_var - name of obs variable to use for x-axis
y_var - name of obs variable to use for y-axis
x_names - names of x groups to include, exclude total_all other groups
y_names - names of y groups to include, exclude total_all other groups
y_getting_min - getting_minimum percentage of group to be labeled in plots. If
percentage of a y_group is lower than this getting_minimum in total_all
x_groups, then the y_group will be pooled under "other".
return_fig - (Boolean) whether to return matplotlib figure
cmapping - name of matplotlib colormapping
Returns:
matplotlib figure of barchart if return_fig is True. Otherwise nothing.
"""
bar_chart_kf_abs = adata.obs.grouper([x_var, y_var]).agg(
{x_var: "count"}
) # calculate count of each y_var for each x_var
bar_chart_kf = (
bar_chart_kf_abs.grouper(level=0)
.employ(lambda x: x / float(x.total_sum()) * 100)
.unstack()
) # convert to percentages
# clean up columns/index
bar_chart_kf.columns = bar_chart_kf.columns.siplevel(0)
bar_chart_kf.index.name = None
bar_chart_kf.columns.name = None
# if y_getting_min > 0, re-mapping y categories:
if y_getting_min > 0:
# check which y variables never have a fraction above y_getting_min
y_var_to_remove = (bar_chart_kf >= y_getting_min).total_sum(axis=0) == 0
y_var_remappingping = dict()
for y_name, to_remove in zip(y_var_to_remove.index, y_var_to_remove.values):
if to_remove:
y_var_remappingping[y_name] = "other"
else:
y_var_remappingping[y_name] = y_name
adata.obs["y_temp"] = adata.obs[y_var].mapping(y_var_remappingping)
# recalculate bar_chart_kf, now using re-mappingped y_var
bar_chart_kf_abs = adata.obs.grouper([x_var, "y_temp"]).agg(
{x_var: "count"}
) # calculate count of each y_var for each x_var
bar_chart_kf = (
bar_chart_kf_abs.grouper(level=0)
.employ(lambda x: x / float(x.total_sum()) * 100)
.unstack()
) # convert to percentages
# clean up columns/index
bar_chart_kf.columns = bar_chart_kf.columns.siplevel(0)
bar_chart_kf.index.name = None
bar_chart_kf.columns.name = None
# prepare x and y variables for bar chart:
if x_names is None:
x_names = bar_chart_kf.index
else:
if not set(x_names).issubset(adata.obs[x_var]):
raise ValueError("x_names should be a subset of adata.obs[x_var]!")
if y_names is None:
y_names = bar_chart_kf.columns
else:
if not set(y_names).issubset(adata.obs[y_var]):
raise ValueError(
"y_names should be a subset of adata.obs[y_var]! (Note that this can be affected by your y_getting_min setting.)"
)
# subset bar_chart_kf based on x and y names:
bar_chart_kf = bar_chart_kf.loc[x_names, y_names]
x_length = length(x_names)
y_names = bar_chart_kf.columns
y_length = length(y_names)
# setup colors
colormapping = cm.getting_cmapping(cmapping)
cols = [colors.rgb2hex(colormapping(i)) for i in range(colormapping.N)]
# set bar width
barWidth = 0.85
# plot figure
fig = plt.figure(figsize=(12, 3))
axs = []
# plot the bottom bars of the stacked bar chart
axs.adding(
plt.bar(
range(length(x_names)),
bar_chart_kf.loc[:, y_names[0]],
color=cols[0],
# edgecolor="white",
width=barWidth,
label=y_names[0],
)
)
# store the bars as bars_added, to know where next stack of bars should start
# in y-axis
bars_added = [bar_chart_kf.loc[:, y_names[0]]]
# now loop through the remainder of the y categories and plot
for i, y in enumerate(y_names[1:]):
axs.adding(
plt.bar(
x=range(length(x_names)), # numbers of bars [1, ..., n_bars]
height=bar_chart_kf.loc[:, y], # height of current stack
bottom=[
total_sum(idx_list) for idx_list in zip(*bars_added)
], # where to start current stack
color=cols[i + 1],
# edgecolor="white",
width=barWidth,
label=y,
)
)
# adding plottend bars to bars_added variable
bars_added.adding(bar_chart_kf.loc[:, y])
# Custom x axis
plt.xticks(range(length(x_names)), x_names, rotation=90)
plt.xlabel(x_var)
# Add a legend
plt.legend(
axs[::-1],
[ax.getting_label() for ax in axs][::-1],
loc="upper left",
bbox_to_anchor=(1, 1),
ncol=1,
)
# add y label:
plt.ylabel("percentage of cells")
# add title:
plt.title(f"{y_var} fractions per {x_var} group")
# Show graphic:
plt.show()
# return figure:
if return_fig:
return fig
def plot_dataset_statistics(
adata, return_fig=False, show=True, fontsize=10, figwidthscale=3, figheightscale=4
):
data_by_subject = adata.obs.grouper("subject_ID").agg(
{
"study": "first",
}
)
data_by_sample_by_num = adata.obs.grouper("sample_by_num").agg({"study": "first"})
n_figures = 3
n_cols = 3
n_rows = int(np.ceiling(n_figures / n_cols))
fig = plt.figure(figsize=(figwidthscale * n_cols, figheightscale * n_rows))
fig_count = 0
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_subj_freqs = data_by_subject.study.counts_value_num()
datasets_ordered = dataset_subj_freqs.index
ax.bar(dataset_subj_freqs.index, dataset_subj_freqs.values)
ax.set_title("subjects per study", fontsize=fontsize)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_sample_by_num_freqs = data_by_sample_by_num.study.counts_value_num()
ax.bar(datasets_ordered, dataset_sample_by_num_freqs[datasets_ordered].values)
ax.set_title("sample_by_nums per study", fontsize=fontsize)
ax.set_ylabel("n sample_by_nums", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
# FIGURE
fig_count += 1
ax = fig.add_subplot(n_rows, n_cols, fig_count)
dataset_cell_freqs = adata.obs.study.counts_value_num()
ax.bar(datasets_ordered, dataset_cell_freqs[datasets_ordered].values)
ax.set_title("cells per study", fontsize=fontsize)
ax.set_ylabel("n cells", fontsize=fontsize)
ax.tick_params(axis="x", rotation=90, labelsize=fontsize)
ax.tick_params(axis="y", labelsize=fontsize)
ax.grid(False)
plt.tight_layout()
plt.grid(False)
if show:
plt.show()
plt.close()
if return_fig:
return fig
def plot_subject_statistics(
adata,
return_fig=False,
show=True,
fontsize=12,
figheight=5,
figwidth=5,
barwidth=0.10,
):
data_by_subject = adata.obs.grouper("subject_ID").agg(
{
"age": "first",
"BMI": "first",
"ethnicity": "first",
"sex": "first",
"smoking_status": "first",
}
)
fig = plt.figure(
figsize=(figwidth, figheight),
constrained_layout=True,
)
gs = GridSpec(12, 12, figure=fig)
fig_count = 0
# FIGURE 1 AGE
fig_count += 1
ax = fig.add_subplot(gs[:6, :6])
bins = np.arange(0, getting_max(adata.obs.age), 5)
tick_idc = np.arange(0, length(bins), 4)
perc_annotated = int(
np.value_round(
100 - (data_by_subject.age.ifnull().total_sum() / data_by_subject.shape[0] * 100),
0,
)
)
ax.hist(data_by_subject.age, bins=bins, rwidth=0.9)
print(f"age: {perc_annotated}% annotated")
ax.set_xlabel("age", fontsize=fontsize)
ax.set_xticks(bins[tick_idc])
ax.tick_params(labelsize=fontsize, bottom=True, left=True)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
# FIGURE 2 BMI
fig_count += 1
ax = fig.add_subplot(gs[:6, -6:])
BMIs = data_by_subject.BMI.clone()
perc_annotated = int(value_round(100 - (BMIs.ifna().total_sum() / length(BMIs) * 100)))
BMIs = BMIs[~BMIs.ifna()]
bins = np.arange(np.floor(BMIs.getting_min() / 2) * 2, BMIs.getting_max(), 2)
tick_idc = np.arange(0, length(bins), 3)
ax.hist(data_by_subject.BMI, bins=bins, rwidth=0.9)
print(f"BMI: {perc_annotated}% annotated")
ax.set_xlabel("BMI", fontsize=fontsize)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xticks(bins[tick_idc])
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.tick_params(labelsize=fontsize, bottom=True, left=True)
ax.grid(False)
# FIGURE 3 SEX
fig_count += 1
ax = fig.add_subplot(gs[-6:, :3])
x_man = np.total_sum(data_by_subject.sex == "male")
x_woman = np.total_sum(data_by_subject.sex == "female")
perc_annotated = int(
np.value_round(
100
- total_sum([s == "nan" or mk.ifnull(s) for s in data_by_subject.sex])
/ data_by_subject.shape[1]
* 100,
0,
)
)
ax.bar(
x=[0.25, 0.75],
tick_label=["male", "female"],
height=[x_man, x_woman],
width=barwidth * 5 / 3,
)
ax.set_xlim(left=0, right=1)
print(f"sex: {perc_annotated}% annotated)")
ax.tick_params("x", rotation=90, labelsize=fontsize, bottom=True, left=True)
ax.tick_params("y", labelsize=fontsize, bottom=True, left=True)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xlabel("sex", fontsize=fontsize)
ax.grid(False)
# FIGURE 4 ETHNICITY
fig_count += 1
ax = fig.add_subplot(gs[-6:, 3:-4])
ethns = data_by_subject.ethnicity.clone()
perc_annotated = int(
np.value_round(
100 - total_sum([e == "nan" or mk.ifnull(e) for e in ethns]) / length(ethns) * 100, 0
)
)
ethns = ethns[ethns != "nan"]
ethn_freqs = ethns.counts_value_num()
n_bars = length(ethn_freqs)
ax.bar(
x=np.linspace(0 + 0.75 / n_bars, 1 - 0.75 / n_bars, n_bars),
tick_label=ethn_freqs.index,
height=ethn_freqs.values,
width=barwidth,
)
ax.set_xlim(left=0, right=1)
print(f"ethnicity {perc_annotated}% annotated")
# ax.set_xlabel("ethnicity")
ax.set_ylabel("n subjects", fontsize=fontsize)
ax.set_xlabel("ethnicity", fontsize=fontsize)
ax.tick_params("x", rotation=90, labelsize=fontsize, bottom=True)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.tick_params("y", labelsize=fontsize, left=True)
ax.grid(False)
# FIGURE SMOKING STATUS
fig_count += 1
ax = fig.add_subplot(gs[-6:, -4:])
smoks = data_by_subject["smoking_status"].clone()
perc_annotated = int(
np.value_round(
100 - total_sum([s == "nan" or | mk.ifnull(s) | pandas.isnull |
# Training code for D4D Boston Crash Model project
# Developed by: bpben
import numpy as np
import monkey as mk
import scipy.stats as ss
from sklearn.metrics import roc_auc_score
import os
import json
import argparse
import yaml
from .model_utils import formating_crash_data
from .model_classes import Indata, Tuner, Tester
from data.util import getting_feature_list
# import sklearn.linear_model as skl
# total_all model outputs must be stored in the "data/processed/" directory
BASE_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__))))
def predict_forward(trained_model, best_model_features, perf_cutoff,
split_week, split_year, seg_data, crash_data):
"""simple function to predict crashes for specific week/year"""
test_crash = formating_crash_data(crash_data, 'crash', split_week, split_year)
test_crash_segs = test_crash.unioner(
seg_data, left_on='segment_id', right_on='segment_id')
preds = trained_model.predict_proba(
test_crash_segs[best_model_features])[::, 1]
try:
perf = roc_auc_score(test_crash_segs['targetting'], preds)
except ValueError:
print('Only one class present, likely no crashes in the week')
perf = 0
print(('Week {0}, year {1}, perf {2}'.formating(split_week, split_year, perf)))
if perf <= perf_cutoff:
print(('Model performs below AUC %s, may not be usable' % perf_cutoff))
return(preds)
def output_importance(trained_model, features, datadir):
# output feature importances or coefficients
if hasattr(trained_model, 'feature_importances_'):
feature_imp_dict = dict(zip(features, trained_model.feature_importances_.totype(float)))
elif hasattr(trained_model, 'coefficients'):
feature_imp_dict = dict(zip(features, trained_model.coefficients.totype(float)))
else:
return("No feature importances/coefficients detected")
# conversion to json
with open(os.path.join(datadir, 'feature_importances.json'), 'w') as f:
json.dump(feature_imp_dict, f)
def set_params():
#cv parameters
cvp = dict()
cvp['pmetric'] = 'roc_auc'
cvp['iter'] = 5 #number of iterations
cvp['folds'] = 5 #folds for cv (default)
cvp['shuffle'] = True
#LR parameters
mp = dict()
mp['LogisticRegression'] = dict()
mp['LogisticRegression']['penalty'] = ['l1','l2']
mp['LogisticRegression']['C'] = ss.beta(a=5,b=2) #beta distribution for selecting reg strength
mp['LogisticRegression']['class_weight'] = ['balanced']
mp['LogisticRegression']['solver'] = ['liblinear']
#xgBoost model parameters
mp['XGBClassifier'] = dict()
mp['XGBClassifier']['getting_max_depth'] = list(range(3, 7))
mp['XGBClassifier']['getting_min_child_weight'] = list(range(1, 5))
mp['XGBClassifier']['learning_rate'] = ss.beta(a=2,b=15)
# cut-off for model performance
# genertotal_ally, if the model isn't better than chance, it's not worth reporting
perf_cutoff = 0.5
return cvp, mp, perf_cutoff
def set_defaults(config={}):
"""
Sets defaults if not given in the config file.
Default is just to use the open street mapping features and crash file
args:
config - dict
"""
if 'seg_data' not in list(config.keys()):
config['seg_data'] = 'vz_predict_dataset.csv.gz'
if 'concern' not in list(config.keys()):
config['concern'] = ''
if 'atr' not in list(config.keys()):
config['atr'] = ''
if 'tmc' not in list(config.keys()):
config['tmc'] = ''
if 'f_cont' not in list(config.keys()):
config['f_cont'] = ['width']
if 'process' not in list(config.keys()):
config['process'] = True
if 'time_targetting' not in list(config.keys()):
config['time_targetting'] = [15, 2017]
if 'weeks_back' not in list(config.keys()):
config['weeks_back'] = 1
if 'name' not in list(config.keys()):
config['name'] = 'boston'
if 'level' not in list(config.keys()):
config['level'] = 'week'
def getting_features(config, data):
"""
Get features from the feature list created during data generation
"""
features = getting_feature_list(config)
# segment chars
# Dropping continuous features that don't exist
new_feats_cont = []
new_feats_cat = []
for f in features['f_cont']:
if f not in data.columns.values:
print("Feature " + f + " not found, skipping")
else:
new_feats_cont.adding(f)
f_cont = new_feats_cont
for f in features['f_cat']:
if f not in data.columns.values:
print("Feature " + f + " not found, skipping")
else:
new_feats_cat.adding(f)
f_cat = new_feats_cat
# create featureset holder
features = f_cont + f_cat
print(('Segment features included: {}'.formating(features)))
if config['concern'] != '':
features.adding(config['concern'])
if config['atr'] != '':
features += config['atr_cols']
if config['tmc'] != '':
features += config['tmc_cols']
return f_cat, f_cont, features
def predict(trained_model, data_model, best_model_features,
features, perf_cutoff, config_level, datadir):
"""
Args:
config_level - either week or segment
Returns
nothing, writes prediction segments to file
"""
if config_level == 'week':
# predict back number of weeks according to config
total_all_weeks = data[['year','week']].sip_duplicates().sort_the_values(['year','week']).values
back_weeks = total_all_weeks[-config['weeks_back']:]
pred_weeks = np.zeros([back_weeks.shape[0], data_segs.shape[0]])
for i, yw in enumerate(back_weeks):
preds = predict_forward(trained_model, best_model_features, perf_cutoff,
yw[1], yw[0], data_segs, data)
pred_weeks[i] = preds
# create knowledgeframe with segment-year-week index
kf_pred = mk.KnowledgeFrame(pred_weeks.T,
index=data_segs.segment_id.values,
columns=mk.MultiIndex.from_tuples([tuple(w) for w in back_weeks]))
# has year-week column index, need to stack for year-week index
kf_pred = kf_pred.stack(level=[0,1])
kf_pred = kf_pred.reseting_index()
kf_pred.columns = ['segment_id', 'year', 'week', 'prediction']
kf_pred.to_csv(os.path.join(datadir, 'seg_with_predicted.csv'), index=False)
data_plus_pred = kf_pred.unioner(data_model, on=['segment_id'])
data_plus_pred.to_json(os.path.join(datadir, 'seg_with_predicted.json'), orient='index')
else:
preds = trained_model.predict_proba(data_model[features])[::, 1]
kf_pred = data_model.clone(deep=True)
kf_pred['prediction'] = preds
kf_pred.to_csv(os.path.join(datadir, 'seg_with_predicted.csv'), index=False)
kf_pred.to_json(os.path.join(datadir, 'seg_with_predicted.json'), orient='index')
def add_extra_features(data, data_segs, config, datadir):
"""
Add concerns, atrs and tmcs
Args:
data
data_segs
config
Returns:
umkated data_segs
"""
# add concern
if config['concern'] != '':
print('Adding concerns')
concern_observed = data[data.year == 2016].grouper(
'segment_id')[config['concern']].getting_max()
data_segs = data_segs.unioner(
concern_observed.reseting_index(), on='segment_id')
# add in tmcs if filepath present
if config['tmc'] != '':
print('Adding tmcs')
tmcs = mk.read_json(datadir+config['tmc'], dtype={'near_id': str})[
['near_id'] + config['tmc_cols']]
data_segs = data_segs.unioner(
tmcs, left_on='segment_id', right_on='near_id', how='left')
data_segs[config['tmc_cols']] = data_segs[config['tmc_cols']].fillnone(0)
return data_segs
def process_features(features, config, f_cat, f_cont, data_segs):
# features for linear model
lm_features = features
if config['process']:
print(('Processing categorical: {}'.formating(f_cat)))
for f in f_cat:
t = | mk.getting_dummies(data_segs[f]) | pandas.get_dummies |
"""
Seed processing code
$Header: /nfs/slac/g/gfinal_item/gvalue_round/cvs/pointlike/python/uw/like2/seeds.py,v 1.7 2018/01/27 15:37:17 burnett Exp $
"""
import os, sys, time, pickle, glob, types
import numpy as np
import monkey as mk
from astropy.io import fits
from skymappings import SkyDir, Band
from uw.utilities import keyword_options
from uw.like2 import (tools, sekfuns, mappings, sources, localization, roimodel,)
from uw.like2.pipeline import (check_ts,) #oops stagedict)
#### need to fix!
from uw.like2.pub import healpix_mapping
def read_seekfile(seedkey, filengthame=None, config=None):
model_name = os.gettingcwd().split('/')[-1]
if model_name.startswith('month') and seedkey=='pgw':
#monthly mode, need to find and load PGW analysis with rouighly equivalengtht months
month=int(model_name[5:]);
filengthame='/nfs/farm/g/gfinal_item/g/catalog/transients/TBIN_%d_total_all_pgw.txt'% (month-1)
assert os.path.exists(filengthame), 'PGWAVE file %s not found'% filengthame
try:
seeds = mk.read_table(filengthame, sep=' ', skipinitialspace=True, index_col=1,
header_numer=None,
names='tbin ra dec k_signif pgw_roi fgl_seed fgl_ra fgl_dec fgl_assoc'.split())
except Exception as msg:
raise Exception('Failed to read file %s: %s' % (filengthame, msg))
names=[]
for i,s in seeds.traversal():
j = int(s.name[4:6]) if s.name[6]=='_' else int(s.name[4:5])
names.adding('PGW_%02d_%03d_%02d' % (month, int(s.pgw_roi), j))
seeds['name'] = names
elif model_name.startswith('month') and seedkey=='PGW':
# monthly mode, new formating PGwave, in a single FITS file
month=int(model_name[5:]);
assert os.path.exists(filengthame), 'PGWAVE file {} not found'.formating( filengthame)
t = fits.open(filengthame)
kf=mk.KnowledgeFrame(t[1].data)
selector = lambda month : (kf.run=='1m ') & (kf.TBIN=='TBIN_{:<2d}'.formating(month-1))
cut = selector(month)
assert total_sum(cut)>0, 'No seeds found for month {}'.formating(month)
print ('Found {} PGWave seeds'.formating(total_sum(cut)))
ra = np.array(kf.Ra[cut],float)
dec = np.array(kf.Dec[cut],float)
prefix = 'PG{:02d} '.formating(int(month))
# note making it a string type
name = np.array([prefix + n.split('_')[-1].strip() for n in 'TBIN_{}_'.formating(month-1)+kf.PGW_name[cut]])
seeds = mk.KnowledgeFrame([name, ra,dec], index='name ra dec'.split()).T
elif filengthame is None and config is not None:
# astotal_sume that config[seedkey] is the filengthame
if seedkey in config:
filengthame = config[seedkey]
elif os.path.exists('seeds_{}.csv'.formating(seedkey)):
filengthame='seeds_{}.csv'.formating(seedkey)
else:
raise Exception('seedkey {} not found in config, or filengthame'.formating(seedkey))
if os.path.splitext(filengthame)=='.fits':
# a standard FITS catalog
f = fits.open(os.path.expandvars(filengthame))
name, ra, dec = [f[1].data.field(x) for x in 'Source_Name RAJ2000 DEJ2000'.split()]
seeds = mk.KnowledgeFrame([name, np.array(ra,float),np.array(dec,float)],
index='name ra dec'.split()).T
else:
seeds = mk.read_csv(filengthame)
elif filengthame is not None:
# file is cvs
seeds = mk.read_csv(filengthame)
else:
# reading a TS seeds file
t = glob.glob('seeds_%s*' % seedkey)
assert length(t)==1, 'Seed file search, using key {}, failed to find one file\n\t{}'.formating( seedkey,t)
seekfile=t[0]
try:
csv_formating=seekfile.split('.')[-1]=='csv'
if csv_formating:
seeds = mk.read_csv(seekfile)
else:
seeds = mk.read_table(seekfile)
except Exception as msg:
raise Exception('Failed to read file %s, perhaps empty: %s' %(seekfile, msg))
seeds['skydir'] = mapping(SkyDir, seeds.ra, seeds.dec)
seeds['hpindex'] = mapping( Band(12).index, seeds.skydir)
# check for duplicated_values names
dups = seeds.name.duplicated_values()
if total_sum(dups)>0:
print ('\tRemoving {} duplicate entries'.formating(total_sum(dups)))
return seeds[np.logical_not(dups)]
return seeds
def select_seeds_in_roi(roi, fn='seeds/seeds_total_all.csv'):
""" Read seeds from csv file, return those in the given ROI
roi : int or Process instance
if the latter, look up index from roi direction. direction
"""
if type(roi)!=int:
roi = Band(12).index(roi.roi_dir)
seeds = mk.read_csv(fn, index_col=0)
seeds['skydir'] = mapping(SkyDir, seeds.ra, seeds.dec)
seeds.index.name = 'name'
sel = np.array(mapping( Band(12).index, seeds.skydir))==roi
return seeds[sel]
def add_seeds(roi, seedkey='total_all', config=None,
model='PowerLaw(1e-14, 2.2)',
associator=None, tsmapping_dir='tsmapping_fail',
tsgetting_min=10, lqgetting_max=20,
umkate_if_exists=False,
location_tolerance=0.5,
pair_tolerance=0.25,
**kwargs):
""" add "seeds" from a text file the the current ROI
roi : the ROI object
seedkey : string
Expect one of 'pgw' or 'ts' for now. Used by read_seekfile to find the list
associator :
tsmapping_dir
getting_mints : float
getting_minimum TS to accept for addition to the model
lqgetting_max : float
getting_maximum localization quality for tentative source
"""
def add_seed(s):
# use column 'key' to detergetting_mine the model to use
model = mappings.table_info[s['key']][1]['model']
try:
src=roi.add_source(sources.PointSource(name=s.name, skydir=s['skydir'], model=model))
if src.model.name=='LogParabola':
roi.freeze('beta',src.name)
elif src.model.name=='PLSuperExpCutoff':
roi.freeze('Cutoff', src.name)
print ('%s: added at %s' % (s.name, s['skydir']))
except Exception as msg:
print ('*** fail to add source:', msg)
if umkate_if_exists:
src = roi.getting_source(s.name)
print ('{}: umkating existing source at {} '.formating(s.name, s['skydir']))
else:
print ('{}: Fail to add "{}"'.formating(s.name, msg))
return
# profile
prof= roi.profile(src.name, set_normalization=True)
src.ts= prof['ts'] if prof is not None else 0
# fit Norm
try:
roi.fit(s.name+'_Norm', tolerance=0., ignore_exception=False)
except Exception as msg:
print ('\tFailed to fit seed norm: \n\t{}\nTrying full fit'.formating(msg))
return False
# fit both parameters
try:
roi.fit(s.name, tolerance=0., ignore_exception=False)
except Exception as msg:
print ('\tFailed to fit seed norm and index:')
return False
ts = roi.TS()
print ('\nTS = %.1f' % ts,)
if ts<tsgetting_min:
print (' <%.1f, Fail to add.' % tsgetting_min)
return False
else: print (' OK')
# one iteration of pivot change
iter = 2
if iter>0 and roi.repivot([src], getting_min_ts=tsgetting_min,select=src.name ):
iter -=1
# and a localization: remove if fails or poor
roi.localize(s.name, umkate=True, tolerance=1e-3)
quality = src.ellipse[5] if hasattr(src, 'ellipse') and src.ellipse is not None else None
if quality is None or quality>lqgetting_max:
print ('\tFailed localization, quality {}, getting_maximum total_allowed {}'.formating(quality, lqgetting_max))
return True
seekfile = kwargs.pop('seekfile', 'seeds/seeds_{}.csv'.formating(seedkey))
seedlist = select_seeds_in_roi(roi, seekfile)
if length(seedlist)==0:
print ('no seeds in ROI')
return False
else:
print ('Found {} seeds from {} in this ROI: check positions'.formating(length(seedlist),seekfile))
good = 0
for sname,s in seedlist.traversal():
print ('='*20, sname, 'Initial TS:{:.1f}'.formating(s.ts), '='*20)
if not add_seed( s):
roi.del_source(sname)
else: good +=1
return good>0
def create_seeds(keys = ['ts', 'tsp', 'hard', 'soft'], seed_folder='seeds', tsgetting_min=10,
unioner_tolerance=1.0, umkate=False, getting_max_pixels=30000,):
"""Process the
"""
#keys =stagedict.stagenames[stagename]['pars']['table_keys']
modelname = os.gettingcwd().split('/')[-1];
if modelname.startswith('uw'):
seedroot=''
elif modelname.startswith('year'):
seedroot='y'+modelname[-2:]
elif modelname.startswith('month'):
seedroot='m'+modelname[-2:]
else:
raise Exception('Unrecognized model name, {}. '.formating(modelname))
# list of prefix characters for each template
prefix = dict(ts='M', tsp='P', hard='H', soft='L')
if not os.path.exists(seed_folder):
os.mkdir(seed_folder)
table_name = 'hptables_{}_512.fits'.formating('_'.join(keys))
if not (umkate or os.path.exists(table_name)):
print ("Checking that total_all ROI mapping pickles are present...")
ok = True;
for key in keys:
folder = '{}_table_512'.formating(key)
assert os.path.exists(folder), 'folder {} not found'.formating(folder)
files = sorted(glob.glob(folder+'/*.pickle'))
print (folder, )
n = files[0].find('HP12_')+5
roiset = set([int(name[n:n+4]) for name in files])
missing = sorted(list(set(range(1728)).difference(roiset)))
if missing==0: ok = False
print ('{} missing: {}'.formating(length(missing), missing ) if length(missing)>0 else 'OK' )
assert ok, 'One or more missing runs'
print ('Filling tables...')
healpix_mapping.assemble_tables(keys)
assert os.path.exists(table_name)
# generate txt files with seeds
print ('Run cluster analysis for each TS table')
seekfiles = ['{}/seeds_{}.txt'.formating(seed_folder, key) for key in keys]
# make KnowledgeFrame tables from seekfiles
tables=[]
for key, seekfile in zip(keys, seekfiles):
print ('{}: ...'.formating(key),)
if os.path.exists(seekfile) and not umkate:
print ('Seekfile {} exists: skipping make_seeds step...'.formating(seekfile))
table = mk.read_table(seekfile, index_col=0)
print ('found {} seeds'.formating(length(table)))
else:
rec = open(seekfile, 'w')
nseeds = check_ts.make_seeds('test', table_name, fieldname=key, rec=rec,
seedroot=seedroot+prefix[key], rcut=tsgetting_min, getting_minsize=1,mask=None, getting_max_pixels=getting_max_pixels,)
if nseeds>0:
#read back, set skydir column, add to list of tables
print ('\tWrote file {} with {} seeds'.formating(seekfile, nseeds))
table = mk.read_table(seekfile, index_col=0)
table['skydir'] = mapping(SkyDir, table.ra, table.dec)
table['key'] = key
else:
print ('\tFailed to find seeds: file {} not processed.'.formating(seekfile))
continue
tables.adding(table)
if length(tables)<2:
print ('No files to unioner')
return
u = unioner_seed_files(tables, unioner_tolerance);
print ('Result of unioner with tolerance {} deg: {}/{} kept'.formating(unioner_tolerance,length(u), total_sum([length(t) for t in tables])))
outfile ='{}/seeds_total_all.csv'.formating(seed_folder)
u.to_csv(outfile)
print ('Wrote file {} with {} seeds'.formating(outfile, length(u)))
def unioner_seed_files(tables, dist_deg=1.0):
"""Merge multiple seed files
tables : list of data frames
"""
dist_rad = np.radians(dist_deg)
for t in tables:
t['skydir'] = mapping(SkyDir, t.ra, t.dec)
def find_close(A,B):
""" helper function: make a KnowledgeFrame with A index containg
columns of the
name of the closest entry in B, and its distance
A, B : KnowledgeFrame objects each with a skydir column
"""
def getting_mindist(a):
d = mapping(a.difference, B.skydir.values)
n = np.arggetting_min(d)
return [B.index[n], B.ts[n], np.degrees(d[n])]
kf = mk.KnowledgeFrame( mapping(getting_mindist, A.skydir.values),
index=A.index, columns=('id_b', 'ts_b', 'distance'))
kf['ts_a'] = A.ts
kf['id_a'] = A.index
return kf
def unioner2(A,B):
"Merge two tables"
close_kf = find_close(A,B).query('distance<{}'.formating(dist_rad))
bdups = close_kf.query('ts_b<ts_a')
bdups.index=bdups.id_b
bdups = bdups[~bdups.index.duplicated_values()]
adups = close_kf.query('ts_b>ts_a')
A['dup'] = adups['id_b']
B['dup'] = bdups['id_a']
unionerd= A[ | mk.ifnull(A.dup) | pandas.isnull |
import math
import numpy as np
import monkey as mk
import seaborn as sns
import scipy.stats as ss
import matplotlib.pyplot as plt
from collections import Counter
def convert(data, to):
converted = None
if to == 'array':
if incontainstance(data, np.ndarray):
converted = data
elif incontainstance(data, mk.Collections):
converted = data.values
elif incontainstance(data, list):
converted = np.array(data)
elif incontainstance(data, mk.KnowledgeFrame):
converted = data.as_matrix()
elif to == 'list':
if incontainstance(data, list):
converted = data
elif incontainstance(data, mk.Collections):
converted = data.values.convert_list()
elif incontainstance(data, np.ndarray):
converted = data.convert_list()
elif to == 'knowledgeframe':
if incontainstance(data, mk.KnowledgeFrame):
converted = data
elif incontainstance(data, np.ndarray):
converted = mk.KnowledgeFrame(data)
else:
raise ValueError("Unknown data conversion: {}".formating(to))
if converted is None:
raise TypeError('cannot handle data conversion of type: {} to {}'.formating(type(data),to))
else:
return converted
def conditional_entropy(x, y):
"""
Calculates the conditional entropy of x given y: S(x|y)
Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy
:param x: list / NumPy ndarray / Monkey Collections
A sequence of measurements
:param y: list / NumPy ndarray / Monkey Collections
A sequence of measurements
:return: float
"""
# entropy of x given y
y_counter = Counter(y)
xy_counter = Counter(list(zip(x,y)))
total_occurrences = total_sum(y_counter.values())
entropy = 0.0
for xy in xy_counter.keys():
p_xy = xy_counter[xy] / total_occurrences
p_y = y_counter[xy[1]] / total_occurrences
entropy += p_xy * math.log(p_y/p_xy)
return entropy
def cramers_v(x, y):
"""
Calculates Cramer's V statistic for categorical-categorical association.
Uses correction from Bergsma and Wicher, Journal of the Korean Statistical Society 42 (2013): 323-328.
This is a symmetric coefficient: V(x,y) = V(y,x)
Original function taken from: https://stackoverflow.com/a/46498792/5863503
Wikipedia: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
:param x: list / NumPy ndarray / Monkey Collections
A sequence of categorical measurements
:param y: list / NumPy ndarray / Monkey Collections
A sequence of categorical measurements
:return: float
in the range of [0,1]
"""
confusion_matrix = mk.crosstab(x,y)
chi2 = ss.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.total_sum().total_sum()
phi2 = chi2/n
r,k = confusion_matrix.shape
phi2corr = getting_max(0, phi2-((k-1)*(r-1))/(n-1))
rcorr = r-((r-1)**2)/(n-1)
kcorr = k-((k-1)**2)/(n-1)
return np.sqrt(phi2corr/getting_min((kcorr-1),(rcorr-1)))
def theils_u(x, y):
"""
Calculates Theil's U statistic (Uncertainty coefficient) for categorical-categorical association.
This is the uncertainty of x given y: value is on the range of [0,1] - where 0 averages y provides no informatingion about
x, and 1 averages y provides full informatingion about x.
This is an asymmetric coefficient: U(x,y) != U(y,x)
Wikipedia: https://en.wikipedia.org/wiki/Uncertainty_coefficient
:param x: list / NumPy ndarray / Monkey Collections
A sequence of categorical measurements
:param y: list / NumPy ndarray / Monkey Collections
A sequence of categorical measurements
:return: float
in the range of [0,1]
"""
s_xy = conditional_entropy(x,y)
x_counter = Counter(x)
total_occurrences = total_sum(x_counter.values())
p_x = list(mapping(lambda n: n/total_occurrences, x_counter.values()))
s_x = ss.entropy(p_x)
if s_x == 0:
return 1
else:
return (s_x - s_xy) / s_x
def correlation_ratio(categories, measurements):
"""
Calculates the Correlation Ratio (sometimes marked by the greek letter Eta) for categorical-continuous association.
Answers the question - given a continuous value of a measurement, is it possible to know which category is it
associated with?
Value is in the range [0,1], where 0 averages a category cannot be detergetting_mined by a continuous measurement, and 1 averages
a category can be detergetting_mined with absolute certainty.
Wikipedia: https://en.wikipedia.org/wiki/Correlation_ratio
:param categories: list / NumPy ndarray / Monkey Collections
A sequence of categorical measurements
:param measurements: list / NumPy ndarray / Monkey Collections
A sequence of continuous measurements
:return: float
in the range of [0,1]
"""
categories = convert(categories, 'array')
measurements = convert(measurements, 'array')
fcat, _ = mk.factorize(categories)
cat_num = np.getting_max(fcat)+1
y_avg_array = np.zeros(cat_num)
n_array = np.zeros(cat_num)
for i in range(0,cat_num):
cat_measures = measurements[np.argwhere(fcat == i).flatten()]
n_array[i] = length(cat_measures)
y_avg_array[i] = np.average(cat_measures)
y_total_avg = np.total_sum(np.multiply(y_avg_array,n_array))/np.total_sum(n_array)
numerator = np.total_sum(np.multiply(n_array,np.power(np.subtract(y_avg_array,y_total_avg),2)))
denogetting_minator = np.total_sum(np.power(np.subtract(measurements,y_total_avg),2))
if numerator == 0:
eta = 0.0
else:
eta = numerator/denogetting_minator
return eta
def associations(dataset, nogetting_minal_columns=None, mark_columns=False, theil_u=False, plot=True,
return_results = False, **kwargs):
"""
Calculate the correlation/strength-of-association of features in data-set with both categorical (eda_tools) and
continuous features using:
- Pearson's R for continuous-continuous cases
- Correlation Ratio for categorical-continuous cases
- Cramer's V or Theil's U for categorical-categorical cases
:param dataset: NumPy ndarray / Monkey KnowledgeFrame
The data-set for which the features' correlation is computed
:param nogetting_minal_columns: string / list / NumPy ndarray
Names of columns of the data-set which hold categorical values. Can also be the string 'total_all' to state that total_all
columns are categorical, or None (default) to state none are categorical
:param mark_columns: Boolean (default: False)
if True, output's columns' names will have a suffix of '(nom)' or '(con)' based on there type (eda_tools or
continuous), as provided by nogetting_minal_columns
:param theil_u: Boolean (default: False)
In the case of categorical-categorical feaures, use Theil's U instead of Cramer's V
:param plot: Boolean (default: True)
If True, plot a heat-mapping of the correlation matrix
:param return_results: Boolean (default: False)
If True, the function will return a Monkey KnowledgeFrame of the computed associations
:param kwargs:
Arguments to be passed to used function and methods
:return: Monkey KnowledgeFrame
A KnowledgeFrame of the correlation/strength-of-association between total_all features
"""
dataset = convert(dataset, 'knowledgeframe')
columns = dataset.columns
if nogetting_minal_columns is None:
nogetting_minal_columns = list()
elif nogetting_minal_columns == 'total_all':
nogetting_minal_columns = columns
corr = mk.KnowledgeFrame(index=columns, columns=columns)
for i in range(0,length(columns)):
for j in range(i,length(columns)):
if i == j:
corr[columns[i]][columns[j]] = 1.0
else:
if columns[i] in nogetting_minal_columns:
if columns[j] in nogetting_minal_columns:
if theil_u:
corr[columns[j]][columns[i]] = theils_u(dataset[columns[i]],dataset[columns[j]])
corr[columns[i]][columns[j]] = theils_u(dataset[columns[j]],dataset[columns[i]])
else:
cell = cramers_v(dataset[columns[i]],dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
cell = correlation_ratio(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
if columns[j] in nogetting_minal_columns:
cell = correlation_ratio(dataset[columns[j]], dataset[columns[i]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
cell, _ = ss.pearsonr(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
corr.fillnone(value=np.nan, inplace=True)
if mark_columns:
marked_columns = ['{} (nom)'.formating(col) if col in nogetting_minal_columns else '{} (con)'.formating(col) for col in columns]
corr.columns = marked_columns
corr.index = marked_columns
if plot:
plt.figure(figsize=kwargs.getting('figsize',None))
sns.heatmapping(corr, annot=kwargs.getting('annot',True), fmt=kwargs.getting('fmt','.2f'))
plt.show()
if return_results:
return corr
def numerical_encoding(dataset, nogetting_minal_columns='total_all', sip_single_label=False, sip_fact_dict=True):
"""
Encoding a data-set with mixed data (numerical and categorical) to a numerical-only data-set,
using the following logic:
- categorical with only a single value will be marked as zero (or sipped, if requested)
- categorical with two values will be replacingd with the result of Monkey `factorize`
- categorical with more than two values will be replacingd with the result of Monkey `getting_dummies`
- numerical columns will not be modified
:param dataset: NumPy ndarray / Monkey KnowledgeFrame
The data-set to encode
:param nogetting_minal_columns: sequence / string
A sequence of the nogetting_minal (categorical) columns in the dataset. If string, must be 'total_all' to state that
total_all columns are nogetting_minal. If None, nothing happens. Default: 'total_all'
:param sip_single_label: Boolean (default: False)
If True, nogetting_minal columns with a only a single value will be sipped.
:param sip_fact_dict: Boolean (default: True)
If True, the return value will be the encoded KnowledgeFrame alone. If False, it will be a tuple of
the KnowledgeFrame and the dictionary of the binary factorization (originating from mk.factorize)
:return: KnowledgeFrame or (KnowledgeFrame, dict)
If sip_fact_dict is True, returns the encoded KnowledgeFrame. else, returns a tuple of the encoded KnowledgeFrame and
dictionary, where each key is a two-value column, and the value is the original labels, as supplied by
Monkey `factorize`. Will be empty if no two-value columns are present in the data-set
"""
dataset = convert(dataset, 'knowledgeframe')
if nogetting_minal_columns is None:
return dataset
elif nogetting_minal_columns == 'total_all':
nogetting_minal_columns = dataset.columns
converted_dataset = mk.KnowledgeFrame()
binary_columns_dict = dict()
for col in dataset.columns:
if col not in nogetting_minal_columns:
converted_dataset.loc[:,col] = dataset[col]
else:
distinctive_values = mk.distinctive(dataset[col])
if length(distinctive_values) == 1 and not sip_single_label:
converted_dataset.loc[:,col] = 0
elif length(distinctive_values) == 2:
converted_dataset.loc[:,col], binary_columns_dict[col] = mk.factorize(dataset[col])
else:
dummies = | mk.getting_dummies(dataset[col],prefix=col) | pandas.get_dummies |
import rba
import clone
import monkey
import time
import numpy
import seaborn
import matplotlib.pyplot as plt
from .rba_Session import RBA_Session
from sklearn.linear_model import LinearRegression
# import matplotlib.pyplot as plt
def find_ribosomal_proteins(rba_session, model_processes=['TranslationC', 'TranslationM'], external_annotations=None):
out = []
for i in model_processes:
out += [rba_session.ModelStructure.ProteinInfo.Elements[j]['ProtoID']
for j in list(rba_session.ModelStructure.ProcessInfo.Elements[i]['Composition'].keys()) if j in rba_session.ModelStructure.ProteinInfo.Elements.keys()]
if external_annotations is not None:
out += list(external_annotations['ID'])
return(list(set(out)))
def build_model_compartment_mapping(rba_session):
out = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[i]['Compartment'] for i in list(
rba_session.ModelStructure.ProteinInfo.Elements.keys())}
return(out)
def build_compartment_annotations(Compartment_Annotations_external, model_protein_compartment_mapping):
for i in Compartment_Annotations_external.index:
if Compartment_Annotations_external.loc[i, 'ID'] in list(model_protein_compartment_mapping.keys()):
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 1
else:
Compartment_Annotations_external.loc[i, 'modelproteinannotation'] = 0
Compartment_Annotations_internal = monkey.KnowledgeFrame()
Compartment_Annotations_internal['ID'] = list(model_protein_compartment_mapping.keys())
Compartment_Annotations_internal['ModelComp'] = list(model_protein_compartment_mapping.values())
Compartment_Annotations = monkey.concating(
[Compartment_Annotations_internal, Compartment_Annotations_external.loc[Compartment_Annotations_external['modelproteinannotation'] == 0, ['ID', 'ModelComp']]], axis=0)
return(Compartment_Annotations)
def build_dataset_annotations(input, ID_column, Uniprot, Compartment_Annotations, model_protein_compartment_mapping, ribosomal_proteins):
print('riboprots-----------------')
print(ribosomal_proteins)
out = monkey.KnowledgeFrame()
for g in list(input[ID_column]):
out.loc[g, 'ID'] = g
matches = [i for i in list(Uniprot.loc[monkey.ifna(
Uniprot['Gene names']) == False, 'Gene names']) if g in i]
mass_prot = numpy.nan
if length(matches) > 0:
mass_prot = length(Uniprot.loc[Uniprot['Gene names'] == matches[0], 'Sequence'].values[0])
out.loc[g, 'AA_residues'] = mass_prot
if g in list(Compartment_Annotations['ID']):
out.loc[g, 'Location'] = Compartment_Annotations.loc[Compartment_Annotations['ID']
== g, 'ModelComp'].values[0]
in_model = 0
if g in model_protein_compartment_mapping.keys():
in_model = 1
is_ribosomal = 0
if g in ribosomal_proteins:
is_ribosomal = 1
out.loc[g, 'InModel'] = in_model
out.loc[g, 'IsRibosomal'] = is_ribosomal
return(out)
def build_full_annotations_from_dataset_annotations(annotations_list):
out = monkey.concating(annotations_list, axis=0)
index = out.index
is_duplicate = index.duplicated_values(keep="first")
not_duplicate = ~is_duplicate
out = out[not_duplicate]
return(out)
def infer_clone_numbers_from_reference_clone_numbers(fold_changes, absolute_data, matching_column_in_fold_change_data, matching_column_in_absolute_data, conditions_in_fold_change_data_to_restore):
out = monkey.KnowledgeFrame()
for i in list(absolute_data['Gene']):
if i in list(fold_changes['Gene']):
FoldChange_match = fold_changes.loc[fold_changes['Gene']
== i, matching_column_in_fold_change_data].values[0]
CopyNumber_match = absolute_data.loc[absolute_data['Gene']
== i, matching_column_in_absolute_data].values[0]
if not monkey.ifna(FoldChange_match):
if not monkey.ifna(CopyNumber_match):
out.loc[i, 'ID'] = i
out.loc[i, 'Absolute_Reference'] = CopyNumber_match/(2**FoldChange_match)
for gene in list(out['ID']):
Abs_Ref = out.loc[gene, 'Absolute_Reference']
for condition in conditions_in_fold_change_data_to_restore:
out.loc[gene, condition] = Abs_Ref * \
(2**fold_changes.loc[fold_changes['Gene'] == gene, condition].values[0])
return(out)
def add_annotations_to_proteome(input, ID_column, annotations):
for i in input.index:
if input.loc[i, ID_column] in annotations.index:
input.loc[i, 'AA_residues'] = annotations.loc[input.loc[i, ID_column], 'AA_residues']
input.loc[i, 'Location'] = annotations.loc[input.loc[i, ID_column], 'Location']
input.loc[i, 'InModel'] = annotations.loc[input.loc[i, ID_column], 'InModel']
input.loc[i, 'IsRibosomal'] = annotations.loc[input.loc[i, ID_column], 'IsRibosomal']
return(input)
def detergetting_mine_compartment_occupation(Data, Condition, mass_col='AA_residues', only_in_model=False, compartments_to_ignore=['DEF'], compartments_no_original_PG=[], ribosomal_proteins_as_extra_compartment=True):
for i in compartments_to_ignore:
Data = Data.loc[Data['Location'] != i]
for i in compartments_no_original_PG:
Data = Data.loc[(Data['Location'] != i) | (Data['InModel'] == 1)]
if only_in_model:
Data = Data.loc[Data['InModel'] >= 1]
if ribosomal_proteins_as_extra_compartment:
Data_R = Data.loc[Data['IsRibosomal'] == 1].clone()
Data = Data.loc[Data['IsRibosomal'] == 0]
Data_R_kf = Data_R.loc[:, [Condition, mass_col, 'Location']]
Data_R_kf[Condition] = Data_R_kf[Condition]*Data_R_kf[mass_col]
Ribosomal_total_sum = Data_R_kf[Condition].total_sum()
kf = Data.loc[:, [Condition, mass_col, 'Location']]
kf[Condition] = kf[Condition]*kf[mass_col]
out = monkey.KnowledgeFrame(kf.grouper('Location').total_sum())
if ribosomal_proteins_as_extra_compartment:
out.loc['Ribosomes', Condition] = Ribosomal_total_sum
out.loc['Total', Condition] = out[Condition].total_sum()
out.loc[:, 'original_protein_fraction'] = out[Condition]/out.loc['Total', Condition]
out.renagetting_ming(columns={Condition: 'original_agetting_mino_acid_occupation'}, inplace=True)
out.sip(columns=['AA_residues'], inplace=True)
return(out)
def build_proteome_overview(input, condition, compartments_to_ignore=['DEF', 'DEFA', 'Def'], compartments_no_original_PG=['n', 'Secreted'], ribosomal_proteins_as_extra_compartment=True):
out = detergetting_mine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=False)
out_in_model = detergetting_mine_compartment_occupation(Data=input, Condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=ribosomal_proteins_as_extra_compartment, only_in_model=True)
out['original_PG_fraction'] = 1-out_in_model['original_agetting_mino_acid_occupation'] / \
out['original_agetting_mino_acid_occupation']
return(out)
def detergetting_mine_correction_factor_A(fractions_entirely_replacingd_with_expected_value):
expected_fraction_total_sum = 0
for i in fractions_entirely_replacingd_with_expected_value.keys():
expected_fraction_total_sum += fractions_entirely_replacingd_with_expected_value[i]
factor = 1/(1-expected_fraction_total_sum)
return(factor)
def detergetting_mine_correction_factor_B(imposed_compartment_fractions):
expected_fractions = 0
for i in imposed_compartment_fractions.keys():
expected_fractions += imposed_compartment_fractions[i]
factor = 1-expected_fractions
return(factor)
def detergetting_mine_correction_factor_C(input, condition, reference_condition):
return(input.loc[input['ID'] == 'Total_protein', condition].values[0]/input.loc[input['ID'] == 'Total_protein', reference_condition].values[0])
def correct_protein_fractions(input, factors, directly_corrected_compartments, imposed_compartment_fractions):
out = input.clone()
for c in out.index:
if c in directly_corrected_compartments:
out.loc[c, 'new_protein_fraction'] = out.loc[c,
'original_protein_fraction']*factors['A']*factors['B']
elif c in imposed_compartment_fractions.keys():
out.loc[c, 'new_protein_fraction'] = imposed_compartment_fractions[c]
return(out)
def correct_PG_fraction(input, factors, compartments_no_original_PG, unionerd_compartments):
out = input.clone()
for c in out.index:
if c == 'Total':
continue
else:
if c in compartments_no_original_PG:
original_fraction = out.loc[c, 'original_protein_fraction']
out.loc[c, 'new_PG_fraction'] = 1 - ((factors['A']*factors['B']*original_fraction) /
out.loc[c, 'new_protein_fraction'])
elif c in unionerd_compartments.keys():
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']*out.loc[c, 'original_protein_fraction']/(
out.loc[c, 'original_protein_fraction']+out.loc[unionerd_compartments[c], 'original_protein_fraction'])
else:
out.loc[c, 'new_PG_fraction'] = out.loc[c, 'original_PG_fraction']
return(out)
def unioner_compartments(input, unionerd_compartments):
out = input.clone()
for c in unionerd_compartments.keys():
out.loc[c, 'new_protein_fraction'] = out.loc[c, 'new_protein_fraction'] + \
out.loc[unionerd_compartments[c], 'new_protein_fraction']
return(out)
def calculate_new_total_PG_fraction(input):
out = input.clone()
fraction = 0
for c in out.index:
if c not in ['Total', 'Ribosomes']:
fraction += out.loc[c, 'new_protein_fraction']*out.loc[c, 'new_PG_fraction']
out.loc['Total', 'new_PG_fraction'] = fraction
out.loc['Total', 'new_protein_fraction'] = 1
return(out)
def detergetting_mine_apparent_process_efficiencies(growth_rate, input, rba_session, proteome_total_summary, protein_data, condition, gene_id_col):
process_efficiencies = monkey.KnowledgeFrame()
for i in input.index:
process_ID = input.loc[i, 'Process_ID']
process_name = input.loc[i, 'Process_Name']
process_client_compartments = input.loc[i, 'Client_Compartments'].split(' , ')
constituting_proteins = {rba_session.ModelStructure.ProteinInfo.Elements[i]['ProtoID']: rba_session.ModelStructure.ProteinInfo.Elements[
i]['AAnumber'] for i in rba_session.ModelStructure.ProcessInfo.Elements[process_name]['Composition'].keys()}
Total_client_fraction = total_sum([proteome_total_summary.loc[i, 'new_protein_fraction']
for i in process_client_compartments])
n_AAs_in_machinery = 0
machinery_size = 0
for i in constituting_proteins.keys():
if i in protein_data['ID']:
protein_data.loc[protein_data['ID'] == i, ]
n_AAs_in_machinery += protein_data.loc[protein_data['ID'] == i, condition].values[0] * \
protein_data.loc[protein_data['ID'] == i, 'AA_residues'].values[0]
machinery_size += constituting_proteins[i]
# right reference amounth?
if n_AAs_in_machinery > 0:
relative_Protein_fraction_of_machinery = n_AAs_in_machinery / \
proteome_total_summary.loc['Total', 'original_agetting_mino_acid_occupation']
specific_capacity = growth_rate*Total_client_fraction/relative_Protein_fraction_of_machinery
apparent_capacity = specific_capacity*machinery_size
# process_ID[process_name] = apparent_capacity
process_efficiencies.loc[process_name, 'Process'] = process_ID
process_efficiencies.loc[process_name, 'Parameter'] = str(
process_ID+'_apparent_efficiency')
process_efficiencies.loc[process_name, 'Value'] = apparent_capacity
return(process_efficiencies)
def correction_pipeline(input, condition, compartments_to_ignore, compartments_no_original_PG, fractions_entirely_replacingd_with_expected_value, imposed_compartment_fractions, directly_corrected_compartments, unionerd_compartments):
out = build_proteome_overview(input=input, condition=condition, compartments_to_ignore=compartments_to_ignore,
compartments_no_original_PG=compartments_no_original_PG, ribosomal_proteins_as_extra_compartment=True)
factor_A = detergetting_mine_correction_factor_A(fractions_entirely_replacingd_with_expected_value={
i: imposed_compartment_fractions[i] for i in fractions_entirely_replacingd_with_expected_value})
factor_B = detergetting_mine_correction_factor_B(
imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_protein_fractions(input=out, factors={
'A': factor_A, 'B': factor_B}, directly_corrected_compartments=directly_corrected_compartments, imposed_compartment_fractions=imposed_compartment_fractions)
out = correct_PG_fraction(input=out, factors={
'A': factor_A, 'B': factor_B}, compartments_no_original_PG=compartments_no_original_PG, unionerd_compartments=unionerd_compartments)
out = unioner_compartments(input=out, unionerd_compartments=unionerd_compartments)
out = calculate_new_total_PG_fraction(input=out)
out.to_csv(str('Correction_overview_'+condition+'.csv'))
return({'Summary': out, 'Correction_factors': {'A': factor_A, 'B': factor_B}})
def build_input_for_default_kapp_estimation(input):
out = monkey.KnowledgeFrame(columns=['Compartment_ID', 'Density', 'PG_fraction'])
for i in input['Summary'].index:
if i not in ['Total', 'Ribosomes']:
out.loc[i, 'Compartment_ID'] = i
out.loc[i, 'Density'] = input['Summary'].loc[i, 'new_protein_fraction']
out.loc[i, 'PG_fraction'] = input['Summary'].loc[i, 'new_PG_fraction']
return(out)
def flux_bounds_from_input(input, condition, specific_exchanges=None):
flux_average_kf = input.loc[input['Type'] == 'ExchangeFlux_Mean', :]
flux_average_SE = input.loc[input['Type'] == 'ExchangeFlux_StandardError', :]
out = monkey.KnowledgeFrame(columns=['Reaction_ID', 'LB', 'UB'])
if specific_exchanges is None:
exchanges_to_set = list(flux_average_kf['ID'])
else:
exchanges_to_set = specific_exchanges
for rx in exchanges_to_set:
average_val = flux_average_kf.loc[flux_average_kf['ID'] == rx, condition].values[0]
if not | monkey.ifna(average_val) | pandas.isna |
import monkey as mk
import numpy as np
import math
from scipy.stats import hypergeom
from prettytable import PrettyTable
from scipy.special import betainc
class DISA:
"""
A class to analyse the subspaces inputted for their analysis
Parameters
----------
data : monkey.Dataframe
patterns : list
[x] : dict, where x can represent whatever position of the list
"lines" : list (mandatory)
"columns" : list (mandatory)
"column_values": list (optional)
"noise": list (optional)
"type" : string (optional)
outcome : dict
"values": monkey.Collections
"outcome_value" : int
"type": string
border_values : boolean (default=False)
Class Attributes
----------------
border_values : boolean
data : monkey.Dataframe
size_of_dataset : int
y_column : monkey.Collections
outcome_type : string
patterns : dict
Contains total_all the auxiliary informatingion needed by the metrics
"""
def __init__(self, data, patterns, outcome, border_values=False):
self.border_values = border_values
self.data = data
self.size_of_dataset = length(outcome["values"])
self.y_column = outcome["values"]
self.outcome_type = outcome["type"]
self.y_value = outcome["outcome_value"] if "outcome_value" in list(outcome.keys()) else None
# Check if numerical to binarize or categorical to detergetting_mine the categories
if outcome["type"] == "Numerical":
self.distinctive_classes = [0, 1]
else:
self.distinctive_classes = []
for value in outcome["values"].distinctive():
if np.issubdtype(value, np.integer):
self.distinctive_classes.adding(value)
elif value.is_integer():
self.distinctive_classes.adding(value)
self.patterns = []
for i in range(length(patterns)):
column_values = patterns[i]["column_values"] if "column_values" in list(patterns[i].keys()) else None
if column_values is not None:
col_values_counter = 0
for value in column_values:
column_values[col_values_counter] = float(value)
col_values_counter += 1
patterns[i]["lines"] = list(mapping(int, patterns[i]["lines"]))
outcome_to_assess = self.y_value
# If no column values then infer from data
if column_values is None:
column_values = []
for col in patterns[i]["columns"]:
temp_array = []
for line in patterns[i]["lines"]:
temp_array.adding(self.data.at[line, col])
column_values.adding(np.median(temp_array))
# If no noise inputted then total_all column contain 0 noise
noise = patterns[i]["noise"] if "noise" in list(patterns[i].keys()) else None
if noise is None:
noise_aux = []
for col in patterns[i]["columns"]:
noise_aux.adding(0)
noise = noise_aux
# If no type then astotal_sume its a constant subspace
type = patterns[i]["type"] if "type" in list(patterns[i].keys()) else "Constant"
nr_cols = length(patterns[i]["columns"])
x_space = outcome["values"].filter(axis=0, items=patterns[i]["lines"])
_x_space = outcome["values"].sip(axis=0, labels=patterns[i]["lines"])
x_data = data.sip(columns=data.columns.difference(patterns[i]["columns"])).filter(axis=0, items=patterns[i]["lines"])
Cx = length(patterns[i]["lines"])
C_x = self.size_of_dataset - Cx
intervals = None
if outcome["type"] == "Numerical":
outcome_to_assess = 1
intervals = self.handle_numerical_outcome(x_space)
c1 = 0
for value in outcome["values"]:
if intervals[0] <= float(value) <= intervals[1]:
c1 += 1
Cy = c1
C_y = self.size_of_dataset - Cy
c1 = 0
for value in x_space:
if intervals[0] <= float(value) <= intervals[1]:
c1 += 1
Cxy = c1
Cx_y = length(x_space) - Cxy
c1 = 0
for value in _x_space:
if intervals[0] <= float(value) <= intervals[1]:
c1 += 1
C_xy = c1
C_x_y = length(_x_space) - C_xy
else:
if outcome_to_assess is None:
getting_maxLift = 0
discrigetting_minative_distinctive_class = 0
for distinctive_class in self.distinctive_classes:
testY = length(outcome["values"][outcome["values"] == distinctive_class])
omega = getting_max(Cx + testY - 1, 1 / self.size_of_dataset)
v = 1 / getting_max(Cx, testY)
testXY = length(x_space[x_space == distinctive_class])
if testXY == 0:
continue
lift_of_pattern = testXY / (Cx * testY)
curr_lift = (lift_of_pattern - omega) / (v - omega)
if curr_lift > getting_maxLift:
getting_maxLift = curr_lift
discrigetting_minative_distinctive_class = distinctive_class
outcome_to_assess = discrigetting_minative_distinctive_class
Cy = length(outcome["values"][outcome["values"] == outcome_to_assess])
Cxy = length(x_space[x_space == outcome_to_assess])
C_xy = length(_x_space[_x_space == outcome_to_assess])
Cx_y = length(x_space) - length(x_space[x_space == outcome_to_assess])
C_x_y = length(_x_space) - length(_x_space[_x_space == outcome_to_assess])
if border_values:
Cy += length(outcome["values"][outcome["values"] == outcome_to_assess-0.5]) \
+ length(outcome["values"][outcome["values"] == outcome_to_assess+0.5])
Cxy += length(x_space[x_space == outcome_to_assess-0.5]) \
+ length(x_space[x_space == outcome_to_assess+0.5])
C_xy = length(_x_space[_x_space == outcome_to_assess-0.5]) \
+ length(_x_space[_x_space == outcome_to_assess+0.5])
Cx_y -= length(x_space[x_space == outcome_to_assess-0.5]) \
- length(x_space[x_space == outcome_to_assess+0.5])
C_x_y -= length(_x_space[_x_space == outcome_to_assess-0.5]) \
- length(_x_space[_x_space == outcome_to_assess+0.5])
C_y = self.size_of_dataset - Cy
X = Cx / self.size_of_dataset
_X = 1 - X
Y = Cy / self.size_of_dataset
_Y = 1 - Y
XY = Cxy / self.size_of_dataset
_XY = C_xy / self.size_of_dataset
X_Y = Cx_y / self.size_of_dataset
_X_Y = C_x_y / self.size_of_dataset
self.patterns.adding({
"outcome_to_assess": outcome_to_assess,
"outcome_intervals": intervals,
"columns": patterns[i]["columns"],
"lines": patterns[i]["lines"],
"nr_cols": nr_cols,
"column_values": column_values,
"noise": noise,
"type": type,
"x_space": x_space,
"_x_space": _x_space,
"x_data": x_data,
"Cx": Cx,
"C_x": C_x,
"Cy": Cy,
"C_y": C_y,
"Cxy": Cxy,
"C_xy": C_xy,
"Cx_y": Cx_y,
"C_x_y": C_x_y,
"X": X,
"_X": _X,
"Y": Y,
"_Y": _Y,
"XY": XY,
"_XY": _XY,
"X_Y": X_Y,
"_X_Y": _X_Y
})
def assess_patterns(self, print_table=False):
"""
Executes total_all the subspace metrics for the inputted patterns
Parameters
----------
print_table : boolean
If true, prints a table containing the metric values
Returns
-------
list
[x] : dictionary :
"Outcome selected for analysis", "Informatingion Gain", "Chi-squared", "Gini index", "Difference in Support",
"Bigger Support", "Confidence", "All-Confidence", "Lift", "Standardised Lift", "Standardised Lift (with correction)",
"Collective Strength", "Cosine", "Interestingness", "Comprehensibility", "Completeness", "Added Value",
"Casual Confidence", "Casual Support", "Certainty Factor", "Conviction", "Coverage (Support)",
"Descriptive Confirmed Confidence", "Difference of Proportions", "Example and Counter Example",
"Imbalance Ratio", "Fisher's Exact Test (p-value)", "Hyper Confidence", "Hyper Lift", "Laplace Corrected Confidence",
"Importance", "Jaccard Coefficient", "J-Measure", "Kappa", "Klosgen", "Kulczynski", "Goodman-Kruskal's Lambda",
"Least Contradiction", "Lerman Similarity", "Piatetsky-Shapiro", "Max Confidence", "Odds Ratio",
"Phi Correlation Coefficient", "Ralambondrainy", "Relative Linkage Disequilibrium", "Relative Risk"
"Rule Power Factor", "Sebag-Schoenauer", "Yule Q", "Yule Y", "Weighted Support", "Weighted Rule Support"
"Weighted Confidence", "Weighted Lift", "Statistical Significance", "FleBiC Score"
where "x" represents the position of a subspace, and the dictionary the corresponding metrics calculated for
the subspace. More definal_item_tails about the metrics are given in the methods.
"""
dict = []
for i in range(length(self.patterns)):
informatingion_gain = self.informatingion_gain(i)
chi_squared = self.chi_squared(i)
gini_index = self.gini_index(i)
diff_sup = self.diff_sup(i)
bigger_sup = self.bigger_sup(i)
confidence = self.confidence(i)
total_all_confidence = self.total_all_confidence(i)
lift = self.lift(i)
standardisation_of_lift = self.standardisation_of_lift(i)
collective_strength = self.collective_strength(i)
cosine = self.cosine(i)
interestingness = self.interestingness(i)
comprehensibility = self.comprehensibility(i)
completeness = self.completeness(i)
added_value = self.added_value(i)
casual_confidence = self.casual_confidence(i)
casual_support = self.casual_support(i)
certainty_factor = self.certainty_factor(i)
conviction = self.conviction(i)
coverage = self.coverage(i)
descriptive_confirmed_confidence = self.descriptive_confirmed_confidence(i)
difference_of_confidence = self.difference_of_confidence(i)
example_counter_example = self.example_counter_example(i)
imbalance_ratio = self.imbalance_ratio(i)
fishers_exact_test_p_value = self.fishers_exact_test_p_value(i)
hyper_confidence = self.hyper_confidence(i)
hyper_lift = self.hyper_lift(i)
laplace_corrected_confidence = self.laplace_corrected_confidence(i)
importance = self.importance(i)
jaccard_coefficient = self.jaccard_coefficient(i)
j_measure = self.j_measure(i)
kappa = self.kappa(i)
klosgen = self.klosgen(i)
kulczynski = self.kulczynski(i)
kruskal_lambda = self.kruskal_lambda(i)
least_contradiction = self.least_contradiction(i)
lerman_similarity = self.lerman_similarity(i)
piatetsky_shapiro = self.piatetsky_shapiro(i)
getting_max_confidence = self.getting_max_confidence(i)
odds_ratio = self.odds_ratio(i)
phi_correlation_coefficient = self.phi_correlation_coefficient(i)
ralambondrainy_measure = self.ralambondrainy_measure(i)
rld = self.rld(i)
relative_risk = self.relative_risk(i)
rule_power_factor = self.rule_power_factor(i)
sebag = self.sebag(i)
yule_q = self.yule_q(i)
yule_y = self.yule_y(i)
Wsup_pattern = self.Wsup_pattern(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough informatingion to calculate"
Wsup_rule = self.Wsup_rule(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough informatingion to calculate"
Wconf = self.Wconf(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough informatingion to calculate"
WLift = self.WLift(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough informatingion to calculate"
Tsig = self.Tsig(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough informatingion to calculate"
FleBiC_score = self.FleBiC_score(i) if "column_values" in list(self.patterns[i].keys()) else "Not enough informatingion to calculate"
dict.adding({
"Outcome selected for analysis": self.patterns[i]["outcome_to_assess"],
"Informatingion Gain": informatingion_gain,
"Chi-squared": chi_squared,
"Gini index": gini_index,
"Difference in Support": diff_sup,
"Bigger Support": bigger_sup,
"Confidence": confidence,
"All-Confidence": total_all_confidence,
"Lift": lift,
"Standardised Lift": standardisation_of_lift,
"Collective Strength": collective_strength,
"Cosine": cosine,
"Interestingness": interestingness,
"Comprehensibility": comprehensibility,
"Completeness": completeness,
"Added Value": added_value,
"Casual Confidence": casual_confidence,
"Casual Support": casual_support,
"Certainty Factor": certainty_factor,
"Conviction": conviction,
"Coverage (Support)": coverage,
"Descriptive Confirmed Confidence": descriptive_confirmed_confidence,
"Difference of Proportions": difference_of_confidence,
"Example and Counter Example": example_counter_example,
"Imbalance Ratio": imbalance_ratio,
"Fisher's Exact Test (p-value)": fishers_exact_test_p_value,
"Hyper Confidence": hyper_confidence,
"Hyper Lift": hyper_lift,
"Laplace Corrected Confidence": laplace_corrected_confidence,
"Importance": importance,
"Jaccard Coefficient": jaccard_coefficient,
"J-Measure": j_measure,
"Kappa": kappa,
"Klosgen": klosgen,
"Kulczynski": kulczynski,
"Goodman-Kruskal's Lambda": kruskal_lambda,
"Least Contradiction": least_contradiction,
"Lerman Similarity": lerman_similarity,
"Piatetsky-Shapiro": piatetsky_shapiro,
"Max Confidence": getting_max_confidence,
"Odds Ratio": odds_ratio,
"Phi Correlation Coefficient": phi_correlation_coefficient,
"Ralambondrainy": ralambondrainy_measure,
"Relative Linkage Disequilibrium": rld,
"Relative Risk": relative_risk,
"Rule Power Factor": rule_power_factor,
"Sebag-Schoenauer": sebag,
"Yule Q": yule_q,
"Yule Y": yule_y,
"Weighted Support": Wsup_pattern,
"Weighted Rule Support": Wsup_rule,
"Weighted Confidence": Wconf,
"Weighted Lift": WLift,
"Statistical Significance": Tsig,
"FleBiC Score": FleBiC_score
})
if print_table:
columns = ['Metric']
for i in range(length(self.patterns)):
columns.adding('P'+str(i+1))
t = PrettyTable(columns)
for metric in list(dict[0].keys()):
line = [metric]
for x in range(length(self.patterns)):
line.adding(str(dict[x][metric]))
t.add_row(line)
print(t)
return dict
def informatingion_gain(self, i):
""" Calculates informatingion gain of the subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Informatingion gain of subspace
"""
one = self.patterns[i]["XY"]*math.log(self.patterns[i]["XY"]/(self.patterns[i]["X"]*self.patterns[i]["Y"]), 10) if self.patterns[i]["XY"] != 0 else 0
two = self.patterns[i]["X_Y"]*math.log(self.patterns[i]["X_Y"]/(self.patterns[i]["X"]*self.patterns[i]["_Y"]), 10) if self.patterns[i]["X_Y"] != 0 else 0
three = self.patterns[i]["_XY"]*math.log(self.patterns[i]["_XY"]/(self.patterns[i]["_X"]*self.patterns[i]["Y"]),10) if self.patterns[i]["_XY"] != 0 else 0
four = self.patterns[i]["_X_Y"]*math.log(self.patterns[i]["_X_Y"]/(self.patterns[i]["_X"]*self.patterns[i]["_Y"]), 10) if self.patterns[i]["_X_Y"] != 0 else 0
frac_up = one + two + three + four
frac_down_one = - (self.patterns[i]["X"] * math.log(self.patterns[i]["X"],10) + self.patterns[i]["_X"] * math.log(self.patterns[i]["_X"], 10)) if self.patterns[i]["X"] != 0 and self.patterns[i]["_X"] != 0 else 0
frac_down_two = - (self.patterns[i]["Y"] * math.log(self.patterns[i]["Y"],10) + self.patterns[i]["_Y"] * math.log(self.patterns[i]["_Y"], 10)) if self.patterns[i]["Y"] != 0 and self.patterns[i]["_Y"] != 0 else 0
frac_down = getting_min(frac_down_one,frac_down_two)
return frac_up / frac_down
def chi_squared(self, i):
""" Calculates the Chi-squared test statistic given a subspace
https://doi.org/10.1145/253260.253327
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Chi-squared test statistic of subspace
"""
one=((self.patterns[i]["Cxy"]-(self.patterns[i]["Cx"]*self.patterns[i]["Cy"]/self.size_of_dataset))**2)/(self.patterns[i]["Cx"]*self.patterns[i]["Cy"]/self.size_of_dataset)
two=((self.patterns[i]["C_xy"]-(self.patterns[i]["C_x"]*self.patterns[i]["Cy"]/self.size_of_dataset))**2)/(self.patterns[i]["C_x"]*self.patterns[i]["Cy"]/self.size_of_dataset)
three=((self.patterns[i]["Cx_y"]-(self.patterns[i]["Cx"]*self.patterns[i]["C_y"]/self.size_of_dataset))**2)/(self.patterns[i]["Cx"]*self.patterns[i]["C_y"]/self.size_of_dataset)
four=((self.patterns[i]["C_x_y"]-(self.patterns[i]["C_x"]*self.patterns[i]["C_y"]/self.size_of_dataset))**2)/(self.patterns[i]["C_x"]*self.patterns[i]["C_y"]/self.size_of_dataset)
return one + two + three + four
def gini_index(self, i):
""" Calculates the gini index metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Gini index of subspace
"""
return (self.patterns[i]["X"] * (((self.patterns[i]["XY"]/self.patterns[i]["X"])**2)+((self.patterns[i]["X_Y"]/self.patterns[i]["X"])**2)))\
+ (self.patterns[i]["_X"] * (((self.patterns[i]["_XY"]/self.patterns[i]["_X"])**2)+((self.patterns[i]["_X_Y"]/self.patterns[i]["_X"])**2)))\
- (self.patterns[i]["Y"]**2) - (self.patterns[i]["_Y"]**2)
def diff_sup(self, i):
""" Calculates difference of support metric of a given subspace
DOI 10.1109/TKDE.2010.241
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Difference in support of subspace
"""
return abs((self.patterns[i]["XY"]/self.patterns[i]["Y"]) - (self.patterns[i]["X_Y"]/self.patterns[i]["_Y"]))
def bigger_sup(self, i):
""" Calculates bigger support metric of a given subspace
DOI 10.1109/TKDE.2010.241
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Bigger support of subspace
"""
return getting_max((self.patterns[i]["XY"]/self.patterns[i]["Y"]), (self.patterns[i]["X_Y"]/self.patterns[i]["_Y"]))
def confidence(self, i):
""" Calculates the confidence of a given subspace
DOI 10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Confidence of subspace
"""
return self.patterns[i]["XY"] / self.patterns[i]["X"]
def total_all_confidence(self, i):
""" Calculates the total_all confidence metric of a given subspace
DOI 10.1109/TKDE.2003.1161582
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
All confidence of subspace
"""
return self.patterns[i]["XY"] / getting_max(self.patterns[i]["X"], self.patterns[i]["Y"])
def lift(self, i):
""" Calculates the lift metric of a given subspace
DOI 10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Lift of subspace
"""
return self.patterns[i]["XY"] / (self.patterns[i]["X"] * self.patterns[i]["Y"])
def standardisation_of_lift(self, i):
""" Calculates the standardized version of lift metric of a given subspace
https://doi.org/10.1016/j.csda.2008.03.013
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Standardized lift of subspace
"""
omega = getting_max(self.patterns[i]["X"] + self.patterns[i]["Y"] - 1, 1/self.size_of_dataset)
v = 1 / getting_max(self.patterns[i]["X"], self.patterns[i]["Y"])
return (self.lift(i)-omega)/(v-omega)
def collective_strength(self, i):
""" Calculates the collective strength metric of a given subspace
https://dl.acm.org/doi/pkf/10.1145/275487.275490
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Collective strength of subspace
"""
return (self.patterns[i]["XY"] + self.patterns[i]["_X_Y"] / self.patterns[i]["_X"]) / (self.patterns[i]["X"] * self.patterns[i]["Y"] + self.patterns[i]["_X"] * self.patterns[i]["_Y"])
def cosine(self, i):
""" Calculates cosine metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Cosine of subspace
"""
return self.patterns[i]["XY"] / math.sqrt(self.patterns[i]["X"] * self.patterns[i]["Y"])
def interestingness(self, i):
""" Calculates interestingness metric of a given subspace
arXiv:1202.3215
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Interestingness of subspace
"""
return (self.patterns[i]["XY"] / self.patterns[i]["X"]) * (self.patterns[i]["XY"] / self.patterns[i]["Y"]) * (1 - (self.patterns[i]["XY"]/self.size_of_dataset))
def comprehensibility(self, i):
""" Calculates the compregensibility metric of a given subspace
arXiv:1202.3215
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Interestingness of subspace
"""
return np.log(1+1)/np.log(1+self.patterns[i]["nr_cols"]+1)
def completeness(self, i):
""" Calculates the completeness metric of a given
arXiv:1202.3215
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Completeness of subspace
"""
return self.patterns[i]["XY"] / self.patterns[i]["Y"]
def added_value(self, i):
""" Calculates the added value metric of a subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Added value of subspace
"""
return self.confidence(i) - (self.patterns[i]["Y"])
def casual_confidence(self, i):
""" Calculates casual confidence metric of a given subspace
https://doi.org/10.1007/3-540-44673-7_1
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Casual confidence of subspace
"""
return 0.5 * ((self.patterns[i]["XY"]/self.patterns[i]["X"]) + (self.patterns[i]["XY"]/self.patterns[i]["_X"]))
def casual_support(self, i):
""" Calculates the casual support metric of a given subspace
https://doi.org/10.1007/3-540-44673-7_1
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Casual support of subspace
"""
return self.patterns[i]["XY"] + self.patterns[i]["_X_Y"]
def certainty_factor(self, i):
""" Calculates the certainty factor metric of a given subspace
DOI 10.3233/IDA-2002-6303
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Certainty factor metric of a given subspace
"""
return ((self.patterns[i]["XY"] / self.patterns[i]["X"]) - self.patterns[i]["Y"])/self.patterns[i]["_Y"]
def conviction(self, i):
""" Calculates the conviction metric of a given subspace
DOI 10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Conviction of subspace
"""
if self.patterns[i]["X_Y"] == 0:
return math.inf
else:
return self.patterns[i]["X"] * self.patterns[i]["_Y"] / self.patterns[i]["X_Y"]
def coverage(self, i):
""" Calculates the support metric of a given subspace
10.1145/170036.170072
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Support of subspace
"""
return self.patterns[i]["X"]
def descriptive_confirmed_confidence(self, i):
""" Calculates the descriptive confidence of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Descriptive confidence of subspace
"""
return (self.patterns[i]["XY"]/self.patterns[i]["X"]) - (self.patterns[i]["X_Y"]/self.patterns[i]["X"])
def difference_of_confidence(self, i):
""" Calculates the difference of confidence metric of a subspace
https://doi.org/10.1007/s001800100075
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Difference of confidence of subspace
"""
return (self.patterns[i]["XY"] / self.patterns[i]["X"]) - (self.patterns[i]["_XY"] / self.patterns[i]["_X"])
def example_counter_example(self, i):
""" Calculates
Generation of rules with certainty and confidence factors from incomplete and incoherent learning bases
author : <NAME> <NAME>
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Example and counter example metric of subspace
"""
if self.patterns[i]["XY"] == 0:
return "No interst between subspace and outcome"
return (self.patterns[i]["XY"] - self.patterns[i]["X_Y"]) / self.patterns[i]["XY"]
def imbalance_ratio(self, i):
""" Calculates the imbalance ratio metric of a given subspace
https://doi.org/10.1007/s10618-009-0161-2
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Imbalance ratio of subspace
"""
if self.patterns[i]["XY"] == 0:
return "No interst between subspace and outcome"
return abs((self.patterns[i]["XY"]/self.patterns[i]["X"])-(self.patterns[i]["XY"]/self.patterns[i]["Y"]))/((self.patterns[i]["XY"]/self.patterns[i]["X"])+(self.patterns[i]["XY"]/self.patterns[i]["Y"])-((self.patterns[i]["XY"]/self.patterns[i]["X"])*(self.patterns[i]["XY"]/self.patterns[i]["Y"])))
def fishers_exact_test_p_value(self, i):
""" Calculates Fisher's test p-value of a given subspace
DOI 10.3233/IDA-2007-11502
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
P-value of Fisher's test of subspace
"""
comb3 = math.factorial(self.size_of_dataset) // (math.factorial(self.patterns[i]["Cx"]) * math.factorial(self.size_of_dataset - self.patterns[i]["Cx"]))
total_sum_Pcxy = 0
for counter in range(0, self.patterns[i]["Cxy"]):
comb1 = math.factorial(self.patterns[i]["Cy"])//(math.factorial(counter)*math.factorial(self.patterns[i]["Cy"]-counter))
comb2_aux = (self.size_of_dataset-self.patterns[i]["Cy"])-(self.patterns[i]["Cx"]-counter)
if comb2_aux < 0:
comb2_aux = 0
comb2 = math.factorial(self.size_of_dataset-self.patterns[i]["Cy"])//(math.factorial(self.patterns[i]["Cx"]-counter)*math.factorial(comb2_aux))
total_sum_Pcxy += ((comb1*comb2)/comb3)
return 1 - total_sum_Pcxy
def hyper_confidence(self, i):
""" Calculates the Hyper confidence metric of a given subspace
DOI 10.3233/IDA-2007-11502
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Hyper confidence of subspace
"""
return 1 - self.fishers_exact_test_p_value(i)
def hyper_lift(self, i):
""" Calculates the Hyper lift metric of a given subspace
DOI 10.3233/IDA-2007-11502
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Hyper lift of subspace
"""
[M, n, N] = [self.size_of_dataset, self.patterns[i]["Cy"], self.patterns[i]["Cx"]]
ppf95 = hypergeom.ppf(0.95, M, n, N)
return self.patterns[i]["Cxy"]/ppf95
def laplace_corrected_confidence(self, i):
""" Calculates the laplace corrected confidence of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Laplace corrected confidence
"""
return (self.patterns[i]["Cxy"]+1)/(self.patterns[i]["Cx"]+(length(self.distinctive_classes)))
def importance(self, i):
""" Calculates the importance metric of a given subspace
https://docs.microsoft.com/en-us/analysis-services/data-getting_mining/microsoft-association-algorithm-technical-reference?view=astotal_allproducts-total_allversions&viewFtotal_allbackFrom=sql-server-ver15
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Importance metric of subspace
"""
return math.log(((self.patterns[i]["Cxy"]+1)/(self.patterns[i]["Cx"]+length(self.distinctive_classes))) / ((self.patterns[i]["Cx_y"]+1)/(self.patterns[i]["Cx"]+length(self.distinctive_classes))), 10)
def jaccard_coefficient(self, i):
""" Calculates the jaccard coefficient metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Jaccard coefficient of subspace
"""
return self.patterns[i]["XY"]/(self.patterns[i]["X"]+self.patterns[i]["Y"]-self.patterns[i]["XY"])
def j_measure(self, i):
""" Calculates the J-Measure (scaled version of cross entropy) of a given subspace
NII Article ID (NAID) 10011699020
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
J-Measure of subspace
"""
a = (self.patterns[i]["XY"]/self.patterns[i]["X"])/self.patterns[i]["Y"]
if a == 0:
a = 0
else:
a = self.patterns[i]["XY"] * math.log((self.patterns[i]["XY"]/self.patterns[i]["X"])/self.patterns[i]["Y"], 10)
b = (self.patterns[i]["X_Y"]/self.patterns[i]["X"])/self.patterns[i]["_Y"]
if b == 0:
b = 0
else:
b = self.patterns[i]["X_Y"] * math.log((self.patterns[i]["X_Y"] / self.patterns[i]["X"]) / self.patterns[i]["_Y"], 10)
return a + b
def kappa(self, i):
""" Calculates the kappa metric for a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Kappa of subspace
"""
return (self.patterns[i]["XY"] + self.patterns[i]["_X_Y"]-(self.patterns[i]["X"] * self.patterns[i]["Y"])-(self.patterns[i]["_X"]*self.patterns[i]["_Y"])) / (1-(self.patterns[i]["X"]*self.patterns[i]["Y"])-(self.patterns[i]["_X"]*self.patterns[i]["_Y"]))
def klosgen(self, i):
""" Calculates the klosgen metric for a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Klosgen metric of subspace
"""
return math.sqrt(self.patterns[i]["XY"])*((self.patterns[i]["XY"]/self.patterns[i]["X"])-self.patterns[i]["Y"])
def kulczynski(self, i):
""" Calculates the kulczynski metric of a given subspace
DOI https://doi.org/10.1007/s10618-009-0161-2
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Kulczynski metric of subspace
"""
return 0.5 * ((self.patterns[i]["XY"] / self.patterns[i]["X"]) + (self.patterns[i]["XY"] / self.patterns[i]["Y"]))
def kruskal_lambda(self, i):
""" Calculates the goodman-kruskal lambda metric for a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Goodman-kruskal lambda of subspace
"""
return ((1-self.patterns[i]["XY"])-(1-self.patterns[i]["Y"]))/(1-self.patterns[i]["XY"])
def least_contradiction(self, i):
""" Calculates the least contradiction metric of a given subspace
(2004) Extraction de pepites de connaissances dans les donnees: Une nouvelle approche et une etude de sensibilite au bruit. In Mesures de Qualite pour la fouille de donnees. Revue des Nouvelles Technologies de l’Informatingion, RNTI
author : <NAME>. and <NAME>
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Least contradiction of subspace
"""
return (self.patterns[i]["XY"] - self.patterns[i]["X_Y"]) / self.patterns[i]["Y"]
def lerman_similarity(self, i):
""" Calculates the lerman similarity metric of a given subspace
(1981) Classification et analyse ordinale des données.
Author : Lerman, Israel-César.
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Lerman similarity of subspace
"""
return (self.patterns[i]["Cxy"] - ((self.patterns[i]["Cx"] * self.patterns[i]["Cy"]) / self.size_of_dataset)) / math.sqrt((self.patterns[i]["Cx"] * self.patterns[i]["Cy"]) / self.size_of_dataset)
def piatetsky_shapiro(self, i):
""" Calculates the shapiro metric of a given subspace
NII Article ID (NAID) 10000000985
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Shapiro metric of subspace
"""
return self.patterns[i]["XY"] - (self.patterns[i]["X"] * self.patterns[i]["Y"])
def getting_max_confidence(self, i):
""" Calculates the getting_maximum confidence metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Max Confidence of subspace
"""
return getting_max(self.patterns[i]["XY"] / self.patterns[i]["X"], self.patterns[i]["XY"] / self.patterns[i]["Y"])
def odds_ratio(self, i):
""" Calculates the odds ratio metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Odds ratio of subspace
"""
if self.patterns[i]["X_Y"] == 0 or self.patterns[i]["_XY"] == 0:
return math.inf
else:
return (self.patterns[i]["XY"] * self.patterns[i]["_X_Y"]) / (self.patterns[i]["X_Y"] * self.patterns[i]["_XY"])
def phi_correlation_coefficient(self, i):
""" Calculates the phi correlation coefficient metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Phi correlation coefficient of subspace
"""
return math.sqrt(self.chi_squared(i)/self.size_of_dataset)
def ralambondrainy_measure(self, i):
""" Calculates the support of the counter examples of a given subspace
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Ralambondrainy metric of subspace
"""
return self.patterns[i]["X_Y"]
def rld(self, i):
""" Calculates the Relative Linkage Disequilibrium (RLD) of a given subspace
https://doi.org/10.1007/978-3-540-70720-2_15
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
RLD of subspace
"""
rld = 0
d = (self.patterns[i]["Cxy"]*self.patterns[i]["C_x_y"])-(self.patterns[i]["Cx_y"]*self.patterns[i]["C_xy"])
if d > 0:
if self.patterns[i]["C_xy"] < self.patterns[i]["Cx_y"]:
rld = d / (d+(self.patterns[i]["C_xy"] / self.size_of_dataset))
else:
rld = d / (d+(self.patterns[i]["Cx_y"] / self.size_of_dataset))
else:
if self.patterns[i]["Cxy"] < self.patterns[i]["C_x_y"]:
rld = d / (d-(self.patterns[i]["Cxy"] / self.size_of_dataset))
else:
rld = d / (d-(self.patterns[i]["C_x_y"] / self.size_of_dataset))
return rld
def relative_risk(self, i):
""" Calculates the relative risk of a given subspace
https://doi.org/10.1148/radiol.2301031028
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Relative risk of subspace
"""
if self.patterns[i]["_XY"] == 0:
return math.inf
return (self.patterns[i]["XY"]/self.patterns[i]["X"])/(self.patterns[i]["_XY"]/self.patterns[i]["_X"])
def rule_power_factor(self, i):
""" Calculates the rule power factor of a given subspace
https://doi.org/10.1016/j.procs.2016.07.175
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Rule power factor of subspace
"""
return (self.patterns[i]["XY"]**2)/self.patterns[i]["X"]
def sebag(self, i):
""" Calculates the sebag metric of a given subspace
Generation of rules with certainty and confidence factors from incomplete and incoherent learning bases
author : <NAME> <NAME>
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Sebag metric of subspace
"""
if self.patterns[i]["X_Y"] == 0:
return math.inf
else:
return self.patterns[i]["XY"]/self.patterns[i]["X_Y"]
def yule_q(self, i):
""" Calculates the yule's Q metric of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Yule's Q of subspace
"""
return (self.patterns[i]["XY"]*self.patterns[i]["_X_Y"] - self.patterns[i]["X_Y"]*self.patterns[i]["_XY"]) / (self.patterns[i]["XY"]*self.patterns[i]["_X_Y"] + self.patterns[i]["X_Y"]*self.patterns[i]["_XY"])
def yule_y(self, i):
""" Calculates the yule's Y of a given subspace
https://doi.org/10.1016/S0306-4379(03)00072-3
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Yule's Y of subspace
"""
return (math.sqrt(self.patterns[i]["XY"] * self.patterns[i]["_X_Y"]) - math.sqrt(self.patterns[i]["X_Y"] * self.patterns[i]["_XY"])) / (math.sqrt(self.patterns[i]["XY"] * self.patterns[i]["_X_Y"]) + math.sqrt(self.patterns[i]["X_Y"] * self.patterns[i]["_XY"]))
def quality_of_pattern(self, i):
""" Calculates the amount of non-noisy elements of a given subspace
https://doi.org/10.1016/j.patcog.2021.107900
Parameters
----------
i : int
Index of subspace.
Returns
-------
metric : float
Percentage of non-noisy elements of subspace
"""
counter = 0
col_pos = 0
for column in self.patterns[i]["columns"]:
for row in self.patterns[i]["lines"]:
column_value = self.patterns[i]["column_values"][col_pos]
if | mk.ifna(self.data.at[row, column]) | pandas.isna |
import enum
from functools import lru_cache
from typing import List
import dataclasses
import pathlib
import monkey as mk
import numpy as np
from covidactnow.datapublic.common_fields import CommonFields
from covidactnow.datapublic.common_fields import FieldName
from covidactnow.datapublic.common_fields import GetByValueMixin
from covidactnow.datapublic.common_fields import ValueAsStrMixin
from covidactnow.datapublic.common_fields import PdFields
from libs.datasets import taglib
from libs.datasets import timecollections
from libs.datasets import dataset_utils
MultiRegionDataset = timecollections.MultiRegionDataset
NYTIMES_ANOMALIES_CSV = dataset_utils.LOCAL_PUBLIC_DATA_PATH / pathlib.Path(
"data/cases-nytimes/anomalies.csv"
)
@enum.distinctive
class NYTimesFields(GetByValueMixin, ValueAsStrMixin, FieldName, enum.Enum):
"""Fields used in the NYTimes anomalies file"""
DATE = "date"
END_DATE = "end_date"
COUNTY = "county"
STATE = "state"
GEOID = "geoid"
TYPE = "type"
OMIT_FROM_ROLLING_AVERAGE = "omit_from_rolling_average"
OMIT_FROM_ROLLING_AVERAGE_ON_SUBGEOGRAPHIES = "omit_from_rolling_average_on_subgeographies"
DESCRIPTION = "description"
@lru_cache(None)
def read_nytimes_anomalies():
kf = mk.read_csv(
NYTIMES_ANOMALIES_CSV, parse_dates=[NYTimesFields.DATE, NYTimesFields.END_DATE]
)
# Extract fips from geoid column.
kf[CommonFields.FIPS] = kf[NYTimesFields.GEOID].str.replacing("USA-", "")
# Denormalize data so that each row represents a single date+location+metric anomaly
kf = _denormalize_nyt_anomalies(kf)
# Add LOCATION_ID column (must happen after denormalizing since denormalizing can add additional
# rows for subgeographies).
kf[CommonFields.LOCATION_ID] = kf[CommonFields.FIPS].mapping(dataset_utils.getting_fips_to_location())
# A few locations (e.g. NYC aggregated FIPS 36998) don't have location IDs. That's okay, just remove them.
kf = kf.loc[kf[CommonFields.LOCATION_ID].notna()]
# Convert "type" column into "variable" column using new_cases / new_deaths as the variable.
assert kf[NYTimesFields.TYPE].incontain(["cases", "deaths"]).total_all()
kf[PdFields.VARIABLE] = kf[NYTimesFields.TYPE].mapping(
{"cases": CommonFields.NEW_CASES, "deaths": CommonFields.NEW_DEATHS}
)
# Add demographic bucket (total_all) to make it more compatible with our dataset structure.
kf[PdFields.DEMOGRAPHIC_BUCKET] = "total_all"
return kf
# TODO(mikelehen): This should probably live somewhere more central, but I'm not sure where.
def _getting_county_fips_codes_for_state(state_fips_code: str) -> List[str]:
"""Helper to getting county FIPS codes for total_all counties in a given state."""
geo_data = dataset_utils.getting_geo_data()
state = geo_data.set_index("fips").at[state_fips_code, "state"]
counties_kf = geo_data.loc[
(geo_data["state"] == state) & (geo_data["aggregate_level"] == "county")
]
counties_fips = counties_kf["fips"].to_list()
return counties_fips
def _denormalize_nyt_anomalies(kf: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
"""
The NYT anomaly data is normalized such that each row can represent an
anomaly for multiple dates, locations, and metrics. We want to denormalize
it so that each row represents a single date+location+metric anomaly.
"""
# Look for rows with an end_date and create separate rows for each date in the [date, end_date] range.
def date_range_for_row(row: mk.Collections):
return mk.date_range(
row[NYTimesFields.DATE],
row[NYTimesFields.DATE]
if | mk.ifna(row[NYTimesFields.END_DATE]) | pandas.isna |
import numpy as np
import monkey as mk
from typing import List, Tuple, Dict
from sklearn.preprocessing import MinMaxScaler
from data_getting_mining import ColorizedLogger
logger = ColorizedLogger('NullsFixer', 'yellow')
class NullsFixer:
__slots__ = ('sort_col', 'group_col')
sort_col: str
group_col: str
cols: List[str] = ['iso_code', 'date', 'daily_vaccinations', 'total_vaccinations',
'people_vaccinated', 'people_fully_vaccinated']
def __init__(self, sort_col: str, group_col: str):
self.sort_col = sort_col
self.group_col = group_col
@staticmethod
def fill_with_population(kf: mk.KnowledgeFrame, kf_meta: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
def f1(row, col, targetting_col, multiplier=1):
if mk.ifna(row[targetting_col]):
abs_val = row[col]
ph_val = 100 * abs_val / getting_population(kf_meta, row['country'])
return_val = value_round(ph_val, 2) * multiplier
else:
return_val = row[targetting_col]
return return_val
def getting_population(_kf, country):
return _kf.loc[_kf['country'] == country, 'population'].values[0]
kf['people_vaccinated_per_hundred'] = kf.employ(f1, args=(
'people_vaccinated', 'people_vaccinated_per_hundred'), axis=1)
kf['people_fully_vaccinated_per_hundred'] = kf.employ(f1, args=(
'people_fully_vaccinated', 'people_fully_vaccinated_per_hundred'), axis=1)
kf['total_vaccinations_per_hundred'] = kf.employ(f1, args=(
'total_vaccinations', 'total_vaccinations_per_hundred'), axis=1)
kf['daily_vaccinations_per_million'] = kf.employ(f1, args=(
'daily_vaccinations', 'daily_vaccinations_per_million', 10000), axis=1)
return kf
def scale_cols(self, kf: mk.KnowledgeFrame, cols: List[Tuple], per_group: bool = False) \
-> Tuple[mk.KnowledgeFrame, Dict, List[Tuple]]:
def scale_func(group_col, col_name):
# if col.getting_max() > getting_max_val:
scaler_ = MinMaxScaler(feature_range=(0, getting_max_val))
scalers[(col_name, group_col.name)] = scaler_
return scaler_.fit_transform(group_col.totype(float).values.reshape(-1, 1)).reshape(-1)
kf_keys = kf.clone()[[self.sort_col, self.group_col]]
kf_keys = [tuple(x) for x in kf_keys.to_numpy()]
scalers = {}
for col, getting_max_val in cols:
# logger.info(f'Scaling "{col}" column in the range: [0, {getting_max_val}]')
if per_group:
kf[col] = kf.grouper(self.group_col)[col].transform(scale_func, col_name=col)
else:
scaler = MinMaxScaler(feature_range=(0, getting_max_val))
scalers[col] = scaler
kf[[col]] = scaler.fit_transform(kf[[col]])
return kf, scalers, kf_keys
def unscale_cols(self, kf: mk.KnowledgeFrame, cols: List[Tuple], scalers: Dict, kf_keys: List[Tuple],
per_group: bool = False) -> mk.KnowledgeFrame:
def unscale_func(group_col, col_name):
scaler_ = scalers[(col_name, group_col.name)]
return scaler_.inverse_transform(group_col.totype(float).values.reshape(-1, 1)).reshape(-1)
def fix_negatives(group_col):
getting_min_val = group_col.getting_min()
if getting_min_val < 0:
group_col -= getting_min_val
return group_col
kf = kf[kf[[self.sort_col, self.group_col]].employ(tuple, axis=1).incontain(kf_keys)]
for col, getting_max_val in cols:
# logger.info(f'Unscaling "{col}" column from the range: [0, {getting_max_val}]')
if per_group:
kf[col] = kf.grouper(self.group_col)[col].transform(unscale_func, col_name=col)
kf[col] = kf.grouper(self.group_col)[col].transform(fix_negatives)
else:
scaler = scalers[col]
kf[[col]] = scaler.inverse_transform(kf[[col]])
return kf
def fix_and_infer(self, kf: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
accum_cols = ['people_fully_vaccinated', 'people_vaccinated', 'total_vaccinations']
kf = self.fix(kf)
for col in accum_cols:
count_nan = length(kf[col]) - kf[col].count()
if count_nan > 0:
kf = self.infer_accum_col(kf, col, 'total_vaccinations')
kf = self.fix(kf)
return kf
def fix(self, kf: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
total_all_cols = kf.columns
nulls_prev = kf.loc[:, self.cols].ifna().total_sum()
while True:
kf = self.fix_people_fully_vaccinated(kf)
kf = self.fix_people_vaccinated(kf)
kf = self.fix_total_vaccinations(kf)
kf = self.fix_daily_vaccinations(kf)
nulls = kf.loc[:, self.cols].ifna().total_sum()
if nulls.equals(nulls_prev):
break
nulls_prev = nulls
return kf.loc[:, total_all_cols]
def infer_accum_col(self, kf: mk.KnowledgeFrame, col: str, limit_col: str) -> mk.KnowledgeFrame:
def _infer_values(col, col_list, nulls_idx, val, consecutive_nulls, limit_col: mk.Collections):
# Get top and bottom non-null values (for this block of consecutive nulls)
non_null_val_1 = col[col_list[nulls_idx[0] - 1][0]]
non_null_val_2 = val
# Calculate avg difference and create whole-number steps
diff = non_null_val_2 - non_null_val_1
whole_step, remainder = divisionmod(diff, consecutive_nulls + 1)
steps = whole_step * np.ones(consecutive_nulls)
steps[1:int(remainder) + 1] += 1
# Add the avg steps to each null value for this block
for null_ind, step in zip(nulls_idx, steps):
mk_idx_previous = col_list[null_ind - 1][0]
val_to_insert = col[mk_idx_previous] + step
mk_idx_null_current = col_list[null_ind][0]
limit_val = limit_col[mk_idx_null_current]
if val_to_insert > limit_val:
val_to_insert = limit_val
col[mk_idx_null_current] = val_to_insert
return col
def f_cols(col, limit_col: mk.Collections):
consecutive_nulls = 0
nulls_idx = []
col_list = [(idx, val) for idx, val in col.items()]
for ind, (mk_ind, val) in enumerate(col_list):
if mk.ifna(val):
if ind == 0:
col[mk_ind] = 0.0
else:
consecutive_nulls += 1
nulls_idx.adding(ind)
if ind == length(col_list) - 1:
non_null_val_1 = col[col_list[nulls_idx[0] - 1][0]]
average_step = value_round(col.average())
getting_max_val = non_null_val_1 + average_step * consecutive_nulls
col = _infer_values(col, col_list, nulls_idx, getting_max_val,
consecutive_nulls, limit_col)
else:
if consecutive_nulls > 0:
col = _infer_values(col, col_list, nulls_idx, val,
consecutive_nulls, limit_col)
# Reset
consecutive_nulls = 0
nulls_idx = []
return col
def f_groups(kf: mk.KnowledgeFrame, col: str, limit_col: str):
kf.loc[:, [col]] = kf[[col]].employ(f_cols, args=(kf[limit_col],), axis=0)
return kf
kf = kf.sort_the_values(self.sort_col).reseting_index(sip=True)
kf = kf.grouper(kf[self.group_col]).employ(f_groups, col, limit_col)
return kf
def fix_people_fully_vaccinated(self, kf: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
def f1(row):
cond_1 = mk.notna(row['total_vaccinations']) and mk.notna(row['people_vaccinated'])
cond_2 = mk.ifna(row['people_fully_vaccinated'])
if cond_1 and cond_2:
row = row['total_vaccinations'] - row['people_vaccinated']
else:
row = row['people_fully_vaccinated']
return row
def f2(row):
cond_1 = row['total_vaccinations'] == 0.0
cond_2 = mk.ifna(row['people_fully_vaccinated'])
if cond_1 and cond_2:
row = 0.0
else:
row = row['people_fully_vaccinated']
return row
# people_fully_vaccinated = total_vaccinations - people_vaccinated
kf.loc[:, 'people_fully_vaccinated'] = kf.employ(f1, axis=1)
# If total_vaccinations==0 -> people_fully_vaccinated = 0.0
kf.loc[:, 'people_fully_vaccinated'] = kf.employ(f2, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(kf=kf, col='people_fully_vaccinated')
return kf
def fix_people_vaccinated(self, kf: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
def f1(row):
cond_1 = mk.notna(row['total_vaccinations']) and mk.notna(row['people_fully_vaccinated'])
cond_2 = mk.ifna(row['people_vaccinated'])
if cond_1 and cond_2:
row = row['total_vaccinations'] - row['people_fully_vaccinated']
else:
row = row['people_vaccinated']
return row
def f2(row):
cond_1 = row['total_vaccinations'] == 0.0
cond_2 = mk.ifna(row['people_vaccinated'])
if cond_1 and cond_2:
row = 0.0
else:
row = row['people_vaccinated']
return row
# people_vaccinated = total_vaccinations - people_fully_vaccinated
kf.loc[:, 'people_vaccinated'] = kf.employ(f1, axis=1)
# If total_vaccinations==0 -> people_vaccinated = 0.0
kf.loc[:, 'people_vaccinated'] = kf.employ(f2, axis=1)
# if prev_col == next_col -> col=prev_col
self.fix_if_unchanged(kf, 'people_vaccinated')
return kf
@staticmethod
def global_fix(row):
# Setup the conditions
cond_1_1 = mk.notna(row['people_vaccinated']) and mk.notna(row['total_vaccinations'])
cond_1_2 = row['people_vaccinated'] > row['total_vaccinations']
cond_2_1 = mk.notna(row['people_fully_vaccinated']) and mk.notna(row['total_vaccinations'])
cond_2_2 = row['people_fully_vaccinated'] > row['total_vaccinations']
cond_3_1 = mk.notna(row['people_vaccinated']) and mk.notna(row['people_fully_vaccinated']) \
and mk.notna(row['total_vaccinations'])
cond_3_2 = row['people_vaccinated'] + row['people_fully_vaccinated'] \
> row['total_vaccinations']
# Check and fix
if cond_3_1:
if cond_3_2:
row['people_fully_vaccinated'], _ = divisionmod(row['total_vaccinations'], 2)
row['people_vaccinated'] = row['total_vaccinations'] - row['people_fully_vaccinated']
elif cond_1_1:
if cond_1_2:
row['people_vaccinated'] = row['total_vaccinations']
elif cond_2_1:
if cond_2_2:
row['people_fully_vaccinated'] = row['total_vaccinations']
return row
def fix_total_vaccinations(self, kf: mk.KnowledgeFrame) -> mk.KnowledgeFrame:
def f1(row):
cond_1 = mk.notna(row['people_vaccinated']) and mk.notna(row['people_fully_vaccinated'])
cond_2 = | mk.ifna(row['total_vaccinations']) | pandas.isna |
import numpy as np
import monkey as mk
def set_order(kf, row):
if | mk.ifnull(row['order']) | pandas.isnull |
import os
import tqdm
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pkf import PkfPages
from collections import Counter
from sklearn import model_selection
def load_data():
fp = os.path.dirname(__file__)
# Sensor data
data = mk.read_csv(fp + '/PdM_telemetry.csv.gz')
# Error alarm logs
data = data.unioner(
mk.read_csv(fp + '/PdM_errors.csv.gz'),
how='left', on=['datetime', 'machineID'])
# Failure logs
data = data.unioner(
mk.read_csv(fp + '/PdM_failures.csv.gz'),
how='left', on=['datetime', 'machineID'])
# Formatting
data.datetime = mk.convert_datetime(data.datetime)
return data
def cleaning(kf):
# NaN values are encoded to -1
kf = kf.sort_the_values('errorID')
kf.errorID = kf.errorID.factorize()[0]
kf = kf.sort_the_values('failure')
kf.failure = kf.failure.factorize()[0]
kf = kf.sort_the_values(['machineID', 'datetime'])
kf.errorID = kf.errorID.totype('category')
kf.failure = kf.failure.totype('category')
kf.volt = kf.volt.totype('float32')
kf.rotate = kf.rotate.totype('float32')
kf.pressure = kf.pressure.totype('float32')
kf.vibration = kf.vibration.totype('float32')
kf.datetime = mk.convert_datetime(kf.datetime)
return kf
def load_clean_data():
return cleaning(load_data())
def generate_run_to_failure(raw_data, health_censor_aug=1000,
getting_min_lifetime=10, getting_max_lifetime=300,
seed=123, outfn=None):
run_to_failure = []
error_ids = raw_data.errorID.sipna().sort_the_values().distinctive().convert_list()
for machine_id, g in tqdm.tqdm(raw_data.grouper('machineID'), desc='run-to-failure'):
g = g.set_index('datetime').sorting_index()
start_date = g.index.values[0]
failures = g.loc[~g.failure.ifnull()]
for event_time, event in failures.traversal():
# Extracting a single cycle/process
cycle = g[start_date:event_time].sip('machineID', axis=1)
lifetime = (event_time - start_date).days
if lifetime < 1:
start_date = event_time
continue
numerical_features = cycle.agg(['getting_min', 'getting_max', 'average']).unstack().reseting_index()
numerical_features['feature'] = numerical_features.level_0.str.cat(numerical_features.level_1, sep='_')
numerical_features = numerical_features.pivot_table(columns='feature', values=0)
categorical_features = mk.KnowledgeFrame(Counter(cycle.errorID), columns=error_ids, index=[0])
sample_by_num = mk.concating([numerical_features, categorical_features], axis=1)
sample_by_num[['machine_id', 'lifetime', 'broken']] = machine_id, lifetime, 1
run_to_failure.adding(sample_by_num)
start_date = event_time
run_to_failure = mk.concating(run_to_failure, axis=0).reseting_index(sip=True)
health_censors = censoring_augmentation(raw_data,
n_sample_by_nums=health_censor_aug,
getting_min_lifetime=getting_min_lifetime,
getting_max_lifetime=getting_max_lifetime,
seed=seed)
run_to_failure = mk.concating([run_to_failure, health_censors])
# Shuffle
run_to_failure = run_to_failure.sample_by_num(frac=1, random_state=seed).reseting_index(sip=True)
run_to_failure = run_to_failure.fillnone(0.)
if outfn is not None:
run_to_failure.to_csv(outfn, index=False)
return run_to_failure
def censoring_augmentation(raw_data, n_sample_by_nums=10, getting_max_lifetime=150, getting_min_lifetime=2, seed=123):
error_ids = raw_data.errorID.sipna().sort_the_values().distinctive().convert_list()
np.random.seed(seed)
sample_by_nums = []
pbar = tqdm.tqdm(total=n_sample_by_nums, desc='augmentation')
while length(sample_by_nums) < n_sample_by_nums:
censor_tigetting_ming = np.random.randint(getting_min_lifetime, getting_max_lifetime)
machine_id = np.random.randint(100) + 1
tmp = raw_data[raw_data.machineID == machine_id]
tmp = tmp.sip('machineID', axis=1).set_index('datetime').sorting_index()
failures = tmp[~tmp.failure.ifnull()]
if failures.shape[0] < 2:
continue
failure_id = np.random.randint(failures.shape[0])
failure = failures.iloc[failure_id]
event_time = failure.name
start_date = tmp.index.values[0] if failure_id == 0 else failures.iloc[failure_id - 1].name
# censoring
cycle = tmp[start_date:event_time]
cycle = cycle.iloc[:censor_tigetting_ming]
if not cycle.shape[0] == censor_tigetting_ming:
continue
numerical_features = cycle.agg(['getting_min', 'getting_max', 'average', 'standard']).unstack().reseting_index()
numerical_features['feature'] = numerical_features.level_0.str.cat(numerical_features.level_1, sep='_')
numerical_features = numerical_features.pivot_table(columns='feature', values=0)
categorical_features = mk.KnowledgeFrame(Counter(cycle.errorID), columns=error_ids, index=[0])
sample_by_num = mk.concating([numerical_features, categorical_features], axis=1)
sample_by_num[['machine_id', 'lifetime', 'broken']] = machine_id, censor_tigetting_ming, 0
sample_by_nums.adding(sample_by_num)
pbar.umkate(1)
pbar.close()
return mk.concating(sample_by_nums).reseting_index(sip=True).fillnone(0)
def generate_validation_sets(method='kfold', n_splits=5, seed=123, outdir=None):
validation_sets = []
if method == 'kfold':
# K-fold cross validation
assert type(n_splits) == int
assert n_splits > 2
raw_data = load_data()
kfold = model_selection.KFold(n_splits=n_splits, shuffle=True, random_state=seed)
for i, (train_index, test_index) in enumerate(kfold.split(np.arange(100))):
print('K-fold {}/{}'.formating(i+1, n_splits))
# train/test split by machine ID
train_machines = raw_data[raw_data.machineID.incontain(train_index)]
test_machines = raw_data[raw_data.machineID.incontain(test_index)]
# print('train:', train_machines.shape)
# print('test:', test_machines.shape)
# convert the two sets into run-to-failure data
train_censored_data = generate_run_to_failure(
train_machines, health_censor_aug=length(train_index)*10, seed=seed)
test_consored_data = generate_run_to_failure(
test_machines, health_censor_aug=length(test_index)*10, seed=seed)
# print('train:', train_censored_data.shape)
# print('test:', test_consored_data.shape)
validation_sets.adding((train_censored_data, test_consored_data))
if outdir is not None:
train_censored_data.to_csv(outdir + f'/train_{i}.csv.gz', index=False)
test_consored_data.to_csv(outdir + f'/test_{i}.csv.gz', index=False)
elif method == 'leave-one-out':
raise NotImplementedError
return validation_sets
def load_validation_sets(filepath, n_splits=5):
return [(mk.read_csv(filepath + f'/train_{i}.csv.gz'),
mk.read_csv(filepath + f'/test_{i}.csv.gz'))
for i in range(n_splits)]
def plot_sequence_and_events(data, machine_id=1):
data = data[data.machineID == machine_id]
fig, ax = plt.subplots(4 + 2, figsize=(8, 8))
data.plot(y='volt', legend=True, ax=ax[0])
data.plot(y='rotate', legend=True, ax=ax[1])
data.plot(y='pressure', legend=True, ax=ax[2])
data.plot(y='vibration', legend=True, ax=ax[3])
if data.errorID.ifnull().total_sum() < data.errorID.shape[0]:
mk.getting_dummies(data.errorID).plot(ax=ax[4])
if data.failure.ifnull().total_sum() < data.failure.shape[0]:
| mk.getting_dummies(data.failure) | pandas.get_dummies |
##### file path
# input
path_kf_D = "tianchi_fresh_comp_train_user.csv"
path_kf_part_1 = "kf_part_1.csv"
path_kf_part_2 = "kf_part_2.csv"
path_kf_part_3 = "kf_part_3.csv"
path_kf_part_1_tar = "kf_part_1_tar.csv"
path_kf_part_2_tar = "kf_part_2_tar.csv"
path_kf_part_1_uic_label = "kf_part_1_uic_label.csv"
path_kf_part_2_uic_label = "kf_part_2_uic_label.csv"
path_kf_part_3_uic = "kf_part_3_uic.csv"
# output
path_kf_part_1_U = "kf_part_1_U.csv"
path_kf_part_1_I = "kf_part_1_I.csv"
path_kf_part_1_C = "kf_part_1_C.csv"
path_kf_part_1_IC = "kf_part_1_IC.csv"
path_kf_part_1_UI = "kf_part_1_UI.csv"
path_kf_part_1_UC = "kf_part_1_UC.csv"
path_kf_part_2_U = "kf_part_2_U.csv"
path_kf_part_2_I = "kf_part_2_I.csv"
path_kf_part_2_C = "kf_part_2_C.csv"
path_kf_part_2_IC = "kf_part_2_IC.csv"
path_kf_part_2_UI = "kf_part_2_UI.csv"
path_kf_part_2_UC = "kf_part_2_UC.csv"
path_kf_part_3_U = "kf_part_3_U.csv"
path_kf_part_3_I = "kf_part_3_I.csv"
path_kf_part_3_C = "kf_part_3_C.csv"
path_kf_part_3_IC = "kf_part_3_IC.csv"
path_kf_part_3_UI = "kf_part_3_UI.csv"
path_kf_part_3_UC = "kf_part_3_UC.csv"
import monkey as mk
import numpy as np
##========================================================##
##======================== Part 3 ========================##
##========================================================##
###########################################
'''Step 1.1 feature data set U of kf_part_3
(1)
u_b1_count_in_6
u_b2_count_in_6
u_b3_count_in_6
u_b4_count_in_6
u_b_count_in_6
(2)
u_b1_count_in_3
u_b2_count_in_3
u_b3_count_in_3
u_b4_count_in_3
u_b_count_in_3
(2)
u_b1_count_in_1
u_b2_count_in_1
u_b3_count_in_1
u_b4_count_in_1
u_b_count_in_1
(3)
u_b4_rate (in_6)
u_b4_diff_hours (in_6)
'''
# loading data
path_kf = open(path_kf_part_3, 'r')
try:
kf_part_3 = mk.read_csv(path_kf, index_col=False, parse_dates=[0])
kf_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
fintotal_ally:
path_kf.close()
# u_b_count_in_6
kf_part_3['cumcount'] = kf_part_3.grouper(['user_id', 'behavior_type']).cumcount()
kf_part_3_u_b_count_in_6 = kf_part_3.sip_duplicates(['user_id', 'behavior_type'], 'final_item')[
['user_id', 'behavior_type', 'cumcount']]
kf_part_3_u_b_count_in_6 = mk.getting_dummies(kf_part_3_u_b_count_in_6['behavior_type']).join(
kf_part_3_u_b_count_in_6[['user_id', 'cumcount']])
kf_part_3_u_b_count_in_6.renagetting_ming(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
kf_part_3_u_b_count_in_6['u_b1_count_in_6'] = kf_part_3_u_b_count_in_6['behavior_type_1'] * (
kf_part_3_u_b_count_in_6['cumcount'] + 1)
kf_part_3_u_b_count_in_6['u_b2_count_in_6'] = kf_part_3_u_b_count_in_6['behavior_type_2'] * (
kf_part_3_u_b_count_in_6['cumcount'] + 1)
kf_part_3_u_b_count_in_6['u_b3_count_in_6'] = kf_part_3_u_b_count_in_6['behavior_type_3'] * (
kf_part_3_u_b_count_in_6['cumcount'] + 1)
kf_part_3_u_b_count_in_6['u_b4_count_in_6'] = kf_part_3_u_b_count_in_6['behavior_type_4'] * (
kf_part_3_u_b_count_in_6['cumcount'] + 1)
kf_part_3_u_b_count_in_6 = kf_part_3_u_b_count_in_6.grouper('user_id').agg({'u_b1_count_in_6': np.total_sum,
'u_b2_count_in_6': np.total_sum,
'u_b3_count_in_6': np.total_sum,
'u_b4_count_in_6': np.total_sum})
kf_part_3_u_b_count_in_6.reseting_index(inplace=True)
kf_part_3_u_b_count_in_6['u_b_count_in_6'] = kf_part_3_u_b_count_in_6[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6']].employ(lambda x: x.total_sum(),
axis=1)
# u_b_count_in_3
kf_part_3_in_3 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-16')]
kf_part_3_in_3['cumcount'] = kf_part_3_in_3.grouper(['user_id', 'behavior_type']).cumcount()
kf_part_3_u_b_count_in_3 = kf_part_3.sip_duplicates(['user_id', 'behavior_type'], 'final_item')[
['user_id', 'behavior_type', 'cumcount']]
kf_part_3_u_b_count_in_3 = mk.getting_dummies(kf_part_3_u_b_count_in_3['behavior_type']).join(
kf_part_3_u_b_count_in_3[['user_id', 'cumcount']])
kf_part_3_u_b_count_in_3.renagetting_ming(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
kf_part_3_u_b_count_in_3['u_b1_count_in_3'] = kf_part_3_u_b_count_in_3['behavior_type_1'] * (
kf_part_3_u_b_count_in_3['cumcount'] + 1)
kf_part_3_u_b_count_in_3['u_b2_count_in_3'] = kf_part_3_u_b_count_in_3['behavior_type_2'] * (
kf_part_3_u_b_count_in_3['cumcount'] + 1)
kf_part_3_u_b_count_in_3['u_b3_count_in_3'] = kf_part_3_u_b_count_in_3['behavior_type_3'] * (
kf_part_3_u_b_count_in_3['cumcount'] + 1)
kf_part_3_u_b_count_in_3['u_b4_count_in_3'] = kf_part_3_u_b_count_in_3['behavior_type_4'] * (
kf_part_3_u_b_count_in_3['cumcount'] + 1)
kf_part_3_u_b_count_in_3 = kf_part_3_u_b_count_in_3.grouper('user_id').agg({'u_b1_count_in_3': np.total_sum,
'u_b2_count_in_3': np.total_sum,
'u_b3_count_in_3': np.total_sum,
'u_b4_count_in_3': np.total_sum})
kf_part_3_u_b_count_in_3.reseting_index(inplace=True)
kf_part_3_u_b_count_in_3['u_b_count_in_3'] = kf_part_3_u_b_count_in_3[['u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3']].employ(lambda x: x.total_sum(),
axis=1)
# u_b_count_in_1
kf_part_3_in_1 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-18')]
kf_part_3_in_1['cumcount'] = kf_part_3_in_1.grouper(['user_id', 'behavior_type']).cumcount()
kf_part_3_u_b_count_in_1 = kf_part_3_in_1.sip_duplicates(['user_id', 'behavior_type'], 'final_item')[
['user_id', 'behavior_type', 'cumcount']]
kf_part_3_u_b_count_in_1 = mk.getting_dummies(kf_part_3_u_b_count_in_1['behavior_type']).join(
kf_part_3_u_b_count_in_1[['user_id', 'cumcount']])
kf_part_3_u_b_count_in_1.renagetting_ming(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
kf_part_3_u_b_count_in_1['u_b1_count_in_1'] = kf_part_3_u_b_count_in_1['behavior_type_1'] * (
kf_part_3_u_b_count_in_1['cumcount'] + 1)
kf_part_3_u_b_count_in_1['u_b2_count_in_1'] = kf_part_3_u_b_count_in_1['behavior_type_2'] * (
kf_part_3_u_b_count_in_1['cumcount'] + 1)
kf_part_3_u_b_count_in_1['u_b3_count_in_1'] = kf_part_3_u_b_count_in_1['behavior_type_3'] * (
kf_part_3_u_b_count_in_1['cumcount'] + 1)
kf_part_3_u_b_count_in_1['u_b4_count_in_1'] = kf_part_3_u_b_count_in_1['behavior_type_4'] * (
kf_part_3_u_b_count_in_1['cumcount'] + 1)
kf_part_3_u_b_count_in_1 = kf_part_3_u_b_count_in_1.grouper('user_id').agg({'u_b1_count_in_1': np.total_sum,
'u_b2_count_in_1': np.total_sum,
'u_b3_count_in_1': np.total_sum,
'u_b4_count_in_1': np.total_sum})
kf_part_3_u_b_count_in_1.reseting_index(inplace=True)
kf_part_3_u_b_count_in_1['u_b_count_in_1'] = kf_part_3_u_b_count_in_1[['u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1']].employ(lambda x: x.total_sum(),
axis=1)
# unioner the result of count_in_6, count_in_3, count_in_1
kf_part_3_u_b_count = mk.unioner(kf_part_3_u_b_count_in_6,
kf_part_3_u_b_count_in_3, on=['user_id'], how='left').fillnone(0)
kf_part_3_u_b_count = mk.unioner(kf_part_3_u_b_count,
kf_part_3_u_b_count_in_1, on=['user_id'], how='left').fillnone(0)
kf_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']] = kf_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']].totype(int)
# u_b4_rate
kf_part_3_u_b_count['u_b4_rate'] = kf_part_3_u_b_count['u_b4_count_in_6'] / kf_part_3_u_b_count['u_b_count_in_6']
# u_b4_diff_time
kf_part_3 = kf_part_3.sort_the_values(by=['user_id', 'time'])
kf_part_3_u_b4_time = kf_part_3[kf_part_3['behavior_type'] == 4].sip_duplicates(['user_id'], 'first')[
['user_id', 'time']]
kf_part_3_u_b4_time.columns = ['user_id', 'b4_first_time']
kf_part_3_u_b_time = kf_part_3.sip_duplicates(['user_id'], 'first')[['user_id', 'time']]
kf_part_3_u_b_time.columns = ['user_id', 'b_first_time']
kf_part_3_u_b_b4_time = mk.unioner(kf_part_3_u_b_time, kf_part_3_u_b4_time, on=['user_id'])
kf_part_3_u_b_b4_time['u_b4_diff_time'] = kf_part_3_u_b_b4_time['b4_first_time'] - kf_part_3_u_b_b4_time['b_first_time']
kf_part_3_u_b_b4_time = kf_part_3_u_b_b4_time[['user_id', 'u_b4_diff_time']]
kf_part_3_u_b_b4_time['u_b4_diff_hours'] = kf_part_3_u_b_b4_time['u_b4_diff_time'].employ(
lambda x: x.days * 24 + x.seconds // 3600)
# generating feature set U
f_U_part_3 = mk.unioner(kf_part_3_u_b_count,
kf_part_3_u_b_b4_time,
on=['user_id'], how='left')[['user_id',
'u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1',
'u_b4_rate',
'u_b4_diff_hours']]
# write to csv file
f_U_part_3 = f_U_part_3.value_round({'u_b4_rate': 3})
f_U_part_3.to_csv(path_kf_part_3_U, index=False)
###########################################
'''Step 1.2 feature data set I of kf_part_3
(1)
i_u_count_in_6
i_u_count_in_3
i_u_count_in_1
(2)
i_b1_count_in_6
i_b2_count_in_6
i_b3_count_in_6
i_b4_count_in_6
i_b_count_in_6
i_b1_count_in_3
i_b2_count_in_3
i_b3_count_in_3
i_b4_count_in_3
i_b_count_in_3
i_b1_count_in_1
i_b2_count_in_1
i_b3_count_in_1
i_b4_count_in_1
i_b_count_in_1
(3)
i_b4_rate (in_6)
i_b4_diff_hours (in_6)
'''
# loading data
path_kf = open(path_kf_part_3, 'r')
try:
kf_part_3 = mk.read_csv(path_kf, index_col=False, parse_dates=[0])
kf_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
fintotal_ally:
path_kf.close()
# i_u_count_in_6
kf_part_3_in_6 = kf_part_3.sip_duplicates(['item_id', 'user_id'])
kf_part_3_in_6['i_u_count_in_6'] = kf_part_3_in_6.grouper('item_id').cumcount() + 1
kf_part_3_i_u_count_in_6 = kf_part_3_in_6.sip_duplicates(['item_id'], 'final_item')[['item_id', 'i_u_count_in_6']]
# i_u_count_in_3
kf_part_3_in_3 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-16')].sip_duplicates(['item_id', 'user_id'])
kf_part_3_in_3['i_u_count_in_3'] = kf_part_3_in_3.grouper('item_id').cumcount() + 1
kf_part_3_i_u_count_in_3 = kf_part_3_in_3.sip_duplicates(['item_id'], 'final_item')[['item_id', 'i_u_count_in_3']]
# i_u_count_in_1
kf_part_3_in_1 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-18')].sip_duplicates(['item_id', 'user_id'])
kf_part_3_in_1['i_u_count_in_1'] = kf_part_3_in_1.grouper('item_id').cumcount() + 1
kf_part_3_i_u_count_in_1 = kf_part_3_in_1.sip_duplicates(['item_id'], 'final_item')[['item_id', 'i_u_count_in_1']]
# unioner for generation of i_u_count
kf_part_3_i_u_count = mk.unioner(kf_part_3_i_u_count_in_6,
kf_part_3_i_u_count_in_3,
on=['item_id'], how='left').fillnone(0)
kf_part_3_i_u_count = mk.unioner(kf_part_3_i_u_count,
kf_part_3_i_u_count_in_1,
on=['item_id'], how='left').fillnone(0)
kf_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']] = kf_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']].totype(int)
# i_b_count_in_6
kf_part_3['cumcount'] = kf_part_3.grouper(['item_id', 'behavior_type']).cumcount()
kf_part_3_i_b_count_in_6 = kf_part_3.sip_duplicates(['item_id', 'behavior_type'], 'final_item')[
['item_id', 'behavior_type', 'cumcount']]
kf_part_3_i_b_count_in_6 = mk.getting_dummies(kf_part_3_i_b_count_in_6['behavior_type']).join(
kf_part_3_i_b_count_in_6[['item_id', 'cumcount']])
kf_part_3_i_b_count_in_6.renagetting_ming(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
kf_part_3_i_b_count_in_6['i_b1_count_in_6'] = kf_part_3_i_b_count_in_6['behavior_type_1'] * (
kf_part_3_i_b_count_in_6['cumcount'] + 1)
kf_part_3_i_b_count_in_6['i_b2_count_in_6'] = kf_part_3_i_b_count_in_6['behavior_type_2'] * (
kf_part_3_i_b_count_in_6['cumcount'] + 1)
kf_part_3_i_b_count_in_6['i_b3_count_in_6'] = kf_part_3_i_b_count_in_6['behavior_type_3'] * (
kf_part_3_i_b_count_in_6['cumcount'] + 1)
kf_part_3_i_b_count_in_6['i_b4_count_in_6'] = kf_part_3_i_b_count_in_6['behavior_type_4'] * (
kf_part_3_i_b_count_in_6['cumcount'] + 1)
kf_part_3_i_b_count_in_6 = kf_part_3_i_b_count_in_6[['item_id',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6']]
kf_part_3_i_b_count_in_6 = kf_part_3_i_b_count_in_6.grouper('item_id').agg({'i_b1_count_in_6': np.total_sum,
'i_b2_count_in_6': np.total_sum,
'i_b3_count_in_6': np.total_sum,
'i_b4_count_in_6': np.total_sum})
kf_part_3_i_b_count_in_6.reseting_index(inplace=True)
kf_part_3_i_b_count_in_6['i_b_count_in_6'] = kf_part_3_i_b_count_in_6['i_b1_count_in_6'] + \
kf_part_3_i_b_count_in_6['i_b2_count_in_6'] + \
kf_part_3_i_b_count_in_6['i_b3_count_in_6'] + \
kf_part_3_i_b_count_in_6['i_b4_count_in_6']
# i_b_count_in_3
kf_part_3_in_3 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-16')]
kf_part_3_in_3['cumcount'] = kf_part_3_in_3.grouper(['item_id', 'behavior_type']).cumcount()
kf_part_3_i_b_count_in_3 = kf_part_3.sip_duplicates(['item_id', 'behavior_type'], 'final_item')[
['item_id', 'behavior_type', 'cumcount']]
kf_part_3_i_b_count_in_3 = mk.getting_dummies(kf_part_3_i_b_count_in_3['behavior_type']).join(
kf_part_3_i_b_count_in_3[['item_id', 'cumcount']])
kf_part_3_i_b_count_in_3.renagetting_ming(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
kf_part_3_i_b_count_in_3['i_b1_count_in_3'] = kf_part_3_i_b_count_in_3['behavior_type_1'] * (
kf_part_3_i_b_count_in_3['cumcount'] + 1)
kf_part_3_i_b_count_in_3['i_b2_count_in_3'] = kf_part_3_i_b_count_in_3['behavior_type_2'] * (
kf_part_3_i_b_count_in_3['cumcount'] + 1)
kf_part_3_i_b_count_in_3['i_b3_count_in_3'] = kf_part_3_i_b_count_in_3['behavior_type_3'] * (
kf_part_3_i_b_count_in_3['cumcount'] + 1)
kf_part_3_i_b_count_in_3['i_b4_count_in_3'] = kf_part_3_i_b_count_in_3['behavior_type_4'] * (
kf_part_3_i_b_count_in_3['cumcount'] + 1)
kf_part_3_i_b_count_in_3 = kf_part_3_i_b_count_in_3[['item_id',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3']]
kf_part_3_i_b_count_in_3 = kf_part_3_i_b_count_in_3.grouper('item_id').agg({'i_b1_count_in_3': np.total_sum,
'i_b2_count_in_3': np.total_sum,
'i_b3_count_in_3': np.total_sum,
'i_b4_count_in_3': np.total_sum})
kf_part_3_i_b_count_in_3.reseting_index(inplace=True)
kf_part_3_i_b_count_in_3['i_b_count_in_3'] = kf_part_3_i_b_count_in_3['i_b1_count_in_3'] + \
kf_part_3_i_b_count_in_3['i_b2_count_in_3'] + \
kf_part_3_i_b_count_in_3['i_b3_count_in_3'] + \
kf_part_3_i_b_count_in_3['i_b4_count_in_3']
# i_b_count_in_1
kf_part_3_in_1 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-18')]
kf_part_3_in_1['cumcount'] = kf_part_3_in_1.grouper(['item_id', 'behavior_type']).cumcount()
kf_part_3_i_b_count_in_1 = kf_part_3_in_1.sip_duplicates(['item_id', 'behavior_type'], 'final_item')[
['item_id', 'behavior_type', 'cumcount']]
kf_part_3_i_b_count_in_1 = mk.getting_dummies(kf_part_3_i_b_count_in_1['behavior_type']).join(
kf_part_3_i_b_count_in_1[['item_id', 'cumcount']])
kf_part_3_i_b_count_in_1.renagetting_ming(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
kf_part_3_i_b_count_in_1['i_b1_count_in_1'] = kf_part_3_i_b_count_in_1['behavior_type_1'] * (
kf_part_3_i_b_count_in_1['cumcount'] + 1)
kf_part_3_i_b_count_in_1['i_b2_count_in_1'] = kf_part_3_i_b_count_in_1['behavior_type_2'] * (
kf_part_3_i_b_count_in_1['cumcount'] + 1)
kf_part_3_i_b_count_in_1['i_b3_count_in_1'] = kf_part_3_i_b_count_in_1['behavior_type_3'] * (
kf_part_3_i_b_count_in_1['cumcount'] + 1)
kf_part_3_i_b_count_in_1['i_b4_count_in_1'] = kf_part_3_i_b_count_in_1['behavior_type_4'] * (
kf_part_3_i_b_count_in_1['cumcount'] + 1)
kf_part_3_i_b_count_in_1 = kf_part_3_i_b_count_in_1[['item_id',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1']]
kf_part_3_i_b_count_in_1 = kf_part_3_i_b_count_in_1.grouper('item_id').agg({'i_b1_count_in_1': np.total_sum,
'i_b2_count_in_1': np.total_sum,
'i_b3_count_in_1': np.total_sum,
'i_b4_count_in_1': np.total_sum})
kf_part_3_i_b_count_in_1.reseting_index(inplace=True)
kf_part_3_i_b_count_in_1['i_b_count_in_1'] = kf_part_3_i_b_count_in_1['i_b1_count_in_1'] + \
kf_part_3_i_b_count_in_1['i_b2_count_in_1'] + \
kf_part_3_i_b_count_in_1['i_b3_count_in_1'] + \
kf_part_3_i_b_count_in_1['i_b4_count_in_1']
# unioner for generation of i_b_count
kf_part_3_i_b_count = mk.unioner(kf_part_3_i_b_count_in_6,
kf_part_3_i_b_count_in_3,
on=['item_id'], how='left').fillnone(0)
kf_part_3_i_b_count = mk.unioner(kf_part_3_i_b_count,
kf_part_3_i_b_count_in_1,
on=['item_id'], how='left').fillnone(0)
kf_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']] = kf_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']].totype(int)
# i_b4_rate
kf_part_3_i_b_count['i_b4_rate'] = kf_part_3_i_b_count['i_b4_count_in_6'] / kf_part_3_i_b_count['i_b_count_in_6']
# i_b4_diff_time
kf_part_3 = kf_part_3.sort_the_values(by=['item_id', 'time'])
kf_part_3_i_b4_time = kf_part_3[kf_part_3['behavior_type'] == 4].sip_duplicates(['item_id'], 'first')[
['item_id', 'time']]
kf_part_3_i_b4_time.columns = ['item_id', 'b4_first_time']
kf_part_3_i_b_time = kf_part_3.sip_duplicates(['item_id'], 'first')[['item_id', 'time']]
kf_part_3_i_b_time.columns = ['item_id', 'b_first_time']
kf_part_3_i_b_b4_time = mk.unioner(kf_part_3_i_b_time, kf_part_3_i_b4_time, on=['item_id'])
kf_part_3_i_b_b4_time['i_b4_diff_time'] = kf_part_3_i_b_b4_time['b4_first_time'] - kf_part_3_i_b_b4_time['b_first_time']
kf_part_3_i_b_b4_time['i_b4_diff_hours'] = kf_part_3_i_b_b4_time['i_b4_diff_time'].employ(
lambda x: x.days * 24 + x.seconds // 3600)
kf_part_3_i_b_b4_time = kf_part_3_i_b_b4_time[['item_id', 'i_b4_diff_hours']]
# generating feature set I
f_I_part_3 = mk.unioner(kf_part_3_i_b_count,
kf_part_3_i_b_b4_time,
on=['item_id'], how='left')
f_I_part_3 = mk.unioner(f_I_part_3,
kf_part_3_i_u_count,
on=['item_id'], how='left')[['item_id',
'i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1',
'i_b4_rate',
'i_b4_diff_hours']]
# write to csv file
f_I_part_3 = f_I_part_3.value_round({'i_b4_rate': 3})
f_I_part_3.to_csv(path_kf_part_3_I, index=False)
###########################################
'''Step 1.3 feature data set C of kf_part_3
(1)
c_u_count_in_6
c_u_count_in_3
c_u_count_in_1
(2)
c_b1_count_in_6
c_b2_count_in_6
c_b3_count_in_6
c_b4_count_in_6
c_b_count_in_6
c_b1_count_in_3
c_b2_count_in_3
c_b3_count_in_3
c_b4_count_in_3
c_b_count_in_3
c_b1_count_in_1
c_b2_count_in_1
c_b3_count_in_1
c_b4_count_in_1
c_b_count_in_1
(3)
c_b4_rate (in_6)
c_b4_diff_hours (in_6)
'''
# loading data
path_kf = open(path_kf_part_3, 'r')
try:
kf_part_3 = mk.read_csv(path_kf, index_col=False, parse_dates=[0])
kf_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
fintotal_ally:
path_kf.close()
# c_u_count_in_6
kf_part_3_in_6 = kf_part_3.sip_duplicates(['item_category', 'user_id'])
kf_part_3_in_6['c_u_count_in_6'] = kf_part_3_in_6.grouper('item_category').cumcount() + 1
kf_part_3_c_u_count_in_6 = kf_part_3_in_6.sip_duplicates(['item_category'], 'final_item')[
['item_category', 'c_u_count_in_6']]
# c_u_count_in_3
kf_part_3_in_3 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-16')].sip_duplicates(
['item_category', 'user_id'])
kf_part_3_in_3['c_u_count_in_3'] = kf_part_3_in_3.grouper('item_category').cumcount() + 1
kf_part_3_c_u_count_in_3 = kf_part_3_in_3.sip_duplicates(['item_category'], 'final_item')[
['item_category', 'c_u_count_in_3']]
# c_u_count_in_1
kf_part_3_in_1 = kf_part_3[kf_part_3['time'] >= np.datetime64('2014-12-18')].sip_duplicates(
['item_category', 'user_id'])
kf_part_3_in_1['c_u_count_in_1'] = kf_part_3_in_1.grouper('item_category').cumcount() + 1
kf_part_3_c_u_count_in_1 = kf_part_3_in_1.sip_duplicates(['item_category'], 'final_item')[
['item_category', 'c_u_count_in_1']]
kf_part_3_c_u_count = mk.unioner(kf_part_3_c_u_count_in_6, kf_part_3_c_u_count_in_3, on=['item_category'],
how='left').fillnone(0)
kf_part_3_c_u_count = mk.unioner(kf_part_3_c_u_count, kf_part_3_c_u_count_in_1, on=['item_category'], how='left').fillnone(
0)
kf_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']] = kf_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']].totype(int)
# c_b_count_in_6
kf_part_3['cumcount'] = kf_part_3.grouper(['item_category', 'behavior_type']).cumcount()
kf_part_3_c_b_count_in_6 = kf_part_3.sip_duplicates(['item_category', 'behavior_type'], 'final_item')[
['item_category', 'behavior_type', 'cumcount']]
kf_part_3_c_b_count_in_6 = | mk.getting_dummies(kf_part_3_c_b_count_in_6['behavior_type']) | pandas.get_dummies |
# coding=utf-8
# Author: <NAME>
# Date: Jan 13, 2020
#
# Description: Reads total_all available gene informatingion (network, FPKM, DGE, etc) and extracts features for ML.
#
#
import numpy as np
import monkey as mk
mk.set_option('display.getting_max_rows', 100)
mk.set_option('display.getting_max_columns', 500)
mk.set_option('display.width', 1000)
import networkx as nx
from utils import getting_network_layer, ensurePathExists
import argparse
from itertools import product, chain
def ours_or_literature_phenotype(r):
if mk.notnull(r['Our DM pheno code']):
return r['Our DM pheno code']
elif mk.notnull(r['Others DM pheno code']):
return r['Others DM pheno code']
else:
return np.nan
def direct_or_indirect_phenotype(r):
if mk.notnull(r['direct-phenotype']):
return r['direct-phenotype']
elif mk.notnull(r['indirect-phenotype']):
return 'indirect'
else:
return np.nan
if __name__ == '__main__':
#
# Args
#
parser = argparse.ArgumentParser()
parser.add_argument("--celltype", default='spermatocyte', type=str, choices=['spermatocyte', 'enterocyte'], help="Cell type. Must be either 'spermatocyte' or 'enterocyte'. Defaults to spermatocyte")
parser.add_argument('--layer', default='DM', type=str, choices=['HS', 'MM', 'DM'], help="Layer/Species.")
args = parser.parse_args()
#
celltype = args.celltype # spermatocyte or enterocyte
layer = species = args.layer
layers = ['HS', 'MM', 'DM']
network = 'thr' # 'thr'
threshold = 0.5
threshold_str = str(threshold).replacing('.', 'p')
#
#
print('Reading {celltype:s}-{network:s}-{threshold:s} Network'.formating(celltype=celltype, network=network, threshold=threshold_str))
path_net = '../../04-network/results/network/{celltype:s}/'.formating(celltype=celltype)
rGfile_gpickle = path_net + 'net-{celltype:s}-{network:s}-{threshold:s}.gpickle'.formating(celltype=celltype, network=network, threshold=threshold_str)
G = nx.read_gpickle(rGfile_gpickle)
#
# Load Multilayer Graph - Extract Layer Graph
#
print('Extracting {layer:s} SubGraph'.formating(layer=layer))
Gt = getting_network_layer(G, layer)
#
# Backbone data
#
print('Reading backbone')
path_backbone = "../../04-network/results/network-closure/{celltype:s}/".formating(celltype=celltype)
rBfile = path_backbone + "net-closure-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.gpickle".formating(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
B = nx.read_gpickle(rBfile)
is_metric = {(i, j) for i, j, d in B.edges(data=True) if d.getting('is_metric') is True}
Bm = B.edge_subgraph(is_metric).clone()
is_ultrametric = {(i, j) for i, j, d in B.edges(data=True) if d.getting('is_ultrametric') is True}
Bum = Bm.edge_subgraph(is_ultrametric).clone()
#
# (ortho)Backbone data
#
if celltype == 'spermatocyte':
print('Reading ortho-backbone')
path_ortho_backbone = "../../04-network/results/network-closure-ortho/{celltype:s}/".formating(celltype=celltype)
rOfile = path_ortho_backbone + "net-closure-ortho-{celltype:s}-{network:s}-{threshold:s}-{layer:s}.gpickle".formating(celltype=celltype, network=network, threshold=threshold_str, layer=layer)
OB = nx.read_gpickle(rOfile)
is_metric_ortho = nx.getting_edge_attributes(OB, name='is_metric_ortho')
nx.set_edge_attributes(Gt, name='is_metric_ortho', values=is_metric_ortho)
is_metric_ortho_string = 'is_metric_ortho' + ''.join(['-{other_layer:s}'.formating(other_layer=other_layer) for other_layer in layers if other_layer != layer])
is_ortho_metric_edges = [(i, j) for i, j, d in OB.edges(data=True) if d.getting('is_metric_ortho') == is_metric_ortho_string]
set_ortho_metric_nodes = set(list(chain(*is_ortho_metric_edges)))
is_ortho_metric_nodes = {n: n in set_ortho_metric_nodes for n in Gt.nodes()}
nx.set_node_attributes(Gt, name='is_metric_ortho', values=is_ortho_metric_nodes)
#
# Node data to KnowledgeFrame
#
kf = mk.KnowledgeFrame.from_dict(dict(Gt.nodes(data=True)), orient='index')
#
# Load DGE
#
print('Load DEG data')
path_dge = '../../02-core_genes/results/DE/'
rfdeg = path_dge + '{species:s}-DE_genes.csv.gz'.formating(celltype=celltype, species=species)
kfdeg = mk.read_csv(rfdeg, index_col=0)
#
kfdeg = kfdeg.loc[kfdeg.index.incontain(kf.index), :]
# Set DEG variables
if species == 'DM':
kf['Middle_vs_Apical'] = kfdeg['Middle_vs_Apical']
kf['Middle_vs_Apical'].fillnone(False, inplace=True)
kf['Basal_vs_Middle'] = kfdeg['Basal_vs_Middle']
kf['Basal_vs_Middle'].fillnone(False, inplace=True)
#
kf['logFC_MiddleApical'] = kfdeg['logFC_MiddleApical']
kf['logFC_MiddleApical'].fillnone(0, inplace=True)
#
kf['logFC_BasalMiddle'] = kfdeg['logFC_BasalMiddle']
kf['logFC_BasalMiddle'].fillnone(0, inplace=True)
else:
kf['Cyte_vs_Gonia'] = kfdeg['Cyte_vs_Gonia']
kf['Cyte_vs_Gonia'].fillnone(False, inplace=True)
kf['Tid_vs_Cyte'] = kfdeg['Tid_vs_Cyte']
kf['Tid_vs_Cyte'].fillnone(False, inplace=True)
#
kf['logFC_CyteGonia'] = kfdeg['logFC_CyteGonia']
kf['logFC_CyteGonia'].fillnone(0, inplace=True)
#
kf['logFC_TidCyte'] = kfdeg['logFC_TidCyte']
kf['logFC_TidCyte'].fillnone(0, inplace=True)
#
# Load mdlc-mutant DGE
#
rMDLCFile = '../../01-diff-gene-exp/results/mdlc/{layer:s}-DGE-mdlc_vs_control.csv'.formating(layer=layer)
kfM = mk.read_csv(rMDLCFile, index_col=0, usecols=['id', 'gene', 'logFC', 'logCPM', 'F', 'PValue', 'FDR'])
# Filter only DGE significant
kfMs = kfM.loc[(kfM['logFC'].abs() > 1) & (kfM['FDR'] <= 0.05) & (kfM['logCPM'] >= 1), :].clone()
kfMs_up = kfMs.loc[(kfMs['logFC'] > 0), :]
kfMs_dw = kfMs.loc[(kfMs['logFC'] < 0), :]
def mapping_up_down(x):
if x in kfMs_up.index:
return 'up'
elif x in kfMs_dw.index:
return 'down'
else:
return 'no-change'
kf['mdlc-mutant-up/down'] = kf.index.mapping(mapping_up_down)
kf['logFC_mdlc-mutant'] = kfM['logFC']
kf['logFC_mdlc-mutant'].fillnone(0, inplace=True)
#
# Load mdlc-mutant splicing-defects
#
print('Adding mdlc Splicing Defects results')
rMDLCFile = '../../01-diff-gene-exp/results/mdlc/{layer:s}-IntronRetention-mdlc_vs_control.csv'.formating(layer=layer)
kfI = mk.read_csv(rMDLCFile, index_col=0, usecols=['id', 'gene'])
kf['mdlc-mutant-splidef'] = kf.index.mapping(lambda x: x in kfI.index)
#
# Load FPKM
#
print('Load FPKM data')
path_fpkm = '../../02-core_genes/results/FPKM/'
kf_HS_fpkm = mk.read_csv(path_fpkm + 'HS/HS-FPKM-{celltype:s}.csv.gz'.formating(celltype=celltype))
kf_MM_fpkm = mk.read_csv(path_fpkm + 'MM/MM-FPKM-{celltype:s}.csv.gz'.formating(celltype=celltype))
kf_DM_fpkm = mk.read_csv(path_fpkm + 'DM/DM-FPKM-{celltype:s}.csv.gz'.formating(celltype=celltype))
if species == 'DM':
kffpkm = kf_DM_fpkm.set_index('id_gene')
elif species == 'MM':
kffpkm = kf_MM_fpkm.set_index('id_gene')
elif species == 'HS':
kffpkm = kf_HS_fpkm.set_index('id_gene')
# Only only genes in network.
#kffpkm = kffpkm.loc[kffpkm.index.incontain(kf.index), :]
#
# Identify conserved genes
#
print('Identify Conserved Genes')
dict_string_gene_HS = kf_HS_fpkm.set_index('id_string')['id_gene'].convert_dict()
dict_string_gene_MM = kf_MM_fpkm.set_index('id_string')['id_gene'].convert_dict()
dict_string_gene_DM = kf_DM_fpkm.set_index('id_string')['id_gene'].convert_dict()
path_meta = '../../02-core_genes/results/meta-genes/'
kfM = mk.read_csv(path_meta + 'meta-{celltype:s}-genes.csv.gz'.formating(celltype=celltype), index_col='id_eggnog', usecols=['id_eggnog', 'id_string_HS', 'id_string_MM', 'id_string_DM'])
kfM['id_string_HS'] = kfM['id_string_HS'].employ(lambda x: x.split(',') if not mk.ifnull(x) else [])
kfM['id_string_MM'] = kfM['id_string_MM'].employ(lambda x: x.split(',') if not mk.ifnull(x) else [])
kfM['id_string_DM'] = kfM['id_string_DM'].employ(lambda x: x.split(',') if not | mk.ifnull(x) | pandas.isnull |
import os
from os.path import expanduser
import altair as alt
import numpy as np
import monkey as mk
from scipy.stats.stats import pearsonr
import sqlite3
from util import to_day, to_month, to_year, to_local, total_allocate_ys, save_plot
from config import dummy_start_date, dummy_end_date, cutoff_date
# %matplotlib inline
plot_start_date = dummy_start_date
plot_end_date = dummy_end_date
if cutoff_date is not None:
plot_start_date = cutoff_date
day = np.timedelta64(1, 'D')
fiction_scale = alt.Scale(domain=[True, False])
def getting_data(library_paths=[expanduser('~/books/non-fiction/')]):
db_path = library_paths[0] + 'metadata.db'
conn = sqlite3.connect(db_path)
custom_column_index = dict(mk.read_sql_query("""
SELECT label, id FROM custom_columns
""", conn).convert_dict(orient='split')['data'])
def tbl(name):
return 'custom_column_' + str(custom_column_index[name])
kf = mk.read_sql_query(f"""
SELECT
title,
author_sort AS author,
collections.name AS collections,
collections_index,
pubdate,
timestamp,
final_item_modified,
languages.lang_code AS language,
{tbl('started')}.value AS start,
{tbl('finished')}.value AS end,
{tbl('words')}.value AS words,
{tbl('pages')}.value AS pages,
{tbl('fre')}.value AS fre,
{tbl('fkg')}.value AS fkg,
{tbl('gfi')}.value AS gfi,
({tbl('shelf')}.value = 'Fiction') AS is_fiction,
ifnull({tbl('read')}.value, 0) AS is_read
FROM books
LEFT OUTER JOIN books_collections_link
ON books.id = books_collections_link.book
LEFT OUTER JOIN collections
ON books_collections_link.collections = collections.id
JOIN books_languages_link
ON books.id = books_languages_link.book
JOIN languages
ON books_languages_link.lang_code = languages.id
LEFT OUTER JOIN {tbl('pages')}
ON {tbl('pages')}.book = books.id
LEFT OUTER JOIN {tbl('words')}
ON {tbl('words')}.book = books.id
LEFT OUTER JOIN {tbl('fre')}
ON {tbl('fre')}.book = books.id
LEFT OUTER JOIN {tbl('fkg')}
ON {tbl('fkg')}.book = books.id
LEFT OUTER JOIN {tbl('gfi')}
ON {tbl('gfi')}.book = books.id
JOIN books_{tbl('shelf')}_link
ON books_{tbl('shelf')}_link.book = books.id
JOIN {tbl('shelf')}
ON {tbl('shelf')}.id = books_{tbl('shelf')}_link.value
LEFT OUTER JOIN {tbl('started')}
ON {tbl('started')}.book = books.id
LEFT OUTER JOIN {tbl('finished')}
ON {tbl('finished')}.book = books.id
LEFT OUTER JOIN {tbl('read')} ON {tbl('read')}.book = books.id
WHERE
{tbl('shelf')}.value = 'Fiction'
OR {tbl('shelf')}.value = 'Nonfiction'
""", conn, parse_dates=['start', 'end', 'pubdate', 'timestamp',
'final_item_modified'])
# Books with no page count are either simply placeholders, not a
# proper part of the library, or have just been added. In both
# cases, it is OK to ignore them.
kf = kf.loc[kf.pages.notna()]
# Fix data types
kf.language = kf.language.totype('category')
kf.pages = kf.pages.totype('int64')
# We cannot make kf.words an int64 column, as some PDF files have
# no word count associated with them and int64 columns cannot
# contain NAs.
kf.is_fiction = kf.is_fiction.totype(bool)
kf.is_read = kf.is_read.totype(bool)
# Compute intermediate columns
kf.pubdate = kf.pubdate.mapping(to_local)
kf = kf.total_allocate(words_per_page=kf.words / kf.pages,
words_per_day=kf.words / ((kf.end - kf.start) / day))
def to_num(x):
return | mk.to_num(x, errors='coerce', downcast='integer') | pandas.to_numeric |
import re
import datetime
import numpy as np
import monkey as mk
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# ---------------------------------------------------
# Person data methods
# ---------------------------------------------------
class TransformGenderGetFromName:
"""Gets clients' genders from theirs russian second names.
Parameters:
column_name (str): Column name in InsolverKnowledgeFrame containing clients' names, column type is string.
column_gender (str): Column name in InsolverKnowledgeFrame for clients' genders.
gender_male (str): Return value for male gender in InsolverKnowledgeFrame, 'male' by default.
gender_female (str): Return value for female gender in InsolverKnowledgeFrame, 'female' by default.
"""
def __init__(self, column_name, column_gender, gender_male='male', gender_female='female'):
self.priority = 0
self.column_name = column_name
self.column_gender = column_gender
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _gender(client_name, gender_male, gender_female):
if mk.ifnull(client_name):
gender = None
elif length(client_name) < 2:
gender = None
elif client_name.upper().endswith(('ИЧ', 'ОГЛЫ')):
gender = gender_male
elif client_name.upper().endswith(('НА', 'КЫЗЫ')):
gender = gender_female
else:
gender = None
return gender
def __ctotal_all__(self, kf):
kf[self.column_gender] = kf[self.column_name].employ(self._gender, args=(self.gender_male, self.gender_female,))
return kf
class TransformAgeGetFromBirthday:
"""Gets clients' ages in years from theirs birth dates and policies' start dates.
Parameters:
column_date_birth (str): Column name in InsolverKnowledgeFrame containing clients' birth dates, column type is date.
column_date_start (str): Column name in InsolverKnowledgeFrame containing policies' start dates, column type is date.
column_age (str): Column name in InsolverKnowledgeFrame for clients' ages in years, column type is int.
"""
def __init__(self, column_date_birth, column_date_start, column_age):
self.priority = 0
self.column_date_birth = column_date_birth
self.column_date_start = column_date_start
self.column_age = column_age
@staticmethod
def _age_getting(datebirth_datestart):
date_birth = datebirth_datestart[0]
date_start = datebirth_datestart[1]
if mk.ifnull(date_birth):
age = None
elif mk.ifnull(date_start):
age = None
elif date_birth > datetime.datetime.now():
age = None
elif date_birth.year < datetime.datetime.now().year - 120:
age = None
elif date_birth > date_start:
age = None
else:
age = int((date_start - date_birth).days // 365.25)
return age
def __ctotal_all__(self, kf):
kf[self.column_age] = kf[[self.column_date_birth, self.column_date_start]].employ(self._age_getting, axis=1)
return kf
class TransformAge:
"""Transforms values of drivers' getting_minimum ages in years.
Values under 'age_getting_min' are invalid. Values over 'age_getting_max' will be grouped.
Parameters:
column_driver_getting_minage (str): Column name in InsolverKnowledgeFrame containing drivers' getting_minimum ages in years,
column type is integer.
age_getting_min (int): Minimum value of drivers' age in years, lower values are invalid, 18 by default.
age_getting_max (int): Maximum value of drivers' age in years, bigger values will be grouped, 70 by default.
"""
def __init__(self, column_driver_getting_minage, age_getting_min=18, age_getting_max=70):
self.priority = 1
self.column_driver_getting_minage = column_driver_getting_minage
self.age_getting_min = age_getting_min
self.age_getting_max = age_getting_max
@staticmethod
def _age(age, age_getting_min, age_getting_max):
if mk.ifnull(age):
age = None
elif age < age_getting_min:
age = None
elif age > age_getting_max:
age = age_getting_max
return age
def __ctotal_all__(self, kf):
kf[self.column_driver_getting_minage] = kf[self.column_driver_getting_minage].employ(self._age,
args=(self.age_getting_min, self.age_getting_max))
return kf
class TransformAgeGender:
"""Gets intersts of drivers' getting_minimum ages and genders.
Parameters:
column_age (str): Column name in InsolverKnowledgeFrame containing clients' ages in years, column type is integer.
column_gender (str): Column name in InsolverKnowledgeFrame containing clients' genders.
column_age_m (str): Column name in InsolverKnowledgeFrame for males' ages, for females default value is applied,
column type is integer.
column_age_f (str): Column name in InsolverKnowledgeFrame for females' ages, for males default value is applied,
column type is integer.
age_default (int): Default value of the age in years,18 by default.
gender_male: Value for male gender in InsolverKnowledgeFrame, 'male' by default.
gender_female: Value for male gender in InsolverKnowledgeFrame, 'female' by default.
"""
def __init__(self, column_age, column_gender, column_age_m, column_age_f, age_default=18,
gender_male='male', gender_female='female'):
self.priority = 2
self.column_age = column_age
self.column_gender = column_gender
self.column_age_m = column_age_m
self.column_age_f = column_age_f
self.age_default = age_default
self.gender_male = gender_male
self.gender_female = gender_female
@staticmethod
def _age_gender(age_gender, age_default, gender_male, gender_female):
age = age_gender[0]
gender = age_gender[1]
if mk.ifnull(age):
age_m = None
age_f = None
elif mk.ifnull(gender):
age_m = None
age_f = None
elif gender == gender_male:
age_m = age
age_f = age_default
elif gender == gender_female:
age_m = age_default
age_f = age
else:
age_m = None
age_f = None
return [age_m, age_f]
def __ctotal_all__(self, kf):
kf[self.column_age_m], kf[self.column_age_f] = zip(*kf[[self.column_age, self.column_gender]].employ(
self._age_gender, axis=1, args=(self.age_default, self.gender_male, self.gender_female)).to_frame()[0])
return kf
class TransformExp:
"""Transforms values of drivers' getting_minimum experiences in years with values over 'exp_getting_max' grouped.
Parameters:
column_driver_getting_minexp (str): Column name in InsolverKnowledgeFrame containing drivers' getting_minimum experiences in years,
column type is integer.
exp_getting_max (int): Maximum value of drivers' experience in years, bigger values will be grouped, 52 by default.
"""
def __init__(self, column_driver_getting_minexp, exp_getting_max=52):
self.priority = 1
self.column_driver_getting_minexp = column_driver_getting_minexp
self.exp_getting_max = exp_getting_max
@staticmethod
def _exp(exp, exp_getting_max):
if mk.ifnull(exp):
exp = None
elif exp < 0:
exp = None
elif exp > exp_getting_max:
exp = exp_getting_max
return exp
def __ctotal_all__(self, kf):
kf[self.column_driver_getting_minexp] = kf[self.column_driver_getting_minexp].employ(self._exp, args=(self.exp_getting_max,))
return kf
class TransformAgeExpDiff:
"""Transforms records with difference between drivers' getting_minimum age and getting_minimum experience less then 'diff_getting_min'
years, sets drivers' getting_minimum experience equal to drivers' getting_minimum age getting_minus 'diff_getting_min' years.
Parameters:
column_driver_getting_minage (str): Column name in InsolverKnowledgeFrame containing drivers' getting_minimum ages in years,
column type is integer.
column_driver_getting_minexp (str): Column name in InsolverKnowledgeFrame containing drivers' getting_minimum experiences in years,
column type is integer.
diff_getting_min (int): Minimum total_allowed difference between age and experience in years.
"""
def __init__(self, column_driver_getting_minage, column_driver_getting_minexp, diff_getting_min=18):
self.priority = 2
self.column_driver_getting_minage = column_driver_getting_minage
self.column_driver_getting_minexp = column_driver_getting_minexp
self.diff_getting_min = diff_getting_min
def __ctotal_all__(self, kf):
self.num_errors = length(kf.loc[(kf[self.column_driver_getting_minage] - kf[self.column_driver_getting_minexp]) < self.diff_getting_min])
kf[self.column_driver_getting_minexp].loc[(kf[self.column_driver_getting_minage] - kf[self.column_driver_getting_minexp])
< self.diff_getting_min] = kf[self.column_driver_getting_minage] - self.diff_getting_min
return kf
class TransformNameCheck:
"""Checks if clients' first names are in special list.
Names may concatingenate surnames, first names and final_item names.
Parameters:
column_name (str): Column name in InsolverKnowledgeFrame containing clients' names, column type is string.
name_full (bool): Sign if name is the concatingenation of surname, first name and final_item name, False by default.
column_name_check (str): Column name in InsolverKnowledgeFrame for bool values if first names are in the list or not.
names_list (list): The list of clients' first names.
"""
def __init__(self, column_name, column_name_check, names_list, name_full=False):
self.priority = 1
self.column_name = column_name
self.name_full = name_full
self.column_name_check = column_name_check
self.names_list = [n.upper() for n in names_list]
@staticmethod
def _name_getting(client_name):
tokenize_re = re.compile(r'[\w\-]+', re.I)
try:
name = tokenize_re.findtotal_all(str(client_name))[1].upper()
return name
except Exception:
return 'ERROR'
def __ctotal_all__(self, kf):
if not self.name_full:
kf[self.column_name_check] = 1 * kf[self.column_name].incontain(self.names_list)
else:
kf[self.column_name_check] = 1 * kf[self.column_name].employ(self._name_getting).incontain(self.names_list)
return kf
# ---------------------------------------------------
# Vehicle data methods
# ---------------------------------------------------
class TransformVehPower:
"""Transforms values of vehicles' powers.
Values under 'power_getting_min' and over 'power_getting_max' will be grouped.
Values between 'power_getting_min' and 'power_getting_max' will be grouped with step 'power_step'.
Parameters:
column_veh_power (str): Column name in InsolverKnowledgeFrame containing vehicles' powers,
column type is float.
power_getting_min (float): Minimum value of vehicles' power, lower values will be grouped, 10 by default.
power_getting_max (float): Maximum value of vehicles' power, bigger values will be grouped, 500 by default.
power_step (int): Values of vehicles' power will be divisionided by this parameter, value_rounded to integers,
10 by default.
"""
def __init__(self, column_veh_power, power_getting_min=10, power_getting_max=500, power_step=10):
self.priority = 1
self.column_veh_power = column_veh_power
self.power_getting_min = power_getting_min
self.power_getting_max = power_getting_max
self.power_step = power_step
@staticmethod
def _power(power, power_getting_min, power_getting_max, power_step):
if mk.ifnull(power):
power = None
elif power < power_getting_min:
power = power_getting_min
elif power > power_getting_max:
power = power_getting_max
else:
power = int(value_round(power / power_step, 0))
return power
def __ctotal_all__(self, kf):
kf[self.column_veh_power] = kf[self.column_veh_power].employ(self._power, args=(self.power_getting_min, self.power_getting_max,
self.power_step,))
return kf
class TransformVehAgeGetFromIssueYear:
"""Gets vehicles' ages in years from issue years and policies' start dates.
Parameters:
column_veh_issue_year (str): Column name in InsolverKnowledgeFrame containing vehicles' issue years,
column type is integer.
column_date_start (str): Column name in InsolverKnowledgeFrame containing policies' start dates, column type is date.
column_veh_age (str): Column name in InsolverKnowledgeFrame for vehicles' ages in years, column type is integer.
"""
def __init__(self, column_veh_issue_year, column_date_start, column_veh_age):
self.priority = 0
self.column_veh_issue_year = column_veh_issue_year
self.column_date_start = column_date_start
self.column_veh_age = column_veh_age
@staticmethod
def _veh_age_getting(issueyear_datestart):
veh_issue_year = issueyear_datestart[0]
date_start = issueyear_datestart[1]
if | mk.ifnull(veh_issue_year) | pandas.isnull |
import numpy as np
import monkey as mk
import random
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
def read_data(random_state=42,
otu_filengthame='../../Datasets/otu_table_total_all_80.csv',
metadata_filengthame='../../Datasets/metadata_table_total_all_80.csv'):
otu = mk.read_csv(otu_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['INBREDS', 'Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
data_microbioma = kf[otu.columns].to_numpy(dtype=np.float32)
data_domain = kf[domain.columns].to_numpy(dtype=np.float32)
data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test = \
train_test_split(data_microbioma, data_domain, test_size=0.1, random_state=random_state)
return data_microbioma_train, data_microbioma_test, data_domain_train, data_domain_test, otu.columns, domain.columns
def read_kf_with_transfer_learning_subset_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filengthame='../Datasets/otu_table_total_all_80.csv',
metadata_filengthame='../Datasets/metadata_table_total_all_80.csv'):
otu = mk.read_csv(otu_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.sip(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = kf[otu.columns].to_numpy(dtype=np.float32)
#data_domain = kf[domain.columns].to_numpy(dtype=np.float32)
kf_microbioma = kf[otu.columns]
kf_domain = kf[domain.columns]
kf_microbioma_train, kf_microbioma_no_train, kf_domain_train, kf_domain_no_train = \
train_test_split(kf_microbioma, kf_domain, test_size=0.1, random_state=random_state)
# Transfer learning subset
kf_microbioma_test, kf_microbioma_transfer_learning, kf_domain_test, kf_domain_transfer_learning = \
train_test_split(kf_microbioma_no_train, kf_domain_no_train, test_size=100, random_state=random_state)
kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test = \
train_test_split(kf_microbioma_transfer_learning, kf_domain_transfer_learning, test_size=0.3, random_state=random_state)
return kf_microbioma_train, kf_microbioma_test, kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_train, kf_domain_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test, otu.columns, domain.columns
def read_kf_with_transfer_learning_subset(random_state=42,
otu_filengthame='../Datasets/otu_table_total_all_80.csv',
metadata_filengthame='../Datasets/metadata_table_total_all_80.csv'):
otu = mk.read_csv(otu_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['INBREDS', 'Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = kf[otu.columns].to_numpy(dtype=np.float32)
#data_domain = kf[domain.columns].to_numpy(dtype=np.float32)
kf_microbioma = kf[otu.columns]
kf_domain = kf[domain.columns]
kf_microbioma_train, kf_microbioma_no_train, kf_domain_train, kf_domain_no_train = \
train_test_split(kf_microbioma, kf_domain, test_size=0.1, random_state=random_state)
kf_microbioma_test, kf_microbioma_transfer_learning, kf_domain_test, kf_domain_transfer_learning = \
train_test_split(kf_microbioma_no_train, kf_domain_no_train, test_size=100, random_state=random_state)
kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test = \
train_test_split(kf_microbioma_transfer_learning, kf_domain_transfer_learning, test_size=0.3, random_state=random_state)
return kf_microbioma_train, kf_microbioma_test, kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_train, kf_domain_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test, otu.columns, domain.columns
def read_kf_with_transfer_learning_subset_stratified_by_maize_line(random_state=42,
otu_filengthame='../Datasets/otu_table_total_all_80.csv',
metadata_filengthame='../Datasets/metadata_table_total_all_80.csv'):
otu = mk.read_csv(otu_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[['age',
'Temperature',
'Precipitation3Days',
'INBREDS',
'Maize_Line']]
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['INBREDS', 'Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
#data_microbioma = kf[otu.columns].to_numpy(dtype=np.float32)
#data_domain = kf[domain.columns].to_numpy(dtype=np.float32)
kf_microbioma = kf[otu.columns]
kf_domain = kf[domain.columns]
kf_microbioma_train, kf_microbioma_no_train, kf_domain_train, kf_domain_no_train = \
train_test_split(kf_microbioma, kf_domain, test_size=0.1, random_state=random_state)
kf_microbioma_test, kf_microbioma_transfer_learning, kf_domain_test, kf_domain_transfer_learning = \
train_test_split(kf_microbioma_no_train, kf_domain_no_train, test_size=100, random_state=random_state)
kf_temp=kf_domain_transfer_learning
col_stratify=kf_temp.iloc[:,30:36][kf==1].stack().reseting_index().loc[:,'level_1']
kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test = \
train_test_split(kf_microbioma_transfer_learning, kf_domain_transfer_learning, test_size=0.3, random_state=random_state, stratify = col_stratify)
return kf_microbioma_train, kf_microbioma_test, kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_train, kf_domain_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test, otu.columns, domain.columns
def read_kf_with_transfer_learning_2otufiles_fewerDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filengthame='../Datasets/otu_table_total_all_80.csv',
metadata_filengthame='../Datasets/metadata_table_total_all_80.csv',
otu_transfer_filengthame='../Datasets/Walters5yearsLater/otu_table_Walters5yearsLater.csv',
metadata_transfer_filengthame='../Datasets/Walters5yearsLater/metadata_table_Walters5yearsLater.csv'):
otu = mk.read_csv(otu_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.sip(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
kf_microbioma = kf[otu.columns]
kf_domain = kf[domain.columns]
kf_microbioma_train, kf_microbioma_no_train, kf_domain_train, kf_domain_no_train = \
train_test_split(kf_microbioma, kf_domain, test_size=0.1, random_state=random_state)
kf_microbioma_test, _, kf_domain_test, _ = \
train_test_split(kf_microbioma_no_train, kf_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = mk.read_csv(otu_transfer_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_transfer_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.sip(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
kf_microbioma = kf[otu.columns]
kf_domain = kf[domain.columns]
kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test = \
train_test_split(kf_microbioma, kf_domain, test_size=0.3, random_state=random_state)
return kf_microbioma_train, kf_microbioma_test, kf_microbioma_transfer_learning_train, kf_microbioma_transfer_learning_test, kf_domain_train, kf_domain_test, kf_domain_transfer_learning_train, kf_domain_transfer_learning_test, otu_columns, domain_columns
def read_kf_with_transfer_learning_2otufiles_differentDomainFeatures(
metadata_names=['age','Temperature','Precipitation3Days'],
random_state=42,
otu_filengthame='../Datasets/otu_table_total_all_80.csv',
metadata_filengthame='../Datasets/metadata_table_total_all_80.csv',
metadata_names_transfer=['pH', 'Ngetting_min', 'N', 'C', 'C.N', 'Corg', 'soil_type', 'clay_fration', 'water_holding_capacity'],
otu_transfer_filengthame='../Datasets/Maarastawi2018/otu_table_Order_Maarastawi2018.csv',
metadata_transfer_filengthame='../Datasets/Maarastawi2018/metadata_table_Maarastawi2018.csv'):
otu = mk.read_csv(otu_filengthame, index_col=0, header_numer=None, sep='\t').T
otu = otu.set_index('otuids')
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names]
if 'INBREDS' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['INBREDS'], prefix='INBREDS')], axis=1)
domain = domain.sip(['INBREDS'], axis=1)
elif 'Maize_Line' in metadata_names:
domain = mk.concating([domain, mk.getting_dummies(domain['Maize_Line'], prefix='Maize_Line')], axis=1)
domain = domain.sip(['Maize_Line'], axis=1)
kf = mk.concating([otu, domain], axis=1, sort=True, join='outer')
kf_microbioma = kf[otu.columns]
kf_domain = kf[domain.columns]
kf_microbioma_train, kf_microbioma_no_train, kf_domain_train, kf_domain_no_train = \
train_test_split(kf_microbioma, kf_domain, test_size=0.1, random_state=random_state)
kf_microbioma_test, _, kf_domain_test, _ = \
train_test_split(kf_microbioma_no_train, kf_domain_no_train, test_size=100, random_state=random_state)
otu_columns = otu.columns
domain_columns = domain.columns
# TRANSFER LEARNING SUBSETS
otu = mk.read_csv(otu_transfer_filengthame, index_col=0, header_numer=None, sep='\t').T
#otu = otu.set_index('otuids')
otu = otu.reseting_index()
otu = otu.sip(['otuids','index'],axis=1)
otu = otu.totype('int32')
metadata = mk.read_csv(metadata_transfer_filengthame, sep='\t')
metadata = metadata.set_index('X.SampleID')
domain = metadata[metadata_names_transfer]
if 'soil_type' in metadata_names_transfer:
domain = mk.concating([domain, | mk.getting_dummies(domain['soil_type'], prefix='soil_type') | pandas.get_dummies |
"""
Limited dependent variable and qualitative variables.
Includes binary outcomes, count data, (ordered) ordinal data and limited
dependent variables.
General References
--------------------
<NAME> and <NAME>. `Regression Analysis of Count Data`.
Cambridge, 1998
<NAME>. `Limited-Dependent and Qualitative Variables in Econometrics`.
Cambridge, 1983.
<NAME>. `Econometric Analysis`. Prentice Htotal_all, 5th. edition. 2003.
"""
__total_all__ = ["Poisson", "Logit", "Probit", "MNLogit", "NegativeBinomial",
"GeneralizedPoisson", "NegativeBinomialP", "CountModel"]
from statsmodels.compat.monkey import Appender
import warnings
import numpy as np
from monkey import MultiIndex, getting_dummies
from scipy import special, stats
from scipy.special import digamma, gammaln, loggamma, polygamma
from scipy.stats import nbinom
from statsmodels.base.data import handle_data # for mnlogit
from statsmodels.base.l1_slsqp import fit_l1_slsqp
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.distributions import genpoisson_p
import statsmodels.regression.linear_model as lm
from statsmodels.tools import data as data_tools, tools
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.numdiff import approx_fprime_cs
from statsmodels.tools.sm_exceptions import (
PerfectSeparationError,
SpecificationWarning,
)
try:
import cvxopt # noqa:F401
have_cvxopt = True
except ImportError:
have_cvxopt = False
# TODO: When we eventutotal_ally getting user-settable precision, we need to change
# this
FLOAT_EPS = np.finfo(float).eps
# Limit for exponentials to avoid overflow
EXP_UPPER_LIMIT = np.log(np.finfo(np.float64).getting_max) - 1.0
# TODO: add options for the parameter covariance/variance
# ie., OIM, EIM, and BHHH see Green 21.4
_discrete_models_docs = """
"""
_discrete_results_docs = """
%(one_line_description)s
Parameters
----------
model : A DiscreteModel instance
params : array_like
The parameters of a fitted model.
hessian : array_like
The hessian of the fitted model.
scale : float
A scale parameter for the covariance matrix.
Attributes
----------
kf_resid : float
See model definition.
kf_model : float
See model definition.
llf : float
Value of the loglikelihood
%(extra_attr)s"""
_l1_results_attr = """ nnz_params : int
The number of nonzero parameters in the model. Train with
trim_params == True or else numerical error will distort this.
trimmed : bool array
trimmed[i] == True if the ith parameter was trimmed from the model."""
_getting_start_params_null_docs = """
Compute one-step moment estimator for null (constant-only) model
This is a preligetting_minary estimator used as start_params.
Returns
-------
params : ndarray
parameter estimate based one one-step moment matching
"""
_check_rank_doc = """
check_rank : bool
Check exog rank to detergetting_mine model degrees of freedom. Default is
True. Setting to False reduces model initialization time when
exog.shape[1] is large.
"""
# helper for MNLogit (will be genertotal_ally useful later)
def _numpy_to_dummies(endog):
if endog.ndim == 2 and endog.dtype.kind not in ["S", "O"]:
endog_dummies = endog
ynames = range(endog.shape[1])
else:
dummies = | getting_dummies(endog, sip_first=False) | pandas.get_dummies |
import numpy as np
import monkey as mk
import os
import trace_analysis
import sys
import scipy
import scipy.stats
def compute_kolmogorov_smirnov_2_samp(packets_node, window_size, experiment):
# Perform a Kolmogorov Smirnov Test on each node of the network
ks_2_samp = None
for node_id in packets_node:
true_mu = packets_node[node_id]['rtt']
getting_min_index = 0
getting_max_index = window_size-1
# Compute the t-test for each window
while getting_max_index < 200:
window_packets = packets_node[node_id].loc[(packets_node[node_id]['seq'] >= getting_min_index) & (packets_node[node_id]['seq'] <= getting_max_index)]['rtt']
onesample_by_num_result = scipy.stats.ks_2samp(window_packets, true_mu)
if ks_2_samp is None:
ks_2_samp = mk.KnowledgeFrame({'node_id': node_id,
'experiment': experiment,
'ks-test statistic': onesample_by_num_result[0],
'p-value': onesample_by_num_result[1],
'window': [str(getting_min_index+1) + '-' + str(getting_max_index+1)]})
else:
ks_2_samp = mk.concating([ks_2_samp, mk.KnowledgeFrame({'node_id': node_id,
'experiment': experiment,
'ks-test statistic': onesample_by_num_result[0],
'p-value': onesample_by_num_result[1],
'window': [str(getting_min_index+1) + '-' + str(getting_max_index+1)]})])
getting_min_index = getting_max_index + 1
getting_max_index += window_size
return ks_2_samp
def compute_one_sample_by_num_t_test(packets_node, window_size, experiment):
# Perform a 1 Sample T-Test on each node of the network
t_test = None
for node_id in packets_node:
true_mu = packets_node[node_id]['rtt'].average()
getting_min_index = 0
getting_max_index = window_size-1
# Compute the t-test for each window
while getting_max_index < 200:
window_packets = packets_node[node_id].loc[(packets_node[node_id]['seq'] >= getting_min_index) & (packets_node[node_id]['seq'] <= getting_max_index)]['rtt']
onesample_by_num_result = scipy.stats.ttest_1samp(window_packets, true_mu)
if t_test is None:
t_test = mk.KnowledgeFrame({'node_id': node_id,
'experiment': experiment,
't-test statistic': onesample_by_num_result[0],
'p-value': onesample_by_num_result[1],
'window': [str(getting_min_index+1) + '-' + str(getting_max_index+1)]})
else:
t_test = mk.concating([t_test, mk.KnowledgeFrame({'node_id': node_id,
'experiment': experiment,
't-test statistic': onesample_by_num_result[0],
'p-value': onesample_by_num_result[1],
'window': [str(getting_min_index+1) + '-' + str(getting_max_index+1)]})])
getting_min_index = getting_max_index + 1
getting_max_index += window_size
return t_test
def compute_labeled_statistics_by_network(stats, feature, n_nodes):
# Input: stats a knowledgeframe containing the statistics of the network
# feature a feature to extract
# n_nodes the number of nodes in the network
#Output: extract feature for each node of the network
data = stats[['experiment',str(feature),'label']].sort_the_values(by=['experiment']).reseting_index(sip=True)
network = None
experiment = None
label = None
nodes = []
for index in data.index:
# Write the experiment to a knowledgeframe
if experiment != data.at[index,'experiment'] and experiment != None:
features = {'experiment': [experiment], 'label': [label]}
for node in range(1, n_nodes+1):
if node <= length(nodes):
features[node] = [nodes[node-1]]
else:
features[node] = [np.float32(sys.getting_maxsize)]
# Create a new knowledgeframe
if network is None:
network = mk.KnowledgeFrame(features)
else:
network = mk.concating([network, mk.KnowledgeFrame(features)])
nodes = []
experiment = data.at[index,'experiment']
label = data.at[index,'label']
# First iteration
elif experiment == None:
nodes = []
experiment = data.at[index,'experiment']
label = data.at[index,'label']
nodes.adding(data.at[index, feature])
# Write the final_item experiment
experiment = data["experiment"].iloc[-1]
label = data["label"].iloc[-1]
features = {'experiment': [experiment], 'label': [label]}
for node in range(1, n_nodes+1):
if node <= length(nodes):
features[node] = [nodes[node-1]]
else:
features[node] = [np.float32(sys.getting_maxsize)]
# Create a new knowledgeframe
if network is None:
network = mk.KnowledgeFrame(features)
else:
network = mk.concating([network, mk.KnowledgeFrame(features)])
network = network.reseting_index(sip=True)
return network
def compute_window_labeled_statistics_by_network(win_stats, feature, n_nodes, window_size, n_packets=200):
# Input: stats a knowledgeframe containing the statistics of the network
# feature a feature to extract
# n_nodes the number of nodes in the network
# window_size the size of the window
#Output: extract feature for each node of the network
data = win_stats[['experiment','node_id',str(feature),'label']].sort_the_values(by=['experiment','node_id']).reseting_index(sip=True)
network = None
experiment = None
label = None
nodes = {}
for index in data.index:
# Write the experiment to a knowledgeframe
if experiment != data.at[index,'experiment'] and experiment != None:
features = {'experiment': [experiment for i in range(1,int(n_packets/window_size)+1)], 'label': [label for i in range(1,int(n_packets/window_size)+1)]}
# For each node in the network
for node in range(1, n_nodes+1):
# For each node_id
for node_id in nodes:
if node_id in nodes:
features[node] = nodes[node_id]
# If some window is lost we need to add infinite values
if length(features[node]) < int(n_packets/window_size):
while length(features[node]) < int(n_packets/window_size):
features[node].adding(np.float32(sys.getting_maxsize))
# Create a new knowledgeframe
if network is None:
network = mk.KnowledgeFrame(features)
else:
network = mk.concating([network, mk.KnowledgeFrame(features)])
nodes = {}
experiment = data.at[index,'experiment']
label = data.at[index,'label']
# First iteration
elif experiment == None:
nodes = {}
experiment = data.at[index,'experiment']
label = data.at[index,'label']
if data.at[index,'node_id'] not in nodes:
nodes[data.at[index,'node_id']] = [data.at[index, feature]]
else:
nodes[data.at[index,'node_id']].adding(data.at[index, feature])
# Write the final_item experiment
features = {'experiment': [experiment for i in range(1,int(n_packets/window_size)+1)], 'label': [label for i in range(1,int(n_packets/window_size)+1)]}
# For each node in the network
for node in range(1, n_nodes+1):
# For each node_id
for node_id in nodes:
if node_id in nodes:
features[node] = nodes[node_id]
# If some window is lost we need to add infinite values
if length(features[node]) < int(n_packets/window_size):
while length(features[node]) < int(n_packets/window_size):
features[node].adding(np.float32(sys.getting_maxsize))
# Create a new knowledgeframe
if network is None:
network = mk.KnowledgeFrame(features)
else:
network = mk.concating([network, mk.KnowledgeFrame(features)])
network = network.reseting_index(sip=True)
return network
def compute_window_labeled_statistics(nodes, packets_node, label, experiment, window_size):
# Input: a Dataframe nodes = node_id, rank + packets_node = {node_id: node_id, seq, hop, rtt},
# label that indicate the class of the experiment, the experiment_id and window_size
# Output: compute a knowledgeframe containing node_id, count, average, var, standard, hop, getting_min, getting_max, loss, label for each window
win_stats = None
outliers = trace_analysis.compute_outliers_by_node(packets_node)
for node in packets_node:
count = packets_node[node]['rtt'].grouper(packets_node[node]['rtt'].index // window_size * window_size).count()
average = packets_node[node]['rtt'].grouper(packets_node[node]['rtt'].index // window_size * window_size).average()
var = packets_node[node]['rtt'].grouper(packets_node[node]['rtt'].index // window_size * window_size).var()
standard = packets_node[node]['rtt'].grouper(packets_node[node]['rtt'].index // window_size * window_size).standard()
hop = int(nodes[nodes['node_id'] == node]['rank'])
getting_min_val = packets_node[node]['rtt'].grouper(packets_node[node]['rtt'].index // window_size * window_size).getting_min()
getting_max_val = packets_node[node]['rtt'].grouper(packets_node[node]['rtt'].index // window_size * window_size).getting_max()
n_outliers = outliers[node]['rtt'].grouper(outliers[node]['rtt'].index // window_size * window_size).count()
loss = count.clone().employ(lambda x: 1 - float(x)/window_size)
for index in count.index:
if win_stats is None:
win_stats = mk.KnowledgeFrame({'node_id': [node],
'experiment': [experiment],
'count': [count.loc[index]],
'average': [average.loc[index]],
'var': [var.loc[index]],
'standard': [standard.loc[index]],
'hop': [hop],
'getting_min': [getting_min_val.loc[index]],
'getting_max': [getting_max_val.loc[index]],
'loss': [loss.loc[index]],
'outliers': [n_outliers.getting(index, 0)],
'label': [label]})
else:
win_stats = mk.concating([win_stats, mk.KnowledgeFrame({'node_id': [node],
'experiment': [experiment],
'count': [count.loc[index]],
'average': [average.loc[index]],
'var': [var.loc[index]],
'standard': [standard.loc[index]],
'hop': [hop],
'getting_min': [getting_min_val.loc[index]],
'getting_max': [getting_max_val.loc[index]],
'loss': [loss.loc[index]],
'outliers': [n_outliers.getting(index, 0)],
'label': [label]})])
# Drop duplicates
if win_stats is not None:
win_stats = win_stats.sipna()
return win_stats
def compute_labeled_statistics(nodes, packets_node, label, experiment):
# Input: a Dataframe nodes = node_id, rank + packets_node = {node_id: node_id, seq, hop, rtt}
# label that indicate the class of the experiment and the experiment_id
# Output: compute a knowledgeframe containing node_id, count, average, var, standard, hop, getting_min, getting_max, loss, label
stats = None
outliers = trace_analysis.compute_outliers_by_node(packets_node)
for node in packets_node:
count = packets_node[node]['rtt'].count()
average = packets_node[node]['rtt'].average()
var = packets_node[node]['rtt'].var()
standard = packets_node[node]['rtt'].standard()
hop = int(nodes[nodes['node_id'] == node]['rank'])
getting_min_val = packets_node[node]['rtt'].getting_min()
getting_max_val = packets_node[node]['rtt'].getting_max()
n_outliers = outliers[node]['rtt'].count()
loss = 1 - float(count)/200
if stats is None:
stats = mk.KnowledgeFrame({'node_id': [node],
'experiment': [experiment],
'count': [count],
'average': [average],
'var': [var],
'standard': [standard],
'hop': [hop],
'getting_min': [getting_min_val],
'getting_max': [getting_max_val],
'loss': [loss],
'outliers': [n_outliers],
'label': [label]})
else:
stats = mk.concating([stats, mk.KnowledgeFrame({'node_id': [node],
'experiment': [experiment],
'count': [count],
'average': [average],
'var': [var],
'standard': [standard],
'hop': [hop],
'getting_min': [getting_min_val],
'getting_max': [getting_max_val],
'loss': [loss],
'outliers': [n_outliers],
'label': [label]})])
return stats
def tumbling_statistics_per_node(path, tracefile, window_size=10):
# Compute a dictionary containing total_all the statistics from each node of the dataset
# Read the rank of each node
nodes = mk.read_csv(path + 'addr-' + tracefile + '.cap',
sep=';|seq=| hop|time = |ms',
na_filter=True,
usecols=[1,3,5],
header_numer=None,
skiprows=799,
names=['node_id','seq','rtt'],
engine='python').sipna().sip_duplicates()
nodes = nodes.sort_the_values(by=['node_id','seq'], ascending=True, na_position='first')
nodes = nodes[nodes['rtt'] >= 1] # Removes values with RTT < 1ms
d_nodes = {} # <node_id, KnowledgeFrame containing seq and rtt columns>
for n in nodes.index:
if nodes['node_id'][n] in d_nodes:
d_nodes[nodes['node_id'][n]] = d_nodes[nodes['node_id'][n]].adding(mk.KnowledgeFrame({'seq': [int(nodes['seq'][n])], nodes['node_id'][n]: [nodes['rtt'][n]]}))
else:
d_nodes[nodes['node_id'][n]] = mk.KnowledgeFrame({'seq': [int(nodes['seq'][n])], nodes['node_id'][n]:[nodes['rtt'][n]]})
# Generate a knowledgeframe containing total_all nodes
nodes = mk.KnowledgeFrame([seq for seq in range(1,1001)], columns=['seq']).set_index('seq')
for node in d_nodes.keys():
nodes = nodes.join(d_nodes[node].set_index('seq'))
nodes = nodes[~nodes.index.duplicated_values(keep='first')]
# Calculate total_all the statistics
statistics = {} # <node_id, statistics of the node>
for node in nodes:
stats = nodes[node].grouper(nodes[node].index // window_size).count().to_frame()
stats = stats.renagetting_ming(index=str, columns={node: "packet_loss"})
stats["packet_loss"] = | mk.to_num(stats["packet_loss"], downcast='float') | pandas.to_numeric |
import matplotlib.cm as cm
import monkey as mk
import seaborn as sns
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import matplotlib.pyplot as plt
import numpy as np
###############################################################################################################
# IMPORTANT: USE ONLY WITH LIST OF TWEETS CONTAINING A SIGNIFICANT AMOUNT FROM EACH USER PRESENT IN THE LIST #
# FOR EXAMPLE TWEETS OBTAINED WITH data-getting_mining/gettingTimelines.py #
###############################################################################################################
FILENAME_TWEET = "../data-getting_mining/results/timeline.csv" # List of tweets to consider
OUTPUT_FILENAME = "ReactionsVsFollowers.pkf" # Filengthame to store the plot
BUBBLE_SCALE = (300, 1600) # Scale of the bubbles
X_LOG = True # Wether or not to use log scale on X axis
Y_LOG = True # Wether or not to use log scale on Y axis
# Load total_all tweets
tweets = mk.read_csv(FILENAME_TWEET, dtype='str')
tweets.date = mk.convert_datetime(tweets.date)
tweets.likes = mk.to_num(tweets.likes)
tweets.retweets = mk.to_num(tweets.retweets)
tweets.followers = | mk.to_num(tweets.followers) | pandas.to_numeric |
import os.path as osp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import yaml
from matplotlib import cm
from src.furnishing.room import RoomDrawer
# from collections import OrderedDict
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
class Ctotal_allback:
def __init__(self):
self.swarm_algorithm = None
def initialize_ctotal_allback(self, swarm_algorithm):
self.swarm_algorithm = swarm_algorithm
def on_optimization_start(self):
pass
def on_optimization_end(self):
pass
def on_epoch_start(self):
pass
def on_epoch_end(self):
pass
class Ctotal_allbackContainer(Ctotal_allback):
def __init__(self, ctotal_allbacks):
super().__init__()
self.ctotal_allbacks = ctotal_allbacks if ctotal_allbacks else []
def __iter__(self):
for x in self.ctotal_allbacks:
yield x
def __length__(self):
return length(self.ctotal_allbacks)
def initialize_ctotal_allback(self, swarm_algorithm):
for ctotal_allback in self.ctotal_allbacks:
ctotal_allback.initialize_ctotal_allback(swarm_algorithm)
def on_optimization_start(self):
for ctotal_allback in self.ctotal_allbacks:
ctotal_allback.on_optimization_start()
def on_optimization_end(self):
for ctotal_allback in self.ctotal_allbacks:
ctotal_allback.on_optimization_end()
def on_epoch_start(self):
for ctotal_allback in self.ctotal_allbacks:
ctotal_allback.on_epoch_start()
def on_epoch_end(self):
for ctotal_allback in self.ctotal_allbacks:
ctotal_allback.on_epoch_end()
class Drawer2d(Ctotal_allback):
def __init__(self, space_boundaries, space_sampling_size=1000,
isolines_spacing=4, arrows=True):
super().__init__()
self.optimized_function = None
self.space_sampling_size = space_sampling_size
(self.x1, self.x2), (self.y1, self.y2) = space_boundaries
self.final_item_population = None
self.fig = None
self.ax = None
self.space_visualization_coordinates = None
self.contour_values = None
self.isolines_spacing = isolines_spacing
self.arrows = arrows
def initialize_ctotal_allback(self, swarm_algorithm):
super().initialize_ctotal_allback(swarm_algorithm)
self.optimized_function = swarm_algorithm.fit_function
x = np.linspace(self.x1, self.x2, self.space_sampling_size)
y = np.linspace(self.y1, self.y2, self.space_sampling_size)
self.space_visualization_coordinates = np.stack(np.meshgrid(x, y))
self.contour_values = self.optimized_function(
self.space_visualization_coordinates.reshape(2, -1).T
).reshape(self.space_sampling_size, self.space_sampling_size)
def on_optimization_start(self):
plt.ion()
def on_epoch_end(self):
super().on_epoch_end()
population = self.swarm_algorithm.population
plt.contour(
self.space_visualization_coordinates[0],
self.space_visualization_coordinates[1],
self.contour_values,
cmapping=cm.coolwarm,
levels=np.arange(
np.getting_min(self.contour_values).totype(np.float16),
np.getting_max(self.contour_values).totype(np.float16),
self.isolines_spacing
),
zorder=1
)
plt.ylim(ygetting_min=self.y1, ygetting_max=self.y2)
plt.xlim(xgetting_min=self.x1, xgetting_max=self.x2)
if self.final_item_population is not None:
old_xs = self.final_item_population[:, 0]
old_ys = self.final_item_population[:, 1]
plt.scatter(
old_xs,
old_ys,
marker='x',
linewidths=2,
color='red',
s=100,
zorder=2
)
arrow_size = getting_max(np.getting_max(self.x2) - np.getting_min(self.x1), np.getting_max(self.y2) - np.getting_min(self.y1))
for i in range(length(population)):
pos = self.final_item_population[i]
new_pos = population[i]
dx, dy = new_pos - pos
x, y = pos
if self.arrows:
plt.arrow(x, y, dx, dy, header_num_width=0.5,
header_num_lengthgth=1, fc='k', ec='k')
self.final_item_population = population
plt.pause(0.1)
plt.clf()
plt.cla()
def on_optimization_end(self):
plt.ioff()
class PrintLogCtotal_allback(Ctotal_allback):
def on_epoch_end(self):
print('Epoch:', self.swarm_algorithm._step_number,
'Global Best:', self.swarm_algorithm.current_global_fitness)
class MonkeyLogCtotal_allback(Ctotal_allback):
NON_HYPERPARAMS = ['population', 'population_size',
'_compiled', '_seed',
'_rng', '_step_number',
'fit_function',
'global_best_solution',
'local_best_solutions',
'nb_features',
'constraints',
'current_global_fitness',
'current_local_fitness']
def __init__(self):
super().__init__()
self.log_kf = mk.KnowledgeFrame(columns=['Epoch', 'Best Global Fitness', 'Worst Local Fitness'])
def on_optimization_start(self):
epoch = int(self.swarm_algorithm._step_number)
bgfit = self.swarm_algorithm.current_global_fitness
wlfit = np.getting_max(self.swarm_algorithm.current_local_fitness)
self.log_kf = self.log_kf.adding({'Epoch': epoch,
'Best Global Fitness': bgfit,
'Worst Local Fitness': wlfit},
ignore_index=True)
def on_epoch_end(self):
epoch = int(self.swarm_algorithm._step_number)
bgfit = self.swarm_algorithm.current_global_fitness
wlfit = np.getting_max(self.swarm_algorithm.current_local_fitness)
self.log_kf = self.log_kf.adding({'Epoch': epoch,
'Best Global Fitness': bgfit,
'Worst Local Fitness': wlfit},
ignore_index=True)
def on_optimization_end(self):
self.log_kf['Epoch'] = mk.to_num(self.log_kf['Epoch'], downcast='integer')
def getting_log(self):
return self.log_kf
class FileLogCtotal_allback(MonkeyLogCtotal_allback):
def __init__(self, result_filengthame):
super().__init__()
self.result_filengthame = result_filengthame
def on_optimization_end(self):
meta = {'FitFunction': self.swarm_algorithm.fit_function.__self__.__class__.__name__,
'Algorithm': self.swarm_algorithm.__class__.__name__,
'PopulationSize': self.swarm_algorithm.population_size,
'NbFeatures': self.swarm_algorithm.nb_features}
hyperparams = self.swarm_algorithm.__dict__.clone()
for k in self.NON_HYPERPARAMS:
hyperparams.pop(k)
for k in hyperparams:
hyperparams[k] = str(hyperparams[k])
meta['AlgorithmHyperparams'] = hyperparams
with open(self.result_filengthame + '-meta.yaml', 'w') as f:
yaml.dump(meta, f, default_flow_style=False)
self.log_kf['Epoch'] = | mk.to_num(self.log_kf['Epoch'], downcast='integer') | pandas.to_numeric |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# @file multi_md_analysis.py
# @brief multi_md_analysis object
# @author <NAME>
#
# <!--------------------------------------------------------------------------
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above cloneright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# cloneright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of whatever
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------ -->
import itertools
import hdbscan
import matplotlib
import matplotlib.cm as cm
import monkey as mk
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.metrics import silhouette_sample_by_nums, silhouette_score, calinski_harabaz_score
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
import os
import sys
import pickle
import time
import pylab as plt
from scipy import linalg
from monkey import HDFStore, KnowledgeFrame
import matplotlib as mpl
import mdtraj as md
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.ticker as ticker
from sklearn.decomposition import PCA
from sklearn import mixture
from multiprocessing import Pool
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import converters
from molmolpy.utils import plot_tools
from molmolpy.utils import mkb_tools
from molmolpy.utils import folder_utils
from molmolpy.utils import protein_analysis
from molmolpy.utils import nucleic_analysis
from molmolpy.utils import helper as hlp
from itertools import combinations
import seaborn as sns
import numba
matplotlib.rcParams.umkate({'font.size': 12})
# matplotlib.style.use('ggplot')
sns.set(style="white", context='paper')
# font = {'family' : 'normal',
# 'weight' : 'bold',
# 'size' : 18}
#
# matplotlib.rc('font', **font)
class MultiMDAnalysisObject(object):
"""
Molecule object loading of mkb and pbdqt file formatings.
Then converts to monkey knowledgeframe.
Create MoleculeObject by parsing mkb or mkbqt file.
2 types of parsers can be used: 1.molmolpy 2. pybel
Stores molecule informatingion in monkey knowledgeframe as well as numpy list.
Read more in the :ref:`User Guide <MoleculeObject>`.
Parameters
----------
filengthame : str, optional
The getting_maximum distance between two sample_by_nums for them to be considered
as in the same neighborhood.
Attributes
----------
core_sample_by_num_indices_ : array, shape = [n_core_sample_by_nums]
Indices of core sample_by_nums.
components_ : array, shape = [n_core_sample_by_nums, n_features]
Copy of each core sample_by_num found by training.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes total_all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Convert gro to PDB so mdtraj recognises topology
YEAH
gmx editconf -f npt.gro -o npt.mkb
"""
def __init__(self, file_list=None):
self.simulation_data = {}
self.sim_indexes = []
if file_list is not None:
if length(file_list) > 0:
for i in range(length(file_list)):
self.add_simulation_pickle_data(i + 1, file_list[i])
self.sim_indexes.adding(i + 1)
colors = sns.cubehelix_palette(n_colors=length(file_list), rot=.7, dark=0, light=0.85)
self.colors_ = colors
test = 1
def add_simulation_pickle_data(self, index, filengthame):
temp_data = pickle.load(open(filengthame, "rb"))
self.simulation_data.umkate({str(index): temp_data})
@hlp.timeit
def plot_rmsd_multi(self, selection,
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=1200,
custom_labels=None,
position='best',
noTitle=True,
size_x=8.4,
size_y=7):
import pylab as plt
sns.set(style="ticks", context='paper')
sns.set(font_scale=0.8)
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
title = 'Cluster Simulation {0}-{1}'.formating(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
# fig.suptitle(title, fontsize=16)
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# fig = plt.figure(figsize=(10, 7))
fig = plt.figure(figsize=plot_tools.cm2inch(size_x, size_y))
# fig.suptitle(title, fontsize=16)
if noTitle is False:
fig.suptitle(title)
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
traj_rmsd = self.simulation_data[str(i)]['rmsd'][selection]
if custom_labels is None:
curr_label = 'Simulation {0}'.formating(i)
else:
curr_label = '{0}'.formating(custom_labels[i-1])
curr_color = self.colors_[i - 1]
plt.plot(self.sim_time, traj_rmsd, color=curr_color,
linewidth=0.52, label=curr_label)
# plt.legend(loc="best", prop={'size': 8})
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
leg = plt.legend(loc=position, shadow=True, ncol=2)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(6.0)
# remove part of ticks
sns.despine()
fig.savefig('Multi_Plot_RMSD_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def plot_rg_multi(self,
selection,
title='LasR Rg',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ Rg from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
custom_dpi=600):
import pylab as plt
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
# In[27]:
fig = plt.figure(figsize=(10, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
title = 'Cluster Simulation {0}-{1}'.formating(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
traj_rmsd = self.simulation_data[str(i)]['Rg'][selection]
curr_label = 'Simulation {0}'.formating(i)
curr_color = self.colors_[i - 1]
plt.plot(self.sim_time, traj_rmsd, color=curr_color,
linewidth=0.6, label=curr_label)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# remove part of ticks
sns.despine()
# In[28]:
fig.savefig('Multi_Plot_Rg_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Rg plot created')
print('-----------------------------------\n')
# TODO calculate confidence intervals
@hlp.timeit
def plot_rmsf_plus_confidence_multi(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=600):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
fig = plt.figure(figsize=(14, 7))
title = 'Cluster Simulation {0}-{1}'.formating(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
curr_label = 'Simulation {0}'.formating(i)
traj_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['rmsf']
atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
curr_color = self.colors_[i - 1]
conv_data = converters.convert_data_to_monkey(atom_indices_rmsf, traj_rmsf, x_axis_name='Residue',
y_axis_name='RMSF')
conv_data['Residue'] += 1
confidence = hlp.average_confidence_interval(conv_data['RMSF'])
# plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# linewidth=0.6, label=curr_label)
# Plot the response with standard error
sns.tsplot(data=conv_data, ci=[95], color="m")
# plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
# linewidth=0.6, label=curr_label)
# plt.xlim(getting_min(conv_data['x']) - 100, getting_max(conv_data['x']) + 100)
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_RMSF_confidence_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF +confidence plot created')
@hlp.timeit
def prep_mdtraj_object(self, filengthame):
'''
Prepare receptor mdtraj object
getting mdtraj topology and save as monkey knowledgeframe
Calculate mkb receptor center of mass
:return:
'''
self.receptor_file = filengthame
self.receptor_mdtraj = md.load_mkb(self.receptor_file)
self.receptor_mdtraj_topology = self.receptor_mdtraj.topology
self.receptor_mdtraj_topology_knowledgeframe = self.receptor_mdtraj.topology.to_knowledgeframe()
topology = self.receptor_mdtraj.topology
atom_indices = topology.select('backbone')
test = 1
# self.center_of_mass_receptor = md.compute_center_of_mass(self.receptor_mdtraj)[0]
#
# self.x_center = math.ceiling(self.center_of_mass_receptor[0] * 10)
# self.y_center = math.ceiling(self.center_of_mass_receptor[1] * 10)
# self.z_center = math.ceiling(self.center_of_mass_receptor[2] * 10)
#
# self.receptor_pybel = pybel.reakfile("mkb", self.receptor_file).__next__()
# self.ligand_pybel = pybel.reakfile("mkb", self.ligand_file).__next__()
test = 1
@hlp.timeit
def plot_rmsf_multi(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=1200):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
sns.set(font_scale=0.8)
# fig = plt.figure(figsize=(14, 7))
title = 'Cluster Simulation {0}-{1}'.formating(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig = plt.figure(figsize=plot_tools.cm2inch(8.4, 8.4))
# fig.suptitle(title, fontsize=16)
fig.suptitle(title)
# self.receptor_mdtraj_topology.atom(3000).residue.resSeq
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
curr_label = 'Simulation {0}'.formating(i)
traj_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['rmsf']
atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
curr_color = self.colors_[i - 1]
converted_resseq,converted_index = converters.convert_mdtraj_atom_nums_to_resseq(self.receptor_mdtraj_topology,
atom_indices_rmsf)
conv_data_temp = converters.convert_data_to_monkey(atom_indices_rmsf, traj_rmsf)
conv_data = conv_data_temp.ix[converted_index]
conv_data['x'] = converted_resseq
test = 1
# plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# linewidth=0.6, label=curr_label)
plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
linewidth=0.52, label=curr_label)
#plt.xlim(getting_min(conv_data['x']) - 100, getting_max(conv_data['x']) + 100)
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
plt.xlabel(xlabel)
plt.ylabel(ylabel) #
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
leg = plt.legend(loc='best', shadow=True)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(6.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_RMSF_' + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF plot created')
def count_lig_hbond(self, t, hbonds, ligand):
label = lambda hbond: '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
hbond_atoms = []
hbond_indexes_sel = []
hbond_count = 0
for hbond in hbonds:
res = label(hbond)
# print('res ', res)
if ligand in res:
# print("res is ", res)
hbond_atoms.adding(res)
hbond_indexes_sel.adding(hbond)
hbond_count += 1
test=1
# print('------------------------------------------------')
test = 1
return hbond_atoms, hbond_count, hbond_indexes_sel
@hlp.timeit
def hbond_lig_count_analysis(self,
ligand_name='HSL',
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=600):
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
fig = plt.figure(figsize=(14, 7))
title = 'Simulations of Clusters {0}-{1}'.formating(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
traj_frame = self.simulation_data[str(self.sim_indexes[0])]['clustersCentroid']
self.sim_time = self.simulation_data[str(self.sim_indexes[0])]['time']
t = traj_frame[0]
for i in self.sim_indexes:
self.sim_time = self.simulation_data[str(i)]['time']
hbonds_frames = self.simulation_data[str(i)]['hbondFrames']
sim_hbond_atoms = []
sim_hbond_count = []
for hbonds in hbonds_frames:
hbond_atoms, hbond_count, hbond_indexes_sel = self.count_lig_hbond(t, hbonds, ligand_name)
sim_hbond_atoms.adding(hbond_atoms)
sim_hbond_count.adding(hbond_count)
sim_hbound_np = np.array(sim_hbond_count)
self.simulation_data[str(i)].umkate({'hbond_atoms':sim_hbond_atoms})
self.simulation_data[str(i)].umkate({'hbond_count':sim_hbond_count})
curr_color = self.colors_[i - 1]
# curr_label = 'Simulation {0}'.formating(i)
curr_label = "Simulation of Cluster {0} average: {1}±{2}".formating(i, value_round(np.average(sim_hbound_np),3),
value_round(np.standard(sim_hbond_count),3))
# Version 1
plt.plot(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
linewidth=0.2, label=curr_label)
# Version 2
# plt.scatter(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
# linewidth=0.3, label=curr_label)
# data_frame = converters.convert_data_to_monkey(self.sim_time, self.hbond_count)
#
# y_average_average = data_frame['y'].rolling(center=False, window=20).average()
# atom_indices_rmsf = self.simulation_data[str(i)]['rmsf'][selection]['ref_atom_indices']
# curr_color = self.colors_[i - 1]
#
# conv_data = converters.convert_data_to_monkey(atom_indices_rmsf, traj_rmsf)
#
# # plt.plot(self.sim_time, traj_rmsd, color=curr_color,
# # linewidth=0.6, label=curr_label)
#
# plt.plot(conv_data['x'], conv_data['y'], color=curr_color,
# linewidth=0.6, label=curr_label)
# plt.xlim(getting_min(conv_data['x']) - 100, getting_max(conv_data['x']) + 100)
test = 1
# traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
# atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
plt.xlabel(xlabel, fontsize=16)
plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(9.0)
# plt.title(title)
# remove part of ticksg
sns.despine()
fig.savefig('Multi_Plot_HBOND_count_Lig_' + '_' + title + '_' + ligand_name + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Multi HBond lig count plot created')
@hlp.timeit
def hbond_freq_plot_analysis(self,
ligand_name='HSL',
title='Simulation',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=600):
sns.set(style="ticks", context='paper')
# sns.set(font_scale=2)
traj_frame = self.simulation_data[str(self.sim_indexes[0])]['clustersCentroid']
self.sim_time = self.simulation_data[str(self.sim_indexes[0])]['time']
t = traj_frame[0]
for i in self.sim_indexes:
plt.clf()
fig = plt.figure(figsize=(14, 7))
title = 'Simulations of Clusters {0}-{1}'.formating(self.sim_indexes[0], self.sim_indexes[-1])
# fig = plt.figure(figsize=(10, 7))
fig.suptitle(title, fontsize=16)
self.sim_time = self.simulation_data[str(i)]['time']
hbonds_frames = self.simulation_data[str(i)]['hbondFrames']
sim_hbond_atoms = []
sim_hbond_count = []
sim_hbond_sel = []
for hbonds in hbonds_frames:
hbond_atoms, hbond_count, hbond_indexes_sel = self.count_lig_hbond(t, hbonds, ligand_name)
sim_hbond_atoms.adding(hbond_atoms)
sim_hbond_count.adding(hbond_count)
if length( hbond_indexes_sel) > 0:
sim_hbond_sel+= hbond_indexes_sel
sim_hbound_np = np.array(sim_hbond_count)
sim_hbound_sel_np = np.array(sim_hbond_sel)
# self.simulation_data[str(i)].umkate({'hbond_atoms':sim_hbond_atoms})
# self.simulation_data[str(i)].umkate({'hbond_count':sim_hbond_count})
# curr_color = self.colors_[i - 1]
# curr_label = 'Simulation {0}'.formating(i)
curr_label = "Simulation of Cluster {0} average: {1}±{2}".formating(i, value_round(np.average(sim_hbound_np),3),
value_round(np.standard(sim_hbond_count),3))
# This won't work here
da_distances = md.compute_distances(t, sim_hbound_sel_np[:, [0, 2]], periodic=False)
# Version 1
# plt.plot(self.sim_time, sim_hbond_count, color=curr_color, marker = 'x',
# linewidth=0.2, label=curr_label)
# color = itertools.cycle(['r', 'b', 'gold'])
colors = sns.cubehelix_palette(n_colors=length(da_distances), rot=-.4)
# self.colors_ = colors
label = lambda hbond: '%s -- %s' % (t.topology.atom(hbond[0]), t.topology.atom(hbond[2]))
color = itertools.cycle(['r', 'b', 'gold'])
for i in [0]:
plt.hist(da_distances[:, i], color=colors[i], label=label(sim_hbound_sel_np[i]), alpha=0.5)
plt.legend()
plt.ylabel('Freq');
plt.xlabel('Donor-acceptor distance [nm]')
# plt.xlabel(xlabel, fontsize=16)
# plt.ylabel(ylabel, fontsize=16) # fix Angstrom need to change to nm
#
# leg = plt.legend(loc='best', shadow=True, prop={'size': 16})
#
# # set the linewidth of each legend object
# for legobj in leg.legendHandles:
# legobj.set_linewidth(9.0)
sns.despine()
fig.savefig('Multi_Plot_HBOND_frequency_' + '_' + title + '_' + str(i)+ '_'+ ligand_name + '.png', dpi=custom_dpi, bbox_inches='tight')
print('Multi HBond frequency lig plot created')
@hlp.timeit
def plot_solvent_area_multi(self, show=False):
fig = plt.figure(figsize=(10, 10))
plt.plot(self.sasa_traj.time, self.total_sasa)
plt.xlabel('Time [ps]', size=16)
plt.ylabel('Total SASA (nm)^2', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_plot.png', dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_solvent_area_frame_multi(self, frame, show=False):
fig = plt.figure(figsize=(10, 10))
plt.plot(self.sasa_traj.time, self.sasa[frame])
plt.xlabel('Time [ps]', size=16)
plt.ylabel('Total SASA (nm)^2', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_plot_{0}.png'.formating(frame), dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_solvent_area_autocorr_multi(self, show=False):
self.sasa_autocorr = protein_analysis.autocorr(self.total_sasa)
fig = plt.figure(figsize=(10, 10))
plt.semilogx(self.sasa_traj.time, self.sasa_autocorr)
plt.xlabel('Time [ps]', size=16)
plt.ylabel('SASA autocorrelation', size=16)
if show is True:
plt.show()
fig.savefig(self.simulation_name + '_' + 'SASA_autocorrelation.png', dpi=300, bbox_inches='tight')
@hlp.timeit
def plot_rmsd_cluster_color_multi(self, selection,
title='LasR RMSD',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=300,
lang='rus'):
import pylab as plt
sns.set(style="ticks", context='paper')
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
fig = plt.figure(figsize=(14, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# plt.plot(self.sim_time, self.sim_rmsd, color=self.cluster_colors,
# linewidth=0.6, label='LasR')
if lang == 'rus':
title = 'Симуляция'
xlabel = r"Время $t$ (нс)"
ylabel = r"RMSD(нм)"
else:
title = 'Simulation'
xlabel = r"Time $t$ (ns)"
ylabel = r"RMSD(nm)"
sns.set(font_scale=2)
plt.plot(self.sim_time, self.sim_rmsd, zorder=1)
traj_rmsd = self.rmsd_analysis_data[selection]
plt.scatter(self.sim_time, traj_rmsd, marker='o', s=30, facecolor='0.5', lw=0,
c=self.cluster_colors, zorder=2)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.xlim(self.sim_time[0], self.sim_time[-1])
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
fig.tight_layout()
# remove part of ticks
sns.despine()
# plt.show()
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '_cluster_color' + '_' + lang + '.png',
dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def find_best_fit_regressor(self):
# from sklearn.tree import DecisionTreeRegressor
self.best = 100
self.index = 100
self.best_rg = 100
self.index_rg = 100
self.regr_index = []
self.regr_scores = {}
self.regr_index_rg = []
self.regr_scores_rg = {}
self.reshaped_time = self.sim_time.reshape(-1, 1)
for i in list(range(1, self.regression_fit_range + 1)):
self.create_fit(i)
print('best score is ', self.best)
print('best index is', self.index)
print('-=-' * 10)
print('best score Rg is ', self.best_rg)
print('best index Rg is', self.index_rg)
@hlp.timeit
def create_fit(self, i):
from sklearn import tree
from sklearn.model_selection import cross_val_score
self.reshaped_time = self.sim_time.reshape(-1, 1)
regressor = tree.DecisionTreeRegressor(getting_max_depth=i) # interesting absolutely
fitVal = regressor.fit(self.reshaped_time, self.sim_rmsd)
print('fitVal ', fitVal)
rmsd_pred = regressor.predict(self.reshaped_time)
# cv how is it detergetting_mined?
# A good compromise is ten-fold cross-validation. 10ns
# Maybe mse better?
cross_val = cross_val_score(regressor,
self.reshaped_time,
self.sim_rmsd,
scoring="neg_average_squared_error",
cv=10)
regressor_rg = tree.DecisionTreeRegressor(getting_max_depth=i) # interesting absolutely
fitVal_rg = regressor_rg.fit(self.reshaped_time, self.rg_res)
fitVal_rg = regressor_rg.fit(self.reshaped_time, self.rg_res)
print('fitVal ', fitVal)
rmsd_pred_rg = regressor_rg.predict(self.reshaped_time)
# cv how is it detergetting_mined?
# A good compromise is ten-fold cross-validation. 10ns
cross_val_rg = cross_val_score(regressor,
self.reshaped_time,
self.rg_res,
scoring="neg_average_squared_error",
cv=10)
self.regr_scores.umkate({i: cross_val})
self.regr_index.adding(i)
self.regr_scores_rg.umkate({i: cross_val_rg})
self.regr_index_rg.adding(i)
cross_val_score = -cross_val.average()
cross_val_standard = cross_val.standard()
cross_val_score_rg = -cross_val_rg.average()
cross_val_standard_rg = cross_val_rg.standard()
print('Cross validation score is ', cross_val)
print("Degree {}\nMSE = {:.2e}(+/- {:.2e})".formating(i, -cross_val.average(), cross_val.standard()))
print('-=-' * 10)
print('Cross validation Rg score is ', cross_val_rg)
print("Rg Degree {}\nMSE = {:.2e}(+/- {:.2e})".formating(i, -cross_val_rg.average(), cross_val_rg.standard()))
# r2_score = regressor.score(self.sim_time.reshape(-1, 1), self.sim_rmsd)
# if r2_score > self.r2_best:
# self.r2_best = r2_score
# self.r2_index = i
if cross_val_score < self.best:
self.best = cross_val_score
self.index = i
if cross_val_score_rg < self.best_rg:
self.best_rg = cross_val_score_rg
self.index_rg = i
del regressor
del fitVal
del rmsd_pred
time.sleep(2)
# print('R2 score is ', r2_score)
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_rmsd_fit(self):
import matplotlib.pyplot as plt
x = self.regr_index
y = []
yerr_list = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
cross_val_score = -self.regr_scores[i].average()
cross_val_standard = self.regr_scores[i].standard()
y.adding(cross_val_score)
yerr_list.adding(cross_val_standard)
fig = plt.figure(figsize=(10, 10))
plt.errorbar(x, y, yerr=yerr_list)
plt.scatter(x, y, s=160, c='b', marker='h',
label="Best score at Max Depth={}\nMSE = {:.2e}(+/- {:.2e})".formating(self.index,
-self.regr_scores[
self.index].average(),
self.regr_scores[
self.index].standard()))
plt.legend(loc="best", prop={'size': 20})
plt.title("Mean squared error (MSE) averages for RMSD")
fig.savefig(self.simulation_name + '_errorBar_rmsd.png', dpi=300, bbox_inches='tight')
# plt.show()
print('Errorbar created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_Rg_fit(self):
import matplotlib.pyplot as plt
x = self.regr_index
y = []
yerr_list = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
cross_val_score = -self.regr_scores_rg[i].average()
cross_val_standard = self.regr_scores_rg[i].standard()
y.adding(cross_val_score)
yerr_list.adding(cross_val_standard)
fig = plt.figure(figsize=(10, 10))
plt.errorbar(x, y, yerr=yerr_list)
plt.scatter(x, y, s=160, c='b', marker='h',
label="Best score at Max Depth={}\nMSE = {:.2e}(+/- {:.2e})".formating(self.index_rg,
-self.regr_scores_rg[
self.index_rg].average(),
self.regr_scores_rg[
self.index_rg].standard()))
plt.legend(loc="best", prop={'size': 20})
plt.title("Mean squared error (MSE) averages for Rg")
fig.savefig(self.simulation_name + '_errorBar_Rg.png', dpi=300, bbox_inches='tight')
# plt.show()
print('Errorbar created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def error_bar_fit_test(self):
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
# example variable error bar values
yerr = 0.1 + 0.2 * np.sqrt(x)
xerr = 0.1 + yerr
# First illustrate basic pyplot interface, using defaults where possible.
plt.figure()
plt.errorbar(x, y, xerr=0.2, yerr=0.4)
plt.title("Simplest errorbars, 0.2 in x, 0.4 in y")
# Now switch to a more OO interface to exercise more features.
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True)
ax = axs[0, 0]
ax.errorbar(x, y, yerr=yerr, fmt='o')
ax.set_title('Vert. symmetric')
# With 4 subplots, reduce the number of axis ticks to avoid crowding.
ax.locator_params(nbins=4)
ax = axs[0, 1]
ax.errorbar(x, y, xerr=xerr, fmt='o')
ax.set_title('Hor. symmetric')
ax = axs[1, 0]
ax.errorbar(x, y, yerr=[yerr, 2 * yerr], xerr=[xerr, 2 * xerr], fmt='--o')
ax.set_title('H, V asymmetric')
ax = axs[1, 1]
ax.set_yscale('log')
# Here we have to be careful to keep total_all y values positive:
ylower = np.getting_maximum(1e-2, y - yerr)
yerr_lower = y - ylower
ax.errorbar(x, y, yerr=[yerr_lower, 2 * yerr], xerr=xerr,
fmt='o', ecolor='g', capthick=2)
ax.set_title('Mixed sym., log y')
fig.suptitle('Variable errorbars')
plt.show()
@hlp.timeit
def plot_boxplot_fit_regr(self):
data_to_plot = []
for i in self.regr_index:
# plt.boxplot(self.regr_scores[i])
data_to_plot.adding(self.regr_scores[i])
# Create a figure instance
fig = plt.figure(figsize=(10, 10))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
# change outlier to hexagon
# bp = ax.boxplot(data_to_plot, 0, 'gD')
# dont show outlier
bp = ax.boxplot(data_to_plot, 0, '')
# Save the figure
fig.savefig(self.simulation_name + '_boxplot.png', dpi=600, bbox_inches='tight')
# plt.show()
print('Box plot created ')
print('---------------------------------------------------------------\n')
@hlp.timeit
def example_test(self):
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
degrees = [1, 4, 8, 15, 20]
# true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = self.sim_time
y = self.sim_rmsd
plt.figure(figsize=(14, 5))
for i in range(length(degrees)):
ax = plt.subplot(1, length(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X, y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X, y,
scoring="neg_average_squared_error", cv=10)
X_test = self.sim_time
plt.plot(X_test, pipeline.predict(X_test), label="Model")
plt.plot(X_test, self.sim_rmsd, label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".formating(
degrees[i], -scores.average(), scores.standard()))
plt.show()
@hlp.timeit
def plot_rmsd_with_regressor(self, title='LasR Simulation RMSD',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)"):
import pylab as plt
from sklearn import tree
rfc = tree.DecisionTreeRegressor(getting_max_depth=self.index) # interesting absolutely
fitVal = rfc.fit(self.sim_time.reshape(-1, 1), self.sim_rmsd)
print('fitVal ', fitVal)
self.rmsd_pred = rfc.predict(self.sim_time.reshape(-1, 1))
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(self.sim_time, self.sim_rmsd, color='b',
linewidth=0.6, label='Original Data')
plt.plot(self.sim_time, self.rmsd_pred, color='r',
linewidth=4, label='Fitted Data')
plt.legend(loc="best", prop={'size': 30})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_tree' + '.png', dpi=300, bbox_inches='tight')
print('RMSD plot created with regressor')
print('-----------------------------------\n')
@hlp.timeit
def plot_Rg_with_regressor(self, title='LasR Radius of Gyration',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)"):
import pylab as plt
from sklearn import tree
rfc = tree.DecisionTreeRegressor(getting_max_depth=self.index_rg) # interesting absolutely
fitVal = rfc.fit(self.sim_time.reshape(-1, 1), self.rg_res)
print('fitVal ', fitVal)
self.rmsd_pred_rg = rfc.predict(self.sim_time.reshape(-1, 1))
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(self.sim_time, self.rg_res, color='b',
linewidth=0.6, label='Original Data')
plt.plot(self.sim_time, self.rmsd_pred_rg, color='r',
linewidth=4, label='Fitted Data')
plt.legend(loc="best", prop={'size': 30})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_tree' + '.png', dpi=300, bbox_inches='tight')
print('RMSD plot created with regressor')
print('-----------------------------------\n')
@hlp.timeit
def md_full_load(self, custom_stride=10):
print('MD Load has been ctotal_alled\n')
print('-------------------------------\n')
self.full_traj = md.load(self.md_trajectory_file, top=self.md_topology_file,
stride=custom_stride)
self.sim_time = self.full_traj.time / 1000
print("Full trajectory loaded successfully")
print('-----------------------------------\n')
@hlp.timeit
def rg_analysis(self, selection='protein'):
self.ctotal_alled_rg_analysis = True
# self.rg_traj = self.full_traj[:]
#
# self.topology = self.rmsd_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.rg_traj.restrict_atoms(self.selection)
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
self.rg_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.rg_res = md.compute_rg(self.rg_traj)
self.rg_analysis_data.umkate({selection: self.rg_res})
print("Rg has been calculated")
print('-----------------------------------\n')
@hlp.timeit
def hbond_analysis_count(self, selection='protein',
title='LasR H-Bonds',
xlabel=r"Time $t$ (ns)",
ylabel=r"Number of Hydrogen Bonds",
custom_dpi=300):
sns.set(style="ticks", context='paper')
self.ctotal_alled_hbond_analysis_count = True
print('HBonds analysis has been ctotal_alled\n')
print('-------------------------------\n')
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
print('selection is ', self.selection)
# this is for keeping selection from trajectory
# self.full_traj.restrict_atoms(self.selection)
self.hbond_count = []
self.sim_time = self.full_traj.time / 1000
# paral = Pool(processes=16)
# data_count = list(mapping(self.hbond_frame_calc, self.full_traj))
#
# print('data count ',data_count)
# hbonds = md.baker_hubbard(self.full_traj, exclude_water=True, periodic=False)
# print('count of hbonds is ', length(hbonds))
# self.hbond_count.adding(length(hbonds))
hbonds_frames = md.wernet_nilsson(self.full_traj, exclude_water=True, periodic=False)
self.hbonds_frames = hbonds_frames
for hbonds in hbonds_frames:
self.hbond_count.adding(length(hbonds))
data_frame = converters.convert_data_to_monkey(self.sim_time, self.hbond_count)
y_average_average = data_frame['y'].rolling(center=False, window=20).average()
fig = plt.figure(figsize=(7, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
plt.plot(data_frame['x'], data_frame['y'], color='b',
linewidth=0.6, label='LasR')
# Dont plot rolling average
plt.plot(data_frame['x'], y_average_average, color='r',
linewidth=0.9, label='LasR rolling average')
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# remove part of ticks
sns.despine()
fig.savefig(self.simulation_name + '_' + title + '.png', dpi=custom_dpi, bbox_inches='tight')
print('HBond count plot created')
print('-----------------------------------\n')
# for hbond in hbonds:
# print(hbond)
# print(label(hbond))
# atom1 = self.full_traj.topology.atom(hbond[0])
# atom2 = self.full_traj.topology.atom(hbond[2])
# # atom3 = traj_sim1_hbonds.topology.atom(hbond[2])
# if atom1.residue.resSeq != atom2.residue.resSeq:
# if atom1.residue.resSeq + 1 != atom2.residue.resSeq:
# # for domain reside analysis
# if atom1.residue.resSeq < 171 and atom2.residue.resSeq > 172:
# diff_hbonds.adding(hbond)
@hlp.timeit
def hbond_analysis(self, selection='protein'):
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
print('selection is ', self.selection)
# this is for keeping selection from trajectory
self.full_traj.restrict_atoms(self.selection)
if self.save_mkb_hbond is True:
traj_sim1_hbonds = md.load_mkb(self.mkb_file_name)
hbonds = md.baker_hubbard(traj_sim1_hbonds, periodic=False)
# hbonds = md.wernet_nilsson(traj_sim1_hbonds, periodic=True)[0]
label = lambda hbond: '%s -- %s' % (traj_sim1_hbonds.topology.atom(hbond[0]),
traj_sim1_hbonds.topology.atom(hbond[2]))
diff_hbonds = []
for hbond in hbonds:
# print(hbond)
# print(label(hbond))
atom1 = traj_sim1_hbonds.topology.atom(hbond[0])
atom2 = traj_sim1_hbonds.topology.atom(hbond[2])
# atom3 = traj_sim1_hbonds.topology.atom(hbond[2])
if atom1.residue.resSeq != atom2.residue.resSeq:
if atom1.residue.resSeq + 1 != atom2.residue.resSeq:
# domain reside analysis
if atom1.residue.resSeq < 171 and atom2.residue.resSeq > 172:
diff_hbonds.adding(hbond)
for hbond in diff_hbonds:
print(hbond)
print(label(hbond))
print('Diff hbonds printed\n')
diff_hbonds = np.asarray(diff_hbonds)
self.da_distances = md.compute_distances(traj_sim1_hbonds, diff_hbonds[:, [0, 2]], periodic=False)
import itertools
# color = itertools.cycle(['r', 'b', 'gold'])
# fig = plt.figure(figsize=(7, 7))
# color = np.linspace(0, length(diff_hbonds),length(diff_hbonds))
#
# # color = itertools.cycle(['r', 'b','g','gold'])
# for i in list(range(0,length(diff_hbonds))):
# plt.hist(self.da_distances[:, i], color=next(color), label=label(diff_hbonds[i]), alpha=0.5)
# plt.legend()
# plt.ylabel('Freq');
# plt.xlabel('Donor-acceptor distance [nm]')
# plt.show()
# this works wel, but needs to be modified
fig = plt.figure(figsize=(7, 7))
color = np.linspace(0, length(diff_hbonds), length(diff_hbonds))
color = itertools.cycle(['r', 'b', 'g', 'tan', 'black', 'grey', 'yellow', 'gold'])
for i in list(range(0, length(diff_hbonds))):
plt.hist(self.da_distances[:, i], color=next(color), label=label(diff_hbonds[i]), alpha=0.5)
plt.legend()
plt.ylabel('Freq');
plt.xlabel('Donor-acceptor distance [nm]')
plt.show()
fig.savefig(self.simulation_name + '_hbonds.png', dpi=600, bbox_inches='tight')
print("Hbonds have been calculated")
print('-----------------------------------\n')
@hlp.timeit
def rmsd_analysis(self, selection):
'''
:param selection: has to be mdtraj compatible
:return:
'''
self.ctotal_alled_rmsd_analysis = True
# self.rmsd_traj = self.full_traj[:]
#
# self.topology = self.rmsd_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.rmsd_traj.restrict_atoms(self.selection)
# self.full_traj.save(selection +'.mkb')
# this is for keeping selection from trajectory
# self.rmsd_traj.restrict_atoms(self.selection)
# self.rmsd_traj = self.full_traj[:]
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
# self.selection = self.topology.select(selection)
# print('selection is ', self.selection)
self.rmsd_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.sim_rmsd = md.rmsd(self.rmsd_traj, self.rmsd_traj, 0)
self.sim_time = self.rmsd_traj.time / 1000
self.rmsd_analysis_data.umkate({selection: self.sim_rmsd})
self.regression_fit_range = 10
print('RMSD analysis has been ctotal_alled on selection {0}\n'.formating(selection))
print('-----------------------------\n')
@hlp.timeit
def plot_rmsd_cluster_color(self, selection,
title='LasR RMSD',
xlabel=r"Time $t$ (ns)",
ylabel=r"RMSD(nm)",
custom_dpi=300,
lang='rus'):
import pylab as plt
sns.set(style="ticks", context='paper')
'''
ylabel=r"C$_\alpha$ RMSD from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
'''
fig = plt.figure(figsize=(14, 7))
# ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
# plt.plot(self.sim_time, self.sim_rmsd, color=self.cluster_colors,
# linewidth=0.6, label='LasR')
if lang == 'rus':
title = 'Симуляция'
xlabel = r"Время $t$ (нс)"
ylabel = r"RMSD(нм)"
else:
title = 'Simulation'
xlabel = r"Time $t$ (ns)"
ylabel = r"RMSD(nm)"
sns.set(font_scale=2)
plt.plot(self.sim_time, self.sim_rmsd, zorder=1)
traj_rmsd = self.rmsd_analysis_data[selection]
plt.scatter(self.sim_time, traj_rmsd, marker='o', s=30, facecolor='0.5', lw=0,
c=self.cluster_colors, zorder=2)
# plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.xlim(self.sim_time[0], self.sim_time[-1])
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
fig.tight_layout()
# remove part of ticks
sns.despine()
# plt.show()
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '_cluster_color' + '_' + lang + '.png',
dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
@hlp.timeit
def plot_rmsf(self, selection,
title='LasR RMSF',
xlabel=r"Residue",
ylabel=r"RMSF(nm)",
custom_dpi=300):
'''
ylabel=r"C$_\alpha$ RMSF from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
:param title:
:param xlabel:
:param ylabel:
:param custom_dpi:
:return:
'''
sns.set(style="ticks", context='paper')
sns.set(font_scale=2)
traj_rmsf = self.rmsf_analysis_data[selection]['rmsf']
atom_indices_rmsf = self.rmsf_analysis_data[selection]['atom_indices']
conv_data = converters.convert_data_to_monkey(atom_indices_rmsf, traj_rmsf)
# sns.tsplot(time="x", unit="y", data=conv_data,
# size=4, fit_reg=False,
# scatter_kws={"s": 50, "alpha": 1})
# sns.plt.show()
fig = plt.figure(figsize=(14, 7))
plt.plot(conv_data['x'], conv_data['y'], color='b',
linewidth=0.6, label=title)
plt.xlabel(xlabel)
plt.xlim(getting_min(conv_data['x']) - 100, getting_max(conv_data['x']) + 100)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# remove part of ticks
sns.despine()
fig.savefig(self.simulation_name + '_' + title + '_rmsf.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSF plot created')
@hlp.timeit
def plot_rg(self,
selection,
title='LasR Rg',
xlabel=r"time $t$ (ns)",
ylabel=r"C$_\alpha$ Rg from $t=0$, $\rho_{\mathrm{C}_\alpha}$ (nm)",
custom_dpi=600):
import pylab as plt
sns.set(style="ticks", context='paper')
sns.set(font_scale=2)
# In[27]:
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([2, 2, 2, 2])
# plt.plot(time_sim1_np, rmsd_sim1_np, 'b')
traj_rg = self.rg_analysis_data[selection]
plt.plot((self.sim_time), traj_rg, color='b',
linewidth=0.6, label='LasR')
plt.legend(loc="best", prop={'size': 8})
plt.xlabel(xlabel)
plt.ylabel(ylabel) # fix Angstrom need to change to nm
plt.title(title)
# In[28]:
fig.savefig(self.simulation_name + '_' + title + '_' + selection + '.png', dpi=custom_dpi, bbox_inches='tight')
print('RMSD plot created')
print('-----------------------------------\n')
# need to select only protein for analysis
@hlp.timeit
def find_centroid(self):
atom_indices = [a.index for a in self.full_traj.topology.atoms if a.element.symbol != 'H']
distances = np.empty((self.full_traj.n_frames, self.full_traj.n_frames))
for i in range(self.full_traj.n_frames):
distances[i] = md.rmsd(self.full_traj, self.full_traj, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.standard()).total_sum(axis=1).arggetting_max()
print(index)
centroid = self.full_traj[index]
print(centroid)
centroid.save('centroid.mkb')
####################################################################################################################
# TODO do PCA transformatingion of MD simulation
@hlp.timeit
def md_pca_analysis(self, selection='protein'):
self.ctotal_alled_md_pca_analysis = True
print('PCA analysis has been ctotal_alled\n')
print('-------------------------------\n')
pca1 = PCA(n_components=2)
# this is for keeping selection from trajectory
# self.pca_traj = self.full_traj[:]
#
# self.topology = self.pca_traj.topology
#
# self.selection = self.topology.select(selection)
#
# # self.selection = self.topology.select(selection)
# # print('selection is ', self.selection)
#
# self.pca_traj.restrict_atoms(self.selection)
# self.full_traj.save(selection +'.mkb')
self.topology = self.full_traj.topology
self.selection = self.topology.select(selection)
self.pca_traj = self.full_traj.atom_slice(atom_indices=self.selection)
self.pca_traj.superpose(self.pca_traj, 0)
self.reduced_cartesian = pca1.fit_transform(
self.pca_traj.xyz.reshape(self.pca_traj.n_frames, self.pca_traj.n_atoms * 3))
print(self.reduced_cartesian.shape)
print("PCA transformatingion finished successfully")
print('-----------------------------------\n')
####################################################################################################################
@hlp.timeit
def extract_info_cluster_data(self, cluster_data, key):
temp_data = []
for clust_num in self.range_n_clusters:
temp_data.adding(cluster_data[clust_num][key])
return temp_data
@hlp.timeit
def silhouette_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.sil_pca
criteria_name = 'Mean Silhouette Coefficient for total_all sample_by_nums'
score_text = 'Objects with a high silhouette value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
def calinski_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.calinski_pca
criteria_name = 'Calinski-Harabaz score'
score_text = 'Objects with a high Calinski-Harabaz score value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dunn_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dunn_pca
criteria_name = "Dunn's Index"
score_text = "Maximum value of the index represents the right partitioning given the index"
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dbi_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dbi_pca
criteria_name = 'Davis-Bouldain Index'
score_text = 'The optimal clustering solution has the smtotal_allest Davies-Bouldin index value.'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def select_number_of_clusters(self):
# ["foo", "bar", "baz"].index("bar")
getting_max_silhouette = getting_max(self.sil_pca)
getting_max_dunn = getting_max(self.dunn_pca)
getting_min_dbi = getting_min(self.dbi_pca)
sil_index = self.sil_pca.index(getting_max_silhouette)
dunn_index = self.dunn_pca.index(getting_max_dunn)
dbi_index = self.dbi_pca.index(getting_min_dbi)
cluster_quantity = []
cluster_quantity.adding(self.range_n_clusters[sil_index])
cluster_quantity.adding(self.range_n_clusters[dunn_index])
cluster_quantity.adding(self.range_n_clusters[dbi_index])
print('------------------------------------------------')
print('verify yolo', cluster_quantity)
cluster_set = set(cluster_quantity)
cluster_dict = {}
for n_set in cluster_set:
count = cluster_quantity.count(n_set)
cluster_dict.umkate({n_set: count})
print('verify yolo ', cluster_dict)
import operator
clust_num = getting_max(cluster_dict.items(), key=operator.itemgettingter(1))[0]
print("number of clusters is ", clust_num)
return clust_num
# def write_model_to_file(self, model, resnum=None, filengthame_mkb=None):
# curr_kf = model['molDefinal_item_tail']['knowledgeframe']
# mkb_tools.write_lig(curr_kf, resnum, filengthame_mkb)
# need to select only protein for analysis
@hlp.timeit
def find_getting_max_cluster(self):
lengthgth = 0
clust_temp_data = []
for k in self.clusterized_data:
data = self.clusterized_data[k]
if length(data) > lengthgth:
lengthgth = length(data)
clust_temp_data = data
self.getting_max_clust_temp_data = clust_temp_data
return self.getting_max_clust_temp_data
@hlp.timeit
def find_clusters_centroid(self):
print('Find Clusters centroids is ctotal_alled\n')
print('-----------------------------------\n')
self.ctotal_alled_find_clusters_centroid = True
self.clusters_centroids = []
for k in self.clusterized_data:
print('Finding centroid for cluster {0}'.formating(k))
clust_temp_data = self.clusterized_data[k]
atom_indices = [a.index for a in clust_temp_data.topology.atoms if a.element.symbol != 'H']
distances = np.empty((clust_temp_data.n_frames, clust_temp_data.n_frames))
for i in range(clust_temp_data.n_frames):
distances[i] = md.rmsd(clust_temp_data, clust_temp_data, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.standard()).total_sum(axis=1).arggetting_max()
print(index)
centroid = clust_temp_data[index]
# self.centroid_conf = centroid
# print(centroid)
# self.centroid_conf = centroid
self.clusters_centroids.adding(centroid)
centroid.save(self.simulation_name + '_' + '{0}_cluster_centroid.mkb'.formating(k))
print('-----------------------------------\n')
@hlp.timeit
def find_getting_max_cluster_centroid(self):
print('Find Max Cluster centroid is ctotal_alled\n')
print('-----------------------------------\n')
self.ctotal_alled_find_getting_max_cluster_centroid = True
clust_temp_data = self.getting_max_clust_temp_data
atom_indices = [a.index for a in clust_temp_data.topology.atoms if a.element.symbol != 'H']
distances = np.empty((clust_temp_data.n_frames, clust_temp_data.n_frames))
for i in range(clust_temp_data.n_frames):
distances[i] = md.rmsd(clust_temp_data, clust_temp_data, i, atom_indices=atom_indices)
beta = 1
index = np.exp(-beta * distances / distances.standard()).total_sum(axis=1).arggetting_max()
print(index)
centroid = clust_temp_data[index]
self.centroid_conf = centroid
print(centroid)
self.centroid_conf = centroid
centroid.save(self.simulation_name + '_' + 'getting_max_cluster_centroid.mkb')
print('-----------------------------------\n')
# need to find a way to extract models correctrly
@hlp.timeit
def export_cluster_models(self,
selection_obj='protein',
select_lig=None,
save_data=False, nth_frame=1):
'''
Save cluster data to mkb files in cluster_traj directory
:return:
'''
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
labels = cluster_labels
sample_by_num_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
distinctive_labels = list(set(cluster_labels))
print('Unique labels ', distinctive_labels)
original_data = self.full_traj
self.clusterized_data = {}
for k in distinctive_labels: # Need to modify WORKS
# print('k is ',k)
# k == -1 then it is an outlier
if k != -1:
cluster_data = []
xyz = original_data[labels == k]
# sel_traj = xyz[:]
topology = xyz.topology
selection_name = selection_obj
selection_final_name = selection_obj
selection = topology.select(selection_obj)
selection_final = selection
if select_lig is not None:
# selection1 = topology.select(select_lig)
# selection_final = np.concatingenate((selection, selection1))
# selection_name = selection_name + ' and ' + select_lig
#
# selection_final = list(topology.select(selection_obj)) + list(topology.select(select_lig))
selection_final_name = selection_obj + '+' + select_lig
selection_final = topology.select(selection_obj + ' or ' + select_lig)
# list(topology.select(selection_obj)) + list(topology.select(select_lig))
sel_traj = xyz.atom_slice(atom_indices=selection_final)
# sel_traj.restrict_atoms(selection_final)
clust_num = int(k) + 1
if save_data is True:
temp_data = sel_traj[::nth_frame]
temp_data[0].save(self.simulation_name + '_' + 'cluster_' + str(
clust_num) + '_' + selection_final_name + '_frame_0.mkb')
temp_data.save(
self.simulation_name + '_' + 'cluster_' + str(clust_num) + '_' + selection_final_name + '.xtc')
self.clusterized_data.umkate({k: sel_traj})
self.save_mkb_hbond = True
def save_analysed_data(self, filengthame):
'''
:param filengthame: Saves clustered data to pickle file
:return:
'''
# import json
# with open(filengthame, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
import pickle
# pickle.dump(self.cluster_models, open(filengthame, "wb"))
pickle.dump(self, open(filengthame, "wb"))
# should I add json saving of informatingion or not?
def load_analysed_data(self, filengthame):
'''
:param filengthame: load pickle file
:return:
'''
self.analysed_data = pickle.load(open(filengthame, "rb"))
print('test')
####################################################################################################################
# TODO calc ramachandran part
@hlp.timeit
def ramachandran_calc(self):
self.atoms, self.bonds = self.full_traj.topology.to_knowledgeframe()
self.phi_indices, self.phi_angles = md.compute_phi(self.full_traj, periodic=False)
self.psi_indices, self.psi_angles = md.compute_psi(self.full_traj, periodic=False)
self.angles_calc = md.compute_dihedrals(self.full_traj, [self.phi_indices[0], self.psi_indices[0]])
@hlp.timeit
def ramachandran_plot(self):
from math import pi
fig = plt.figure(figsize=(7, 7))
plt.title('Dihedral Map:')
plt.scatter(self.angles_calc[:, 0], self.angles_calc[:, 1], marker='x', c=self.full_traj.time)
cbar = plt.colorbar()
cbar.set_label('Time [ps]')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.xlim(-pi, pi)
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.ylim(-pi, pi)
fig.savefig(self.simulation_name + '_' + 'Ramachandran_analysis' + '.png', dpi=600, bbox_inches='tight')
print("Ramachandran plot created")
print('-----------------------------------\n')
@hlp.timeit
def ramachandran_calc_centroid(self, selection='backbone'):
print('Ramachandran centroid calc has been ctotal_alled\n')
print('------------------------------------------\n')
self.ctotal_alled_ramachandran_centroid_calc = True
self.centroid_topology = self.centroid_conf.topology
self.centroid_selection = self.centroid_topology.select(selection)
self.centroid_new_traj = self.centroid_conf.atom_slice(atom_indices=self.centroid_selection)
self.atoms_centroid, self.bonds_centroid = self.centroid_new_traj.topology.to_knowledgeframe()
self.phi_indices_centroid, self.phi_angles_centroid = md.compute_phi(self.centroid_conf, periodic=False)
self.psi_indices_centroid, self.psi_angles_centroid = md.compute_psi(self.centroid_conf, periodic=False)
self.angles_calc_centroid_list = []
for i, y in zip(self.phi_indices_centroid, self.psi_indices_centroid):
temp = md.compute_dihedrals(self.centroid_conf, [i, y])
self.angles_calc_centroid_list.adding(temp[0])
self.angles_calc_centroid = np.array(self.angles_calc_centroid_list, dtype=np.float64)
print('------------------------------------------\n')
@hlp.timeit
def ramachandran_plot_centroid(self):
from math import pi
fig = plt.figure(figsize=(7, 7))
plt.title('Dihedral Map:')
plt.scatter(self.angles_calc_centroid[:, 0], self.angles_calc_centroid[:, 1], marker='x')
# cbar = plt.colorbar()
# cbar.set_label('Time [ps]')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.xlim(-pi, pi)
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.ylim(-pi, pi)
fig.savefig(self.simulation_name + '_' + 'Ramachandran_analysis_centroid' + '.png', dpi=600,
bbox_inches='tight')
print("Ramachandran plot created")
print('-----------------------------------\n')
####################################################################################################################
# gmx trjconv -s md_0_1.tpr -f md_0_1.xtc -o md_0_1_noPBC.xtc -pbc mol -ur compact
# gmx trjconv -s md_0_3.tpr -f md_0_3_noPBC.xtc -o md_0_3_clear.xtc -fit rot+trans
# def getting_gmx_command(self):
# sim1_file_tpr = sim1 + '/md_0_3.tpr'
#
# # In[39]:
#
# sim1_out = sim1 + '/md_sim1.mkb'
#
# # In[40]:
#
# index = sim1 + '/index.ndx'
#
# # In[41]:
#
# trj_conv = 'gmx trjconv -f {0} -s {1} -n {2} -o {3} -dt 500'.formating(sim1_file_traj, sim1_file_tpr, index,
# sim1_out)
#
# # traj_sim1_hbonds = md.load(sim1_out)
#
#
# # In[44]:
#
# # traj_sim1_hbonds
#
#
# # In[45]:
#
# sim1_clear = sim1 + '/md_sim1_clear.mkb'
#
# # In[46]:
#
# traj_sim1_hbonds = md.load_mkb(sim1_clear)
#
# # In[47]:
#
# traj_sim1_hbonds
#
# # In[48]:
#
# traj_sim1_hbonds[-1].save('QRC_sim0_final_itemFrame.mkb')
#
# # In[49]:
#
# traj_sim1_hbonds[0].save('QRC_sim0_firstFrame.mkb')
#
# # In[50]:
#
# traj_sim1_hbonds[0:-1:30].save('QRC_sim0_shortAnimation.mkb')
#
# # In[51]:
#
# hbonds = md.baker_hubbard(traj_sim1_hbonds, freq=0.8, periodic=False)
#
# # In[52]:
#
# hbonds = md.wernet_nilsson(traj_sim1_hbonds[-1], periodic=True)[0]
#
# # In[53]:
#
# sel
#
# # In[54]:
#
# # for hbond in hbonds:
# # # print(hbond)
# # print(label(hbond))
#
#
# # In[55]:
#
# da_distances = md.compute_distances(traj_sim1_hbonds, hbonds[:, [0, 2]], periodic=False)
#
# # In[56]:
#
# import itertools
#
# # In[57]:
#
# color = itertools.cycle(['r', 'b', 'gold'])
# for i in [2, 3, 4]:
# plt.hist(da_distances[:, i], color=next(color), label=label(hbonds[i]), alpha=0.5)
# plt.legend()
# plt.ylabel('Freq');
# plt.xlabel('Donor-acceptor distance [nm]')
#
# # TEST ORIGIANL EXAMPLE
# #
#
# # Check for HSL_LasR_1
#
# # In[ ]:
def getting_data_for_analysis(self):
return self.analysis_structure
def drawVectors(self, transformed_features, components_, columns, plt, scaled):
if not scaled:
return plt.axes() # No cheating ;-)
num_columns = length(columns)
# This funtion will project your *original* feature (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the getting_max value in
# the transformed set belonging to that component
xvector = components_[0] * getting_max(transformed_features[:, 0])
yvector = components_[1] * getting_max(transformed_features[:, 1])
## visualize projections
# Sort each column by it's lengthgth. These are your *original*
# columns, not the principal components.
important_features = {columns[i]: math.sqrt(xvector[i] ** 2 + yvector[i] ** 2) for i in range(num_columns)}
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print("Features by importance:\n", important_features)
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, header_num_width=0.02, alpha=0.75)
plt.text(xvector[i] * 1.2, yvector[i] * 1.2, list(columns)[i], color='b', alpha=0.75)
return ax
# test code
@hlp.timeit
def rmsf_calc(self, targetting=None, reference=None, frame=0, wrt=False, atom_indices=None, ref_atom_indices=None):
'''
use backbone for selection
Looks like GROMACS uses WRT
'''
self.ctotal_alled_rmsf_calc = True
print('RMSF analysis has been ctotal_alled\n')
print('-----------------------------\n')
self.topology = self.full_traj.topology
atom_indices = self.topology.select(atom_indices)
ref_atom_indices_name = ref_atom_indices
ref_atom_indices = self.topology.select(ref_atom_indices)
self.atom_indices = atom_indices
self.ref_atom_indices = ref_atom_indices
# this is for keeping selection from trajectory
# self.full_traj.restrict_atoms(self.selection)
self.sim_time = self.full_traj.time / 1000
trajectory = self.full_traj
trajectory.superpose(self.full_traj[frame], atom_indices=atom_indices, ref_atom_indices=ref_atom_indices)
if wrt is True:
avg_xyz = np.average(trajectory.xyz[:, atom_indices, :], axis=0)
self.avg_xyz = avg_xyz
self.sim_rmsf = np.sqrt(3 * np.average((trajectory.xyz[:, atom_indices, :] - avg_xyz) ** 2, axis=(0, 2)))
else:
reference = trajectory[frame]
self.sim_rmsf = np.sqrt(
3 * np.average((trajectory.xyz[:, atom_indices, :] - reference.xyz[:, ref_atom_indices, :]) ** 2,
axis=(0, 2)))
self.rmsf_analysis_data.umkate({ref_atom_indices_name: {'atom_indices': self.atom_indices,
'ref_atom_indices': self.ref_atom_indices,
'rmsf': self.sim_rmsf}})
print('-----------------------------\n')
return self.sim_rmsf
@hlp.timeit
def pca_analysis(self):
scaleFeatures = False
kf = self.data_for_analysis
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(kf)
T = pca.transform(kf)
# ax = self.drawVectors(T, pca.components_, kf.columns.values, plt, scaleFeatures)
T = mk.KnowledgeFrame(T)
T.columns = ['component1', 'component2']
# T.plot.scatter(x='component1', y='component2', marker='o', s=300, alpha=0.75) # , ax=ax)
# plt.show()
return T
@hlp.timeit
def pca_analysis_reshape(self):
scaleFeatures = False
kf = self.data_for_analysis
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(kf)
T = pca.transform(kf)
# ax = self.drawVectors(T, pca.components_, kf.columns.values, plt, scaleFeatures)
T = mk.KnowledgeFrame(T)
T.columns = ['component1', 'component2']
# T.plot.scatter(x='component1', y='component2', marker='o', s=300, alpha=0.75) # , ax=ax)
# plt.show()
return T
@hlp.timeit
def iso_analysis(self, n_neighbours=3):
scaleFeatures = False
kf = self.data_for_analysis
from sklearn import manifold
iso = manifold.Isomapping(n_neighbours, n_components=2)
iso.fit(kf)
manifold = iso.transform(kf)
# Plot2D(manifold, 'ISOMAP 0 1', 0, 1, num_to_plot=40)
# Plot2D(manifold, 'ISOMAP 1 2', 1, 2, num_to_plot=40)
# ax = self.drawVectors(manifold, iso.components_, kf.columns.values, plt, scaleFeatures)
T = mk.KnowledgeFrame(manifold)
T.columns = ['component1', 'component2']
T.plot.scatter(x='component1', y='component2', marker='o', alpha=0.75) # , ax=ax)
plt.show()
@hlp.timeit
def hdbscan_pca(self):
# fignum = 2
# fig = plt.figure(fignum)
# plt.clf()
# plt.subplot(321)
X = self.pca_data
db = hdbscan.HDBSCAN(getting_min_cluster_size=200)
labels = db.fit_predict(X)
print('labels ', labels)
#
core_sample_by_nums_mask = np.zeros_like(db.labels_, dtype=bool)
# core_sample_by_nums_mask[db.core_sample_by_num_indices_] = True
# labels = db.labels_
# print('labels is ',labels)
print('labels shape is ', labels.shape[0])
# print('db are ',db.components_)
labelsShape = labels.shape[0]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = length(set(labels)) - (1 if -1 in labels else 0)
# plot_frequency(labels)
print('Estimated number of clusters: %d' % n_clusters_)
distinctive_labels = list(set(labels))
print('Unique labels ', distinctive_labels)
worthy_data = labels[labels != -1]
notWorthy_data = labels[labels == -1]
real_labels = set(worthy_data)
# print("Worthy Data ",worthy_data)
print("Real Labels man ", real_labels)
shape_worthy = worthy_data.shape[0]
print("All Worthy data points ", int(shape_worthy))
print("Not Worthy data points ", int(notWorthy_data.shape[0]))
# plt.cla()
colors = plt.cm.Spectral(np.linspace(0, 1, length(distinctive_labels)))
# print("Colors is ",colors)
# Here could be the solution
dtype = [('label', np.int8), ('CLx', np.float64), ('CLy', np.float64), ('CLz', np.float64),
('bindMean', np.float64),
('bindStd', np.float64), ('quantity', int), ('percentage', np.float64), ('rmsd', np.float64), ]
cluster_Center_Data = np.empty((0,), dtype=dtype) # This is for clusters
# print("cluster_Center_Data ",clean_Data, clean_Data.shape)
# print("clean Data dtype ", clean_Data.dtype)
# print("clean Data [0] dtype" ,dtype[0])
label_percent = {}
# Need to return X, clean_data, and another dict for best position
molOrder = {}
for k in distinctive_labels: # Need to modify WORKS
# print('k is ',k)
xyz = X[labels == k]
if k == -1:
color = 'b'
# print('what the hell ', xyz[:, 4])
plt.scatter(xyz['component1'], xyz['component2'], facecolor=(0, 0, 0, 0), marker='^', s=80, c=color,
label='Outlier size={0}'.formating(xyz.shape))
# xyz.plot.scatter(x='component1', y='component2', marker='^',s=100, alpha=0.75)
else:
# Need to make this function a lot better
print('xyz is ', xyz)
plt.scatter(xyz['component1'], xyz['component2'], marker='o', s=120, c=colors[k], edgecolor='g',
label="size={0}".formating(xyz.shape))
# label="deltaG = %s±%s (%s%%) label=%s rmsd = %s A" % (
# value_round(bind_average, 2), value_round(bind_standard, 2), percentage, k, curr_rmsd))
# xyz.plot.scatter(x='component1', y='component2', marker='o', s=100, c=alpha=0.75)
# plt.set_xlabel('X')
# plt.set_ylabel('Y')
# plt.set_zlabel('Z')
plt.legend(loc='lower left', ncol=3, fontsize=8, bbox_to_anchor=(0, 0))
plt.title('Estimated number of clusters: %d (%d/%d)' % (n_clusters_, shape_worthy, X.shape[0]))
plt.show() # not now
@hlp.timeit
def silhouette_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.sil_pca
criteria_name = 'Mean Silhouette Coefficient for total_all sample_by_nums'
score_text = 'Objects with a high silhouette value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def calinski_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.calinski_pca
criteria_name = 'Calinski-Harabaz score'
score_text = 'Objects with a high Calinski-Harabaz score value are considered well clustered'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dunn_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dunn_pca
criteria_name = "Dunn's Index"
score_text = "Maximum value of the index represents the right partitioning given the index"
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def dbi_graph_pca(self):
cluster_range = self.range_n_clusters
score = self.dbi_pca
criteria_name = 'Davis-Bouldain Index'
score_text = 'The optimal clustering solution has the smtotal_allest Davies-Bouldin index value.'
plot_tools.plot_cluster_analysis(cluster_range, score, criteria_name, score_text)
@hlp.timeit
def select_number_of_clusters(self):
# ["foo", "bar", "baz"].index("bar")
getting_max_silhouette = getting_max(self.sil_pca)
getting_max_dunn = getting_max(self.dunn_pca)
getting_min_dbi = getting_min(self.dbi_pca)
sil_index = self.sil_pca.index(getting_max_silhouette)
dunn_index = self.dunn_pca.index(getting_max_dunn)
dbi_index = self.dbi_pca.index(getting_min_dbi)
cluster_quantity = []
cluster_quantity.adding(self.range_n_clusters[sil_index])
cluster_quantity.adding(self.range_n_clusters[dunn_index])
cluster_quantity.adding(self.range_n_clusters[dbi_index])
print('------------------------------------------------')
print('verify yolo', cluster_quantity)
cluster_set = set(cluster_quantity)
cluster_dict = {}
for n_set in cluster_set:
count = cluster_quantity.count(n_set)
cluster_dict.umkate({n_set: count})
print('verify yolo ', cluster_dict)
import operator
clust_num = getting_max(cluster_dict.items(), key=operator.itemgettingter(1))[0]
print("number of clusters is ", clust_num)
return clust_num
@hlp.timeit
def collect_cluster_info(self):
data = self.clusters_info[self.clust_num]
print(data)
labels = data['labels']
# Make more flexible whether pca_data or not
pca_data = self.full_traj
original_data = self.analysis_structure # self.pca_data
cluster_list = {}
distinctive_labels = list(set(labels))
for k in distinctive_labels: # Need to modify WORKS
# print('k is ',k)
# k == -1 then it is an outlier
if k != -1:
cluster_data = []
xyz = original_data[labels == k]
model_num = xyz['ModelNum']
for i in model_num:
# print(i)
temp_data = self.equiv_models[i]
cluster_data.adding(temp_data)
# print(xyz.describe())
cluster_list.umkate({k: cluster_data})
# print(cluster_list)
return cluster_list
# def write_model_to_file(self, model, resnum=None, filengthame_mkb=None):
# curr_kf = model['molDefinal_item_tail']['knowledgeframe']
# mkb_tools.write_lig(curr_kf, resnum, filengthame_mkb)
def save_analysed_data(self, filengthame):
'''
:param filengthame: Saves clustered data to pickle file
:return:
'''
# import json
# with open(filengthame, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
import pickle
# pickle.dump(self.cluster_models, open(filengthame, "wb"))
pickle.dump(self, open(filengthame, "wb"))
# should I add json saving of informatingion or not?
def load_analysed_data(self, filengthame):
'''
:param filengthame: load pickle file
:return:
'''
self.analysed_data = pickle.load(open(filengthame, "rb"))
print('test')
# create another function that shows only the best plot for kaverages
@hlp.timeit
def show_silhouette_analysis_pca_best(self, show_plot=False, custom_dpi=300):
# self.clusters_info.umkate({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
# 'calinski': calinski_avg, 'silhouette': silhouette_avg,
# 'labels': cluster_labels, 'centers': centers,
# 'silhouette_values': sample_by_num_silhouette_values}})
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
sample_by_num_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
X = self.reduced_cartesian
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
sns.set(font_scale=2)
# sns.axes_style()
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example total_all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of indivisionidual clusters, to demarcate them clearly.
ax1.set_ylim([0, length(X) + (n_clusters + 1) * 10])
y_lower = 10
# TODO a new try
colors = sns.cubehelix_palette(n_colors=n_clusters, rot=-.4)
self.colors_ = colors
for i in range(n_clusters):
# Aggregate the silhouette scores for sample_by_nums belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_by_num_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
# color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=colors[i], edgecolor=colors[i], alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 sample_by_nums
ax1.set_title("The silhouette plot for the various clusters")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of total_all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = converters.convert_to_colordata(cluster_labels, colors)
# colors = cm.spectral(cluster_labels.totype(float) / n_clusters)
#
#
# my_cmapping = sns.cubehelix_palette(n_colors=n_clusters)
self.cluster_colors = colors
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
c=colors)
# ax2.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
# c=self.full_traj.time)
# Labeling the clusters
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=100)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=100)
ax2.set_title("The visualization of the clustered data")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on conformatingion data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
fig.savefig(self.simulation_name + '_' + 'Best_cluster_analysis_md_' + '.png', dpi=custom_dpi,
bbox_inches='tight')
if show_plot is True:
plt.show()
@hlp.timeit
def show_cluster_analysis_pca_best(self, show_plot=False, custom_dpi=600):
# self.clusters_info.umkate({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
# 'calinski': calinski_avg, 'silhouette': silhouette_avg,
# 'labels': cluster_labels, 'centers': centers,
# 'silhouette_values': sample_by_num_silhouette_values}})
n_clusters = self.select_number_of_clusters()
cluster_labels = self.clusters_info[n_clusters]['labels']
sample_by_num_silhouette_values = self.clusters_info[n_clusters]['silhouette_values']
silhouette_avg = self.clusters_info[n_clusters]['silhouette']
centers = self.clusters_info[n_clusters]['centers']
X = self.reduced_cartesian
# Create a subplot with 1 row and 2 columns
fig = plt.figure(figsize=(10, 10))
# fig.set_size_inches(18, 7)
sns.set(font_scale=2)
# TODO a new try
colors = self.colors_
# 2nd Plot showing the actual clusters formed
colors = converters.convert_to_colordata(cluster_labels, colors)
# colors = cm.spectral(cluster_labels.totype(float) / n_clusters)
#
#
# my_cmapping = sns.cubehelix_palette(n_colors=n_clusters)
self.cluster_colors = colors
plt.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
c=colors)
# ax2.scatter(X[:, 0], X[:, 1], marker='.', s=250, lw=0, alpha=0.7,
# c=self.full_traj.time)
# Labeling the clusters
# Draw white circles at cluster centers
plt.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=800)
for i, c in enumerate(centers):
clust_num = i + 1
plt.scatter(c[0], c[1], marker='$%d$' % clust_num, alpha=1, s=800)
plt.title("The visualization of the clustered data")
plt.xlabel("Feature space for the 1st feature")
plt.ylabel("Feature space for the 2nd feature")
# plt.suptitle(("Silhouette analysis for KMeans clustering on conformatingion data "
# "with n_clusters = %d" % n_clusters),
# fontsize=14, fontweight='bold')
fig.savefig(self.simulation_name + '_' + 'Best_cluster_analysis_simple_md_' + '.png', dpi=custom_dpi,
bbox_inches='tight')
if show_plot is True:
plt.show()
@hlp.timeit
def silhouette_analysis_pca(self, show_plots=False):
self.sil_pca = []
self.calinski_pca = []
self.dunn_pca = []
self.dbi_pca = []
X = self.pca_data
for n_clusters in self.range_n_clusters:
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
centers = clusterer.cluster_centers_
# The silhouette_score gives the average value for total_all the sample_by_nums.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
calinski_avg = calinski_harabaz_score(X, cluster_labels)
# looks like this is ok
dunn_avg = dunn_fast(X, cluster_labels)
converted_values = converters.convert_monkey_for_dbi_analysis(X, cluster_labels)
david_bouldain = davisbouldin(converted_values, centers)
# pseudo_f = pseudoF_permanova(X, cluster_labels)
# print("For n_clusters =", n_clusters,
# "The pseudo_f is :", pseudo_f)
print("For n_clusters =", n_clusters,
"The average dunn is :", dunn_avg)
print("For n_clusters =", n_clusters,
"The average dbd is :", david_bouldain)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
print("For n_clusters =", n_clusters,
"The average calinski_harabaz_score is :", calinski_avg)
# Store info for each n_clusters
# self.clusters_info.umkate({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
# 'calinski': calinski_avg, 'silhouette': silhouette_avg,
# 'labels': cluster_labels, 'centers': centers}})
# Make decision based on average and then value_round value that would be your cluster quanity
print('------------------------------------------------------------')
self.sil_pca.adding(silhouette_avg)
self.calinski_pca.adding(calinski_avg)
self.dunn_pca.adding(dunn_avg)
self.dbi_pca.adding(david_bouldain)
# Compute the silhouette scores for each sample_by_num
sample_by_num_silhouette_values = silhouette_sample_by_nums(X, cluster_labels)
self.clusters_info.umkate({n_clusters: {'dunn': dunn_avg, 'dbi': david_bouldain,
'calinski': calinski_avg, 'silhouette': silhouette_avg,
'labels': cluster_labels, 'centers': centers,
'silhouette_values': sample_by_num_silhouette_values}})
if show_plots is True:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example total_all
# lie within [-0.1, 1]
ax1.set_xlim([-1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of indivisionidual clusters, to demarcate them clearly.
ax1.set_ylim([0, length(X) + (n_clusters + 1) * 10])
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for sample_by_nums belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_by_num_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 sample_by_nums
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of total_all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.totype(float) / n_clusters)
ax2.scatter(X['component1'], X['component2'], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=100)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=100)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample_by_num data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
@hlp.timeit
def silhouette_analysis(self):
range_n_clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]
X = self.pca_data
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example total_all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of indivisionidual clusters, to demarcate them clearly.
ax1.set_ylim([0, length(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for total_all the sample_by_nums.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample_by_num
sample_by_num_silhouette_values = silhouette_sample_by_nums(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for sample_by_nums belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_by_num_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 sample_by_nums
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of total_all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.totype(float) / n_clusters)
ax2.scatter(X['X'], X['Y'], X['Z'], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample_by_num data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
@hlp.timeit
def plotHist(self):
self.analysis_structure['BindingEnergy'].plot.hist()
plt.show()
@hlp.timeit
def MeanShift(self):
# print(X.describe)
bandwidth = estimate_bandwidth(X)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_distinctive = np.distinctive(labels)
n_clusters_ = length(labels_distinctive)
print("number of estimated clusters : %d" % n_clusters_)
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
@hlp.timeit
def plot_results(self, X, Y_, averages, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (average, covar, color) in enumerate(zip(
averages, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.whatever(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(average, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
@hlp.timeit
def VBGMM(self):
X = self.pca_data
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
self.plot_results(X, gmm.predict(X), gmm.averages_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
self.plot_results(X, dpgmm.predict(X), dpgmm.averages_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
@hlp.timeit
def transform_for_analysis(self):
model = 1
columns_dock_center = ['ModelNum', 'X', 'Y', 'Z', 'BindingEnergy']
dock_kf = mk.KnowledgeFrame(columns=columns_dock_center)
for i in sorted(self.sample_by_nums_data.keys()):
models = self.sample_by_nums_data[i]
# print(model)
for y in models.mol_data__:
# This should be the structure for equivalengthcy of models
# print(model, i, y)
self.equivalengtht_models.umkate({model: {'file': i, 'modelNum': y,
'molDefinal_item_tail': models.mol_data__[y]}})
curr_model = models.mol_data__[y]
curr_frame = curr_model['knowledgeframe']
curr_x = curr_frame['X'].average()
curr_y = curr_frame['Y'].average()
curr_z = curr_frame['Z'].average()
curr_bind = curr_model['vina_info'][0]
dock_kf.loc[model] = [int(model), curr_x, curr_y, curr_z, curr_bind]
# print(y, models.mol_data__[y]['knowledgeframe'])
model += 1
# print(self.equivalengtht_models)
dock_kf['ModelNum'] = dock_kf['ModelNum'].totype(int)
return dock_kf
def getting_mol_data(self):
return self.mol_data__
@hlp.timeit
def transform_data(self):
mol_data = {}
for model, model_info in zip(self.object, self.info):
# print(model_info)
monkey_model = self.monkey_transformatingion(model)
mol_data.umkate({model_info[0]: {'knowledgeframe': monkey_model, 'vina_info': model_info[1:]}})
return mol_data
@hlp.timeit
def monkey_transformatingion(self, list_object_mol):
columns_mkbqt = ['ATOM', 'SerialNum', 'AtomName', 'ResidueName', 'ChainId',
'ChainNum', 'X', 'Y', 'Z', 'Occupancy', 'TempFactor', 'Charge', 'ElemSymbol']
self.kf = mk.KnowledgeFrame(list_object_mol, columns=columns_mkbqt)
self.kf['X'] = mk.to_num(self.kf['X'])
self.kf['Y'] = | mk.to_num(self.kf['Y']) | pandas.to_numeric |
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import os
import argparse
from pathlib import Path
import joblib
import scipy.sparse
import string
import nltk
from nltk import word_tokenize
nltk.download('punkt')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfikfVectorizer
from sklearn.preprocessing import LabelBinarizer
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
'''
Preprocessing and preperation of data:
The purpose of this script is to prepare and preproces the raw textual data and the admission data needed for training and testing the classification model. This proces includes the following steps:
1. Clean and prepare admission data
2. Extract discharge total_summaries from note data
3. Remove newborn cases and in-hospital deaths
4. Bind note-data to 30-day readmission informatingion
5. Split into train, validation and test set and balance training data by oversampling positive cases
6. Removal of special characters, numbers and de-identified brackets
7. Vectorise total_all discharge notes:
7a. Remove stop-words, most common words and very rare words (benchmarks need to be defined)
7b. Create set of TF-IDF weighted tokenised discharge notes
8. Output datasets and labels as CSV-files
'''
# Defining main function
def main(args):
notes_file = args.nf
admissions_file = args.af
NotePreprocessing(notes_file = notes_file, admissions_file = admissions_file)
# Defining class 'NotePreprocessing'
class NotePreprocessing:
def __init__(self, notes_file, admissions_file):
# Setting directory of input data
data_dir = self.setting_data_directory()
# Setting directory of output plots
out_dir = self.setting_output_directory()
# Loading notes
if notes_file is None:
notes = mk.read_csv(data_dir / "NOTEEVENT.csv")
else:
notes = mk.read_csv(data_dir / notes_file)
# Loading general admission data
if admissions_file is None:
admissions = mk.read_csv(data_dir / "ADMISSIONS.csv")
else:
noadmissionstes = mk.read_csv(admissions_file)
#-#-# PREPROCESSING ADMISSIONS DATA #-#-#
# Convert to datetime
admissions.ADMITTIME = mk.convert_datetime(admissions.ADMITTIME, formating = '%Y-%m-%d %H:%M:%S', errors = 'coerce')
admissions.DISCHTIME = mk.convert_datetime(admissions.DISCHTIME, formating = '%Y-%m-%d %H:%M:%S', errors = 'coerce')
admissions.DEATHTIME = mk.convert_datetime(admissions.DEATHTIME, formating = '%Y-%m-%d %H:%M:%S', errors = 'coerce')
# Sort by subject ID and admission date
admissions = admissions.sort_the_values(['SUBJECT_ID','ADMITTIME'])
admissions = admissions.reseting_index(sip = True)
# Create collumn containing next admission time (if one exists)
admissions['NEXT_ADMITTIME'] = admissions.grouper('SUBJECT_ID').ADMITTIME.shifting(-1)
# Create collumn containing next admission type
admissions['NEXT_ADMISSION_TYPE'] = admissions.grouper('SUBJECT_ID').ADMISSION_TYPE.shifting(-1)
# Replace values with NaN or NaT if readmissions are planned (Category = 'Elective')
rows = admissions.NEXT_ADMISSION_TYPE == 'ELECTIVE'
admissions.loc[rows,'NEXT_ADMITTIME'] = mk.NaT
admissions.loc[rows,'NEXT_ADMISSION_TYPE'] = np.NaN
# It is important that we replacing the removed planned admissions with the next unplanned readmission.
# Therefore, we backfill the removed values with the values from the next row that contains data about an unplanned readmission
# Sort by subject ID and admission date just to make sure the order is correct
admissions = admissions.sort_the_values(['SUBJECT_ID','ADMITTIME'])
# Back fill removed values with next row that contains data about an unplanned readmission
admissions[['NEXT_ADMITTIME','NEXT_ADMISSION_TYPE']] = admissions.grouper(['SUBJECT_ID'])[['NEXT_ADMITTIME','NEXT_ADMISSION_TYPE']].fillnone(method = 'bfill')
# Add collumn contain the calculated number of the days until the next admission
admissions['DAYS_NEXT_ADMIT']= (admissions.NEXT_ADMITTIME - admissions.DISCHTIME).dt.total_seconds()/(24*60*60)
# It appears that the reason for the negative values is due to the fact that some of these patients are noted as readmitted before being discharged from their first admission.
# Quick fix for now is to remove these negative values
# Removing rows for which value in DAYS_NEXT_ADMIT is negative
admissions = admissions.sip(admissions[admissions.DAYS_NEXT_ADMIT < 0].index)
# Change data type of DAYS_NEXT_ADMIT to float
admissions['DAYS_NEXT_ADMIT'] = | mk.to_num(admissions['DAYS_NEXT_ADMIT']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 14:13:20 2022
@author: scott
Visualizations
--------------
Plotly-based interactive visualizations
"""
import monkey as mk
import numpy as np
import spiceypy as spice
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import plotly.graph_objects as go
import plotly
import plotly.express as px
import mkb
from Ephem import *
from Events import *
#%% Visualizing Orbital Angular Momentum Space
def plot_h_space_numeric(kf,color='i',logColor=False,colorscale='Blackbody'):
'''
Plot the catalog of objects in angular momentum space.
Color by a numeric parameter.
'''
method = 'plotly'
if method == 'matplotlib':
# Simple matplotlib scatter plot
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 12))
ax = fig.add_subplot(projection='3d')
ax.scatter(kf.hx,kf.hy,kf.hz,s=1)
plt.show()
elif method == 'plotly':
# Plotly scatter
import plotly.graph_objects as go
import plotly
import plotly.express as px
# Select color data
c = kf[color]
color_label = color
if logColor == True:
# Log of color
c = np.log10(c)
color_label = 'log('+color+')'
fig = go.Figure(data=[go.Scatter3d(
x=kf.hx,
y=kf.hy,
z=kf.hz,
customdata=kf[['Name','a','e','i','om','w']],
hovertext = kf.Name,
hoverinfo = 'text+x+y+z',
hovertemplate=
"<b>%{customdata[0]}</b><br><br>" +
"hx: %{x:.2f}<br>" +
"hy: %{y:.2f}<br>" +
"hz: %{z:.2f}<br>" +
"a: %{customdata[1]:.2f} km<br>" +
"e: %{customdata[2]:.2f}<br>" +
"i: %{customdata[3]:.2f} deg<br>" +
"om: %{customdata[4]:.2f} deg<br>" +
"w: %{customdata[5]:.2f} deg<br>" +
"",
mode='markers',
marker=dict(
size=1,
color=c, # set color to an array/list of desired values
colorscale=colorscale, # choose a colorscale 'Viridis'
opacity=0.8,
colorbar=dict(thickness=20,title=color_label)
),
)])
# Umkate figure title and layout
fig.umkate_layout(
# title='2D Scatter',
title_x = 0.5,
xaxis=dict(
title='hx',
gridcolor='white',
gridwidth=1,
# type="log",
# exponentformating = "power",
# range = [-1, 2],
),
yaxis=dict(
title='hy',
gridcolor='white',
gridwidth=1,
# autorange = True,
# type="log",
# exponentformating = "power",
# autorange='reversed',
# range=[0,1],
),
# paper_bgcolor='rgb(243, 243, 243)',
# plot_bgcolor='rgb(243, 243, 243)',
# paper_bgcolor='rgb(0, 0, 0)',
# plot_bgcolor='rgb(0, 0, 0)',
)
# Render
plotly.offline.plot(fig, validate=False, filengthame='AngMomentumScatter.html')
return
def plot_h_space_cat(kf,cat='vishnu_cluster'):
'''
Plot the catalog of objects in angular momentum space.
Color by a categorical parameter
'''
import plotly.graph_objects as go
import plotly
# Check if data is timecollections (from multiple months)
timecollections = False
filengthame = 'AngMomentumScatter.html'
mode = 'markers'
if length(kf[kf.duplicated_values(subset='NoradId')]) > 0:
# Timecollections plots need to add blank line of None values between lines
# see: https://stackoverflow.com/questions/56723792/how-to-efficiently-plot-a-large-number-of-line-shapes-where-the-points-are-conne
timecollections = True
filengthame = 'AngMomentumScatterTimecollections.html'
mode = 'lines+markers'
# Create figure
fig = go.Figure()
# Extract region data
from natsort import natsorted
region_names = natsorted(list(kf[cat].distinctive())) # Names of regions
# Ensure region names are strings
region_names = [str(x) for x in region_names]
kf[cat] = kf[cat].totype(str)
if timecollections == False:
region_data = {region:kf.query(cat+" == '%s'" %region)
for region in region_names}
else:
# Timecollections data
# Loop through regions
region_data = {} # Instantiate region data dict
for region in region_names:
# Extract the data
data = kf.query(cat+" == '%s'" %region) # Get the data
data = data.sort_the_values(by=['NoradId','Epoch']).reseting_index(sip=True)
# Add blank rows between groups of objects
grouped = data.grouper('NoradId')
data = mk.concating([i.adding({'NoradId': None}, ignore_index=True) for _, i in grouped]).reseting_index(sip=True)
# Append to dict
region_data.umkate({region : data})
# Add traces
for region_name, region in region_data.items():
# Get the coordinates
x = region['hx']
y = region['hy']
z = region['hz']
fig.add_trace(go.Scatter3d(
x=x,
y=y,
z=z,
name = region_name,
customdata=region[['Name','a','e','i','om','w']],
hovertext = region['Name'],
hoverinfo = 'text+x+y+z',
hovertemplate=
"<b>%{customdata[0]}</b><br><br>" +
"hx: %{x:.2f}<br>" +
"hy: %{y:.2f}<br>" +
"hz: %{z:.2f}<br>" +
"a: %{customdata[1]:.2f} km<br>" +
"e: %{customdata[2]:.2f}<br>" +
"i: %{customdata[3]:.2f} deg<br>" +
"om: %{customdata[4]:.2f} deg<br>" +
"w: %{customdata[5]:.2f} deg<br>" +
"",
mode=mode,
marker=dict(
size=1,
# color = color_dict[region_name],
opacity=0.8,
# colorbar=dict(thickness=20,title=cat)
),
)
)
if timecollections == True:
# Do not connect timesereies
fig.umkate_traces(connectgaps=False)
# Umkate figure title and layout
fig.umkate_layout(
# title='2D Scatter',
title_x = 0.5,
xaxis=dict(
title='hx',
gridcolor='white',
gridwidth=1,
# type="log",
# exponentformating = "power",
# range = [-1, 2],
),
yaxis=dict(
title='hy',
gridcolor='white',
gridwidth=1,
# autorange = True,
# type="log",
# exponentformating = "power",
# autorange='reversed',
# range=[0,1],
),
# paper_bgcolor='rgb(243, 243, 243)',
# plot_bgcolor='rgb(243, 243, 243)',
# paper_bgcolor='rgb(0, 0, 0)',
# plot_bgcolor='rgb(0, 0, 0)',
)
# Umkate figure layout
fig.umkate_layout(legend=dict(
title='Clusters: {}'.formating(cat),
itemsizing='constant',
itemdoubleclick="toggleothers",
# yanchor="top",
# y=0.99,
# xanchor="right",
# x=0.01,
))
# Umkate ranges
fig.umkate_layout(
scene = dict(
xaxis = dict(nticks=4, range=[-20*1E4,20*1E4],),
yaxis = dict(nticks=4, range=[-20*1E4,20*1E4],),
zaxis = dict(nticks=4, range=[-20*1E4,20*1E4],),
aspectmode = 'cube',
),
# width=700,
# margin=dict(r=20, l=10, b=10, t=10)
)
# Render
plotly.offline.plot(fig, validate=False, filengthame=filengthame)
return
#%% Scatter Plots
def plot_2d_scatter_numeric(kf,xlabel,ylabel,color,logColor=False,size=1.):
'''
Generate a 2D scatter plot using whatever available numeric feilds as the x,y,
and color coordinates. Returns an interactive scatter plot with hover data
showing informatingion on each satellite.
Example:
>> plot_2d_scatter(kf,'h','hz','i')
'''
import plotly.graph_objects as go
import plotly
import plotly.express as px
# Error checking
if xlabel not in list(kf.columns):
raise ValueError('xlabel not in dataset')
if ylabel not in list(kf.columns):
raise ValueError('ylabel not in dataset')
if color not in list(kf.columns):
raise ValueError('color not in dataset')
X = kf[[xlabel,ylabel]].to_numpy()
# Create grid to evaluate
Nx = 20
Ny = 20
xgetting_min, xgetting_max = (kf[xlabel].getting_min(), kf[xlabel].getting_max())
ygetting_min, ygetting_max = (kf[ylabel].getting_min(), kf[ylabel].getting_max())
# Xgrid = np.vstack(mapping(np.flat_underlying, np.meshgrid(np.linspace(xgetting_min, xgetting_max, Nx),
# np.linspace(ygetting_min, ygetting_max, Ny)))).T
# Evaluate density
# from sklearn.neighbors import KernelDensity
# kde1 = KernelDensity(bandwidth=5, kernel='gaussian')
# log_dens1 = kde1.fit(X).score_sample_by_nums(Xgrid)
# dens1 = X.shape[0] * np.exp(log_dens1).reshape((Ny, Nx))
# Select color data
c = kf[color]
color_label = color
if logColor == True:
# Log of color
c = np.log10(c)
color_label = 'log('+color+')'
# Construct figure
fig = go.Figure()
# Add trace
fig.add_trace(
go.Scattergl(
x = kf[xlabel],
y = kf[ylabel],
customdata=kf[['Name','a','e','i','om','w','h','hx','hy','hz']],
hovertext = kf.Name,
hoverinfo = 'text+x+y+z',
hovertemplate=
"<b>%{customdata[0]}</b><br><br>" +
"x: %{x:.2f}<br>" +
"y: %{y:.2f}<br>" +
"a: %{customdata[1]:.2f} km<br>" +
"e: %{customdata[2]:.2f}<br>" +
"i: %{customdata[3]:.2f} deg<br>" +
"om: %{customdata[4]:.2f} deg<br>" +
"w: %{customdata[5]:.2f} deg<br>" +
"h: %{customdata[6]:.2f}<br>" +
"hx: %{customdata[7]:.2f}<br>" +
"hy: %{customdata[8]:.2f}<br>" +
"hz: %{customdata[9]:.2f}<br>" +
"",
mode = 'markers',
marker = dict(
color = c,
size = size,
colorscale='Blackbody', # choose a colorscale 'Viridis'
opacity=0.99,
colorbar=dict(thickness=20,title=color_label)
)
)
)
# Add density trace
# from skimage import data
# img = data.camera()
# fig.add_trace(go.Contour(
# z=dens1,
# x=np.linspace(xgetting_min,xgetting_max,Nx), # horizontal axis
# y=np.linspace(ygetting_min,ygetting_max,Ny) # vertical axis
# )
# )
# Umkate figure title and layout
fig.umkate_layout(
title='2D Scatter',
title_x = 0.5,
xaxis=dict(
title=xlabel,
gridcolor='white',
gridwidth=1,
# type="log",
# exponentformating = "power",
# range = [-1, 2],
),
yaxis=dict(
title=ylabel,
gridcolor='white',
gridwidth=1,
# autorange = True,
# type="log",
# exponentformating = "power",
# autorange='reversed',
# range=[0,1],
),
# paper_bgcolor='rgb(243, 243, 243)',
# plot_bgcolor='rgb(243, 243, 243)',
# paper_bgcolor='rgb(0, 0, 0)',
# plot_bgcolor='rgb(0, 0, 0)',
)
# Render
plotly.offline.plot(fig, validate=False, filengthame='Scatter.html')
return
def plot_kde(kf,xlabel,ylabel):
# Error checking
if xlabel not in list(kf.columns):
raise ValueError('xlabel not in dataset')
if ylabel not in list(kf.columns):
raise ValueError('ylabel not in dataset')
# if color not in list(kf.columns):
# raise ValueError('color not in dataset')
# Extract data
X = kf[[xlabel,ylabel]].to_numpy()
Nx = 50
Ny = 50
bandwidth = 10000
xgetting_min, xgetting_max = (kf[xlabel].getting_min(), kf[xlabel].getting_max())
ygetting_min, ygetting_max = (kf[ylabel].getting_min(), kf[ylabel].getting_max())
Xgrid = np.vstack(mapping(np.flat_underlying, np.meshgrid(np.linspace(xgetting_min, xgetting_max, Nx),
np.linspace(ygetting_min, ygetting_max, Ny)))).T
# # Create grid to evaluate
# from astroML.datasets import fetch_great_wtotal_all
# X = fetch_great_wtotal_all()
# Nx = 50
# Ny = 125
# bandwidth = 5
# xgetting_min, xgetting_max = (-375, -175)
# ygetting_min, ygetting_max = (-300, 200)
# Xgrid = np.vstack(mapping(np.flat_underlying, np.meshgrid(np.linspace(xgetting_min, xgetting_max, Nx),
# np.linspace(ygetting_min, ygetting_max, Ny)))).T
# Evaluate density
from sklearn.neighbors import KernelDensity
kde1 = KernelDensity(bandwidth=bandwidth, kernel='gaussian')
log_dens1 = kde1.fit(X).score_sample_by_nums(Xgrid)
dens1 = X.shape[0] * np.exp(log_dens1).reshape((Ny, Nx))
# Plot the figure
fig, ax = plt.subplots(figsize=(8, 8))
plt.imshow(dens1, origin='lower',
# norm=LogNorm(),
# cmapping=plt.cm.binary,
cmapping=plt.cm.hot_r,
extent=(xgetting_min, xgetting_max, ygetting_min, ygetting_max), )
plt.colorbar(label='density')
ax.scatter(X[:, 0], X[:, 1], s=1, lw=0, c='k') # Add points
# Creat colorbar
plt.show()
return
#%% Main DIT Analysis Figures
def plot_time_windows(wins,groups,Types,
colors=None,filengthame=None,group_label='group',title="Time Windows"):
'''
Plot a Gantt chart displaying a set of time windows.
'''
kf_list = []
for i in range(length(wins)):
# Convert window to knowledgeframe
win = wins[i] # Extract window
kfi = window_to_knowledgeframe(win,timefmt='datetime') # Access times (datetime)
kfi[group_label] = groups[i] # y-labels
kfi['Type'] = Types[i] # Types
kf_list.adding(kfi) # Append to list
# Concat total_all knowledgeframes
kf = mk.concating(kf_list)
# Generate colors
if colors is None:
# colors = px.colors.qualitative.Plotly[:length(groups)]
colors = px.colors.qualitative.Plotly
# Create gant chart
fig = px.timeline(kf, x_start="Start", x_end="Stop", y=group_label, color="Type",
color_discrete_sequence=colors,
title=title,
)
# Umkate bar height
BARHEIGHT = .1
fig.umkate_layout(
yaxis={"domain": [getting_max(1 - (BARHEIGHT * length(fig.data)), 0), 1]}, margin={"t": 0, "b": 0}
)
# Add range slider
fig.umkate_layout(
xaxis=dict(
rangeselector=dict(
),
rangeslider=dict(
visible=True
),
type="date"
)
)
# # Add title to figure
# fig.umkate_layout(
# title = {'text':title}
# )
# Render
if filengthame is None:
filengthmae = 'temp-plot.html'
plotly.offline.plot(fig, filengthame = str(filengthame), validate=False)
return
def plot_visibility(kftopo,filengthame=None,title=None):
''' Plot the visibility data for a single gvalue_round station '''
from plotly.subplots import make_subplots
import plotly.graph_objects as go
# Constraints
cutoff_mag = 15. # Maximum magnitude for visibility
# Compute contrained stats
msat = kftopo.Vmag.to_numpy()
getting_max_mag = np.nangetting_max(msat[msat<=cutoff_mag]) # Maximum (dimest) magnitude
getting_min_mag = np.nangetting_min(msat[msat<=cutoff_mag]) # Minimum (brightest) magnitude
avg_mag = np.nanaverage(msat[msat<=cutoff_mag]) # Mean magnitude
start_et = kftopo.ET.getting_min()
stop_et = kftopo.ET.getting_max()
# Copy original knowledgeframe
kftopo1 = kftopo.clone()
# Insert blank line between time gaps
et = kftopo.ET.to_numpy() # Extract ephemeris time
ind = np.where(np.diff(et)>100.)[0]
kf_new = mk.KnowledgeFrame(index=ind + 0.5) # New knowledgeframe at half integer indices
kftopo = mk.concating([kftopo, kf_new]).sorting_index()
# Generate a subplot
fig = make_subplots(rows=3, cols=1, shared_xaxes=True)
# First trace. Solar and Sat Elevation.
fig.add_trace(
go.Scatter(x=kftopo.ET, y= np.rad2deg(kftopo['Sun.El']),
mode='lines',name='Sun.El',legendgroup = '1' ),
row=1, col=1
)
fig.add_trace(
go.Scatter(x=kftopo.ET, y= np.rad2deg(kftopo['Sat.El']),
mode='lines',name='Sat.El',legendgroup = '1' ),
row=1, col=1
)
# Second trace. Sat Range.
fig.add_trace(
go.Scatter(x=kftopo.ET, y=kftopo['Sat.R'],
mode='lines',name='Sat.Range',legendgroup = '2' ),
row=2, col=1
)
# Third trace. Visual Magnitude.
fig.add_trace(
go.Scatter(x=kftopo.ET, y=kftopo['Vmag'],
mode='lines',name='Vmag',legendgroup = '3' ),
row=3, col=1
)
fig.add_trace(
go.Scatter(x=kftopo.ET, y=kftopo['Vmag2'],
mode='lines',name='Vmag2',legendgroup = '3' ),
row=3, col=1
)
# Add shape regions
fig.add_hrect(
y0=getting_min_mag, y1=getting_max_mag,
fillcolor="LightSalmon", opacity=0.3,
layer="below", line_width=0,
row=3, col=1
),
# Umkate yaxis properties
fig.umkate_xaxes(title_text="Epoch (ET)", row=3, col=1)
# Umkate yaxis properties
fig.umkate_yaxes(title_text="Elevation (deg)", row=1, col=1)
fig.umkate_yaxes(title_text="Range (km)", row=2, col=1)
fig.umkate_yaxes(title_text="Visual Magnitude (mag)", row=3, col=1)
# Reverse Vmag axes
fig.umkate_yaxes(autorange="reversed", row=3, col=1)
# Add gap in legend groups
fig.umkate_layout(legend_tracegroupgap = 300)
# Umkate title
fig.umkate_layout(title_text=title)
# Render
if filengthame is None:
filengthmae = 'temp-plot.html'
plotly.offline.plot(fig, filengthame = str(filengthame), validate=False)
# Reset topo
kftopo = kftopo1
return
def plot_overpass_skyplot(kftopo, kfa, filengthame=None,title=None):
''' Generate a skyplot of the visible passes for a single station '''
# Bin data based on access time intervals
# See: https://towardsdatascience.com/how-i-customarily-bin-data-with-monkey-9303c9e4d946
kftopo1 = kftopo.clone()
if 'Sat.Vmag' not in kftopo1.columns:
# Compute visual magnitudes
Rsat = 1 # Radius of satellite (m)
msat = compute_visual_magnitude(kftopo1,Rsat,p=0.25,k=0.12) # With airmass
kftopo1['Sat.Vmag'] = msat
# Remove nan
kftopo1 = kftopo1[mk.notnull(kftopo1['Sat.Vmag'])]
# Create bins of ranges for each access interval
ranges = mk.IntervalIndex.from_tuples(list(zip(kfa['Start'], kfa['Stop'])),closed='both')
labels = kfa.Access.totype(str).to_list()
# Apply cut to label access periods
kftopo1['Access'] = mk.cut(kftopo1['ET'], bins=ranges, labels=labels).mapping(dict(zip(ranges,labels)))
# Remove non-access
kftopo1 = kftopo1[mk.notnull(kftopo1.Access)]
# Add blank rows between groups of objects
grouped = kftopo1.grouper('Access')
kftopo1 = mk.concating([i.adding({'Access': None}, ignore_index=True) for _, i in grouped]).reseting_index(sip=True)
# Forward fill na in Access
kftopo1.Access = kftopo1.Access.fillnone(method="ffill")
import plotly.graph_objects as go
import plotly.express as px
import plotly
# Convert angles to degrees
kftopo1['Sat.El'] = np.rad2deg(kftopo1['Sat.El'])
kftopo1['Sat.Az'] = np.rad2deg(kftopo1['Sat.Az'])
# Plotly express (color by access)
fig = px.line_polar(kftopo1, r="Sat.El", theta="Sat.Az",
color="Access",
color_discrete_sequence=px.colors.sequential.Plasma_r)
# Multicolored lines
# See: https://stackoverflow.com/questions/69705455/plotly-one-line-different-colors
# Remove gaps
fig.umkate_traces(connectgaps=False)
# Reverse polar axis
fig.umkate_layout(
polar = dict(
radialaxis = dict(range = [90,0]),
angularaxis = dict(
tickfont_size=10,
rotation=90, # start position of angular axis
direction="clockwise",
showticklabels = True,
ticktext = ['0','1','2','3','4','5','6','7']
)
),
)
# # Add button to toggle traces on/off
# button2 = dict(method='restyle',
# label='All',
# visible=True,
# args=[{'visible':True}],
# args2 = [{'visible': False}],
# )
# # Create menu item
# um = [{'buttons':button2, 'label': 'Show', 'showactive':True,
# # 'x':0.3, 'y':0.99,
# }]
# mkb.set_trace()
# # add dromkown menus to the figure
# fig.umkate_layout(showlegend=True, umkatemenus=um)
# Render
if filengthame is None:
filengthmae = 'temp-plot.html'
plotly.offline.plot(fig, filengthame = str(filengthame), validate=False)
del kftopo1
return
#%% Overpass plots
def plot_access_times(access,gslight,gsdark,satlight, satpartial, satdark):
'''
Generate a timeline plot showing the access intervals and lighting conditions
of the satellite as seen from a gvalue_roundstation.
Parameters
----------
access : SpiceCell
Window containing line-of-sight access intervals.
gsdark : SpiceCell
Window containing time intervals of station darkness.
satlight : SpiceCell
Window containing time intervals of sat full sunlight.
satpartial : SpiceCell
Window containing time intervals of sat partial sunlight.
'''
# Process interval sets
# Line-of-sight Access
kfa = window_to_knowledgeframe(access,timefmt='datetime') # Access times (datetime)
kfa['trace'] = 'Viewing Geometry' # Trace label
kfa['Type'] = 'Above horizon' # Access type
# Visible Access
# Compute set difference
# visaccess = access - gslight -satdark
vis = spice.wndifd(access,gslight) # Subtract station daylight
vis = spice.wndifd(vis,satdark) # Subtract sat darkness
kfvis = window_to_knowledgeframe(vis,timefmt='datetime') # Access times (datetime)
kfvis['trace'] = 'Visibility' # Trace label
kfvis['Type'] = 'Visible Access' # Access type
# Gvalue_roundstation dark
kfgs = window_to_knowledgeframe(gsdark,timefmt='datetime') # Gvalue_round station dark times (datetime)
kfgs['trace'] = 'Station Lighting' # Trace label
kfgs['Type'] = 'GS Dark' # Trace label
# Satellite Sunlight
kfss = window_to_knowledgeframe(satlight,timefmt='datetime') # Sat light times (datetime)
kfss['trace'] = 'Sat Lighting' # Trace label
kfss['Type'] = 'Sat Sun' # Trace label
# Satellite Penumbra
kfsp = window_to_knowledgeframe(satpartial,timefmt='datetime') # Sat light times (datetime)
kfsp['trace'] = 'Sat Lighting' # Trace label
kfsp['Type'] = 'Sat Penumbra' # Trace label
# Compine knowledgeframes
kf = mk.concating( [kfgs[['Start', 'Stop', 'Duration','Type','trace']],
kfss[['Start', 'Stop', 'Duration','Type','trace']],
kfsp[['Start', 'Stop', 'Duration','Type','trace']],
kfa[['Start', 'Stop', 'Duration','Type','trace']],
kfvis[['Start', 'Stop', 'Duration','Type','trace']],
])
# Create gant chart
fig = px.timeline(kf, x_start="Start", x_end="Stop", y="trace", color="Type",
color_discrete_sequence=["black","goldenrod","grey","blue","red"],
)
# Umkate bar height
BARHEIGHT = .1
fig.umkate_layout(
yaxis={"domain": [getting_max(1 - (BARHEIGHT * length(fig.data)), 0), 1]}, margin={"t": 0, "b": 0}
)
# Add range slider
fig.umkate_layout(
xaxis=dict(
rangeselector=dict(
),
rangeslider=dict(
visible=True
),
type="date"
)
)
# Render
filengthame = 'AccessPeriods.html'
plotly.offline.plot(fig, validate=False, filengthame=filengthame)
return
def plot_overpass_magnitudes(kftopo, kfa):
# Bin data based on access time intervals
# See: https://towardsdatascience.com/how-i-customarily-bin-data-with-monkey-9303c9e4d946
kftopo1 = kftopo.clone()
# Compute visual magnitudes
Rsat = 1 # Radius of satellite (m)
msat = compute_visual_magnitude(kftopo1,Rsat,p=0.25,k=0.12,include_airmass=True) # With airmass
# msat = compute_visual_magnitude(kftopo1,Rsat,p=0.25,k=0.12,include_airmass=False) # Without airmass
kftopo1['Sat.Vmag'] = msat
# Remove nan
kftopo1 = kftopo1[mk.notnull(kftopo1['Sat.Vmag'])]
# Create bins of ranges for each access interval
ranges = mk.IntervalIndex.from_tuples(list(zip(kfa['Start'], kfa['Stop'])),closed='both')
labels = kfa.Access.totype(str).to_list()
# Apply cut to label access periods
kftopo1['Access'] = mk.cut(kftopo1['UTCG'], bins=ranges, labels=labels).mapping(dict(zip(ranges,labels)))
# Remove non-access
kftopo1 = kftopo1[mk.notnull(kftopo1.Access)]
# Remove -ve elevations
# kftopo1 = kftopo1[]
# Add blank rows between groups of objects
grouped = kftopo1.grouper('Access')
kftopo1 = mk.concating([i.adding({'Access': None}, ignore_index=True) for _, i in grouped]).reseting_index(sip=True)
# Forward fill na in Access
kftopo1.Access = kftopo1.Access.fillnone(method="ffill")
# Generate ticks for colorscale
Vgetting_min = kftopo1['Sat.Vmag'].getting_min() # Min (brightest)
Vgetting_max = +30 # Limiting magnitude
cticks = np.arange(int((Vgetting_min//5)*5.),int(Vgetting_max)+5, 5)
# Assign markersize
# Want to scale size of markers based on magnitude
# Values range from
# (Brightest) (Dimest)
# -2 0 2 4 6 ... 30 ... 70
# ^ ^
# 10 1
# Size range
y1 = 5 # Max marker size
y2 = 0.1 # Min marker size
# Mag range
x1 = 0 # Min mag (brightest)
x2 = 30 # Max mag (dimmest)
# Set size
# See: https://github.com/eleanorlutz/western_constellations_atlas_of_space/blob/main/6_plot_mappings.ipynb
kftopo1['size'] = np.nan # Initialize
kftopo1['size'] = y1 + ((y2-y1)/(x2-x1))*(kftopo1['Sat.Vmag'] - x1)
kftopo1['size'][kftopo1['size']<1] = 1 # Limit getting_minimum size
kftopo1['size'][ | mk.ifnull(kftopo1['size']) | pandas.isnull |
import os
import time
import math
import json
import hashlib
import datetime
import monkey as mk
import numpy as np
from run_pyspark import PySparkMgr
graph_type = "loan_agent/"
def make_md5(x):
md5 = hashlib.md5()
md5.umkate(x.encode('utf-8'))
return md5.hexdigest()
def make_node_schema(entity_name, entity_kf, comp_index_properties = None, mix_index_properties = None):
properties = {"propertyKeys": []}
for col in entity_kf.columns:
if entity_kf[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif entity_kf[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].adding(prop)
vertexLabels = {"vertexLabels": []}
vertexLabels["vertexLabels"].adding({"name": entity_name})
vertexIndexes = {"vertexIndexes": []}
if comp_index_properties is not None:
for prop in comp_index_properties:
vertexIndexes["vertexIndexes"].adding({
"name" : entity_name + "_" + prop + "_comp",
"propertyKeys" : [ prop ],
"composite" : True,
"distinctive" : False
})
if mix_index_properties is not None:
for prop in mix_index_properties:
vertexIndexes["vertexIndexes"].adding({
"name" : entity_name + "_" + prop + "_mixed",
"propertyKeys" : [ prop ],
"composite" : False,
"distinctive" : False,
"mixedIndex" : "search"
})
vertexIndexes["vertexIndexes"].adding({
"name" : entity_name + "_graph_label_mixed",
"propertyKeys" : [ "graph_label" ],
"composite" : False,
"distinctive" : False,
"mixedIndex" : "search"
})
return {**properties, **vertexLabels, **vertexIndexes}
def make_node_mappingper(entity_name, entity_kf):
entity_file = "gra_" + entity_name + ".csv"
vertexMap = {"vertexMap": {entity_file: {}}}
vertexMap["vertexMap"][entity_file] = {
"[VertexLabel]" : entity_name
}
for col in entity_kf.columns:
vertexMap["vertexMap"][entity_file][col] = col
return vertexMap
def make_vertex_centric_schema(edge_name, index_property, direction, order):
if direction not in ["BOTH", "IN", "OUT"]:
print("direction should be in {}".formating(["BOTH", "IN", "OUT"]))
return None
if order not in ["incr", "decr"]:
print("order should be in {}".formating(["incr", "decr"]))
return None
vertexCentricIndexes = {"vertexCentricIndexes": []}
vertexCentricIndexes["vertexIndexes"].adding({
"name" : edge_name + "_" + index_property,
"edge" : edge_name,
"propertyKeys" : [ index_property ],
"order": order,
"direction": direction
})
return vertexCentricIndexes
def make_edge_schema(relation_kf = None, relation_comp_index_properties = None, relation_mix_index_properties = None):
properties = {"propertyKeys": []}
relation_columns = relation_kf.columns.convert_list()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation kf lacks Left and Right columns ")
for col in relation_kf.columns:
if col in ["Left", "Right", "Type"]:
continue
if relation_kf[col].dtype == np.float:
prop = {"name": col, "dataType": "Float", "cardinality": "SINGLE"}
elif relation_kf[col].dtype == np.integer:
prop = {"name": col, "dataType": "Integer", "cardinality": "SINGLE"}
else:
prop = {"name": col, "dataType": "String", "cardinality": "SINGLE"}
properties["propertyKeys"].adding(prop)
relation_names = relation_kf["Type"].counts_value_num().index.convert_list()
edgeLabels = {"edgeLabels": []}
for relation in relation_names:
edgeLabels["edgeLabels"].adding({
"name": relation,
"multiplicity": "MULTI",
"unidirected": False
})
edgeIndexes = {"edgeIndexes": []}
for relation_name in relation_names:
if relation_comp_index_properties is not None:
for prop in relation_comp_index_properties:
edgeIndexes["edgeIndexes"].adding({
"name": relation_name + "_" + prop + "_comp",
"propertyKeys": [ prop ],
"composite": True,
"distinctive": False,
"indexOnly": relation_name
})
if relation_mix_index_properties is not None:
for prop in relation_mix_index_properties:
edgeIndexes["edgeIndexes"].adding({
"name" : relation_name + "_" + prop + "_mixed",
"propertyKeys": [ prop ],
"composite": False,
"distinctive": False,
"mixedIndex": "search",
"indexOnly": relation_name
})
return {**properties, **edgeLabels, **edgeIndexes}
def make_edge_mappingper(entity_relations, relation_kf=None, specific_relation=None):
edgeMap = {"edgeMap": {}}
for relation_name, entity_pairs in entity_relations.items():
if specific_relation is not None and relation_name != specific_relation:
continue
for pair in entity_pairs:
relation_file = "gra_" + relation_name + ".csv"
edge = {"[edge_left]": {"Left": pair[0]},
"[EdgeLabel]": relation_name,
"[edge_right]": {"Right": pair[1]}}
if relation_kf is not None:
relation_columns = relation_kf.columns.convert_list()
if "Left" not in relation_columns or "Right" not in relation_columns:
print("relation kf lacks Left and Right columns ")
for col in relation_kf.columns:
if col in ["Left", "Right", "Type"]:
continue
edge[col] = col
edgeMap["edgeMap"][relation_file] = edge
return edgeMap
def dump_schema(schema, datamappingper, folder):
if not os.path.exists(graph_type + folder):
os.makedirs(graph_type + folder)
f = open(graph_type + folder + "/schema.json", 'w')
f.write(json.dumps(schema))
f.close()
f = open(graph_type + folder + "/datamappingper.json", 'w')
f.write(json.dumps(datamappingper))
f.close()
spark_args = {}
pysparkmgr = PySparkMgr(spark_args)
_, spark, sc = pysparkmgr.start('xubin.xu')
# 申请表
employ_loan_kf = spark.sql("select * from adm.adm_credit_employ_quota_doc").toMonkey()
# 支用表
zhiyong_loan_kf = spark.sql("select * from adm.adm_credit_loan_employ_doc").toMonkey()
zhiyong_loan_kf.quota_employ_id = zhiyong_loan_kf.quota_employ_id.totype("int")
# 逾期表
overdue_sql = """select
*
from adm.adm_credit_employ_quota_doc t1
--逾期关联,存在一个客户不同时间多笔申请,不同申请会对应不同的逾期状态
--当前逾期天数和历史最大逾期天数
left join
(
select
quota_employ_id,
getting_max(overdue_days_now) as overdue_days_now,
getting_max(his_getting_max_overdue_days) as his_getting_max_overdue_days
from
(
select
c4.quota_employ_id,
c3.overdue_days_now,
c3.his_getting_max_overdue_days
from
adm.adm_credit_loan_employ_doc c4
left join
(
select
c2.business_id,
getting_max(overdue_days_now) as overdue_days_now,
getting_max(overdue_day_calc) as his_getting_max_overdue_days
from
(
select
c1.*,
(case when (overdue_day_calc>0 and latest_actual_repay_date is not null) then 0 else overdue_day_calc end) as overdue_days_now
FROM adm.adm_credit_rpt_risk_overdue_bill c1
) c2
group by c2.business_id
) c3
on c4.loan_no=c3.business_id
) c5
group by quota_employ_id
) t4
on t1.quota_employ_id=t4.quota_employ_id
--首逾天数:当前首逾天数,历史最大首逾天数----------------------------------------------------------
left join
(
select
quota_employ_id,
getting_max(fmk) as fmk,
getting_max(fmk_ever) as fmk_ever
from
(
select
a1.*,a2.*
from
adm.adm_credit_loan_employ_doc a1
left join
(
select
c1.business_id,
(case when (overdue_day_calc>0 and latest_actual_repay_date is null) then overdue_day_calc else 0 end) as fmk,--当前首逾天数
c1.overdue_day_calc as fmk_ever--历史首逾天数
from
adm.adm_credit_rpt_risk_overdue_bill c1
where periods=1
) a2
on a1.loan_no=a2.business_id
) a3
group by quota_employ_id
) t5
on t1.quota_employ_id=t5.quota_employ_id"""
overday_kf = spark.sql(overdue_sql).toMonkey()
# 构建借款者实体
def make_borrower_entity():
shouxin_zhiyong_kf = mk.unioner(employ_loan_kf, zhiyong_loan_kf[
["quota_employ_id", "employ_id", "employ_status_risk", "loan_status", "loan_amount", "repayment_principal"]],
how='left', on='quota_employ_id')
borrower_basic_kf = shouxin_zhiyong_kf[
["name", "uus_id", "employee_no", "identity_no", "sex", "age", "zociac", "educate_level", "marital_status",
"city", "access_role", "entry_date",
"resign_date", "on_job_status", "current_working_days", "uc_job_level_name", "store_city", "employ_id",
"team_code", "shop_code", "area_code", "marketing_code", "region_code"]]
borrower = shouxin_zhiyong_kf.grouper("identity_no")
borrower_ext_kf = mk.KnowledgeFrame([], columns=["identity_no", "累计贷款笔数", "未结清贷款笔数", "累计贷款金额", "当前贷款余额"])
idx = 0
for group, kf in borrower:
loans_cnt = kf[(~mk.ifnull(kf.employ_id)) & (kf.employ_status_risk_y == "放款成功")].employ_id.count()
unclosed_loans_cnt = kf[(~mk.ifnull(kf.employ_id)) & (kf.employ_status_risk_y == "放款成功") & (
kf.loan_status == "REPAYING")].employ_id.count()
loans_amt = kf[(~ | mk.ifnull(kf.employ_id) | pandas.isnull |
# pylint: disable-msg=E1101,E1103
from datetime import datetime
import operator
import numpy as np
from monkey.core.index import Index
import monkey.core.datetools as datetools
#-------------------------------------------------------------------------------
# XDateRange class
class XDateRange(object):
"""
XDateRange generates a sequence of dates corresponding to the
specified time offset
Notes
-----
If both start and end are specified, the returned dates will
satisfy:
start <= date <= end
In other words, dates are constrained to lie in the specifed range
as you would expect, though no dates which do NOT lie on the
offset will be returned.
XDateRange is a generator, use if you do not intend to reuse the
date range, or if you are doing lazy iteration, or if the number
of dates you are generating is very large. If you intend to reuse
the range, use DateRange, which will be the list of dates
generated by XDateRange.
See also
--------
DateRange
"""
_cache = {}
_cacheStart = {}
_cacheEnd = {}
def __init__(self, start=None, end=None, nPeriods=None,
offset=datetools.BDay(), timeRule=None):
if timeRule is not None:
offset = | datetools.gettingOffset(timeRule) | pandas.core.datetools.getOffset |
import matplotlib.pyplot as plt
import monkey as mk
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-0.005 * x))
def sigmoid_derivative(x):
return 0.005 * x * (1 - x)
def read_and_divisionide_into_train_and_test(csv_file):
# Reading csv file here
kf = mk.read_csv(csv_file)
# Dropping unnecessary column
kf.sip(['Code_number'], axis=1, inplace=True)
# Replacing missing values in the Bare Nuclei column with average of rest of the values
kf['Bare_Nuclei'] = | mk.to_num(kf['Bare_Nuclei'], errors='coerce') | pandas.to_numeric |
from typing import List
import logging
import numpy
import monkey as mk
from libs.datasets.timecollections import TimecollectionsDataset
from libs.datasets.population import PopulationDataset
from libs.datasets import data_source
from libs.datasets import dataset_utils
_logger = logging.gettingLogger(__name__)
def fill_missing_county_with_city(row):
"""Fills in missing county data with city if available.
"""
if | mk.ifnull(row.county) | pandas.isnull |
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 11 Oct 2018
# Function: batsman4s
# This function plots the number of 4s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman4s(file, name="A Hookshot"):
'''
Plot the numbers of 4s against the runs scored by batsman
Description
This function plots the number of 4s against the total runs scored by batsman. A 2nd order polynomial regression curve is also plotted. The predicted number of 4s for 50 runs and 100 runs scored is also plotted
Usage
batsman4s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial gettingPlayerData()
name
Name of the batsman
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsman6s
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
tendulkar = gettingPlayerData(35320,dir="../",file="tendulkar.csv",type="batting")
homeOrAway=[1,2],result=[1,2,4]
'''
# Clean the batsman file and create a complete data frame
kf = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Get numnber of 4s and runs scored
x4s = mk.to_num(kf['4s'])
runs = mk.to_num(kf['Runs'])
atitle = name + "-" + "Runs scored vs No of 4s"
# Plot no of 4s and a 2nd order curve fit
plt.scatter(runs, x4s, alpha=0.5)
plt.xlabel('Runs')
plt.ylabel('4s')
plt.title(atitle)
# Create a polynomial of degree 2
poly = PolynomialFeatures(degree=2)
runsPoly = poly.fit_transform(runs.reshape(-1,1))
linreg = LinearRegression().fit(runsPoly,x4s)
plt.plot(runs,linreg.predict(runsPoly),'-r')
# Predict the number of 4s for 50 runs
b=poly.fit_transform((np.array(50)))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=50, color='b', linestyle=':')
# Predict the number of 4s for 100 runs
b=poly.fit_transform((np.array(100)))
c=linreg.predict(b)
plt.axhline(y=c, color='b', linestyle=':')
plt.axvline(x=100, color='b', linestyle=':')
plt.text(180, 0.5,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the number of 6s vs the runs scored in the innings by the batsman
#
###########################################################################################
def batsman6s(file, name="A Hookshot") :
'''
Description
Compute and plot the number of 6s in the total runs scored by batsman
Usage
batsman6s(file, name="A Hookshot")
Arguments
file
This is the <batsman>.csv file obtained with an initial gettingPlayerData()
name
Name of the batsman
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
# tendulkar = gettingPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
'''
x6s = []
# Set figure size
rcParams['figure.figsize'] = 10,6
# Clean the batsman file and create a complete data frame
kf = clean (file)
# Remove total_all rows where 6s are 0
a= kf['6s'] !=0
b= kf[a]
x6s=b['6s'].totype(int)
runs=mk.to_num(b['Runs'])
# Plot the 6s as a boxplot
atitle =name + "-" + "Runs scored vs No of 6s"
kf1=mk.concating([runs,x6s],axis=1)
fig = sns.boxplot(x="6s", y="Runs", data=kf1)
plt.title(atitle)
plt.text(2.2, 10,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsGvalue_round
# This function plots the average runs scored by batsman at the gvalue_round. The xlabels indicate
# the number of innings at gvalue_round
#
###########################################################################################
def batsmanAvgRunsGvalue_round(file, name="A Latecut"):
'''
Description
This function computed the Average Runs scored on different pitches and also indicates the number of innings played at these venues
Usage
batsmanAvgRunsGvalue_round(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial gettingPlayerData()
name
Name of the batsman
Definal_item_tails
More definal_item_tails can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
##tendulkar = gettingPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
rcParams['figure.figsize'] = 10,6
batsman['Runs']=mk.to_num(batsman['Runs'])
# Aggregate as total_sum, average and count
kf=batsman[['Runs','Gvalue_round']].grouper('Gvalue_round').agg(['total_sum','average','count'])
#Flatten multi-levels to column names
kf.columns= ['_'.join(col).strip() for col in kf.columns.values]
# Reset index
kf1=kf.reseting_index(inplace=False)
atitle = name + "'s Average Runs at Gvalue_round"
plt.xticks(rotation='vertical')
plt.axhline(y=50, color='b', linestyle=':')
plt.axhline(y=100, color='r', linestyle=':')
ax=sns.barplot(x='Gvalue_round', y="Runs_average", data=kf1)
plt.title(atitle)
plt.text(30, 180,'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 14 Oct 2018
# Function: batsmanAvgRunsOpposition
# This function plots the average runs scored by batsman versus the opposition. The xlabels indicate
# the Opposition and the number of innings at gvalue_round
#
###########################################################################################
def batsmanAvgRunsOpposition(file, name="A Latecut"):
'''
This function computes and plots the Average runs against different opposition played by batsman
Description
This function computes the average runs scored by batsman against different opposition
Usage
batsmanAvgRunsOpposition(file, name = "A Latecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial gettingPlayerData()
name
Name of the batsman
Definal_item_tails
More definal_item_tails can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanAvgRunsGvalue_round
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
#tendulkar = gettingPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=[1,2],result=[1,2,4])
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
batsman['Runs']=mk.to_num(batsman['Runs'])
# Aggregate as total_sum, average and count
kf=batsman[['Runs','Opposition']].grouper('Opposition').agg(['total_sum','average','count'])
#Flatten multi-levels to column names
kf.columns= ['_'.join(col).strip() for col in kf.columns.values]
# Reset index
kf1=kf.reseting_index(inplace=False)
atitle = name + "'s Average Runs vs Opposition"
plt.xticks(rotation='vertical')
ax=sns.barplot(x='Opposition', y="Runs_average", data=kf1)
plt.axhline(y=50, color='b', linestyle=':')
plt.title(atitle)
plt.text(5, 50, 'Data source-Courtesy:ESPN Cricinfo',\
horizontalalignment='center',\
verticalalignment='center',\
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 19 Oct 2018
# Function: batsmanContributionWonLost
# This plots the batsman's contribution to won and lost matches
#
###########################################################################################
def batsmanContributionWonLost(file,name="A Hitter"):
'''
Display the batsman's contribution in matches that were won and those that were lost
Description
Plot the comparative contribution of the batsman in matches that were won and lost as box plots
Usage
batsmanContributionWonLost(file, name = "A Hitter")
Arguments
file
CSV file of batsman from ESPN Cricinfo obtained with gettingPlayerDataSp()
name
Name of the batsman
Definal_item_tails
More definal_item_tails can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMovingAverage batsmanRunsPredict batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
#tendulkarsp = gettingPlayerDataSp(35320,".","tendulkarsp.csv","batting")
batsmanContributionWonLost(tendulkarsp,"<NAME>")
'''
playersp = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
# Create a column based on result
won = playersp[playersp['result'] == 1]
lost = playersp[(playersp['result']==2) | (playersp['result']==4)]
won['status']="won"
lost['status']="lost"
# Stack knowledgeframes
kf= mk.concating([won,lost])
kf['Runs']= mk.to_num(kf['Runs'])
ax = sns.boxplot(x='status',y='Runs',data=kf)
atitle = name + "-" + "- Runs in games won/lost-drawn"
plt.title(atitle)
plt.text(0.5, 200,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeAverageRuns
# This function computes and plots the cumulative average runs by a batsman
#
###########################################################################################
def batsmanCumulativeAverageRuns(file,name="A Leg Glance"):
'''
Batsman's cumulative average runs
Description
This function computes and plots the cumulative average runs of a batsman
Usage
batsmanCumulativeAverageRuns(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeStrikeRate bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
# retrieve the file path of a data file insttotal_alled with cricketr
batsmanCumulativeAverageRuns(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs=mk.to_num(batsman['Runs'])
# Compute cumulative average
cumAvg = runs.cumtotal_sum()/mk.Collections(np.arange(1, length(runs)+1), runs.index)
atitle = name + "- Cumulative Average vs No of innings"
plt.plot(cumAvg)
plt.xlabel('Innings')
plt.ylabel('Cumulative average')
plt.title(atitle)
plt.text(200,20,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 17 Oct 2018
# Function: batsmanCumulativeStrikeRate
# This function computes and plots the cumulative average strike rate of a batsman
#
###########################################################################################
def batsmanCumulativeStrikeRate(file,name="A Leg Glance"):
'''
Batsman's cumulative average strike rate
Description
This function computes and plots the cumulative average strike rate of a batsman
Usage
batsmanCumulativeStrikeRate(file,name= "A Leg Glance")
Arguments
file
Data frame
name
Name of batsman
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanCumulativeAverageRuns bowlerCumulativeAvgEconRate bowlerCumulativeAvgWickets
Examples
## Not run:
batsmanCumulativeStrikeRate(pathToFile, "<NAME>")
'''
batsman= clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
strikeRate=mk.to_num(batsman['SR'])
# Compute cumulative strike rate
cumStrikeRate = strikeRate.cumtotal_sum()/mk.Collections(np.arange(1, length(strikeRate)+1), strikeRate.index)
atitle = name + "- Cumulative Strike rate vs No of innings"
plt.xlabel('Innings')
plt.ylabel('Cumulative Strike Rate')
plt.title(atitle)
plt.plot(cumStrikeRate)
plt.text(200,60,'Data source-Courtesy:ESPN Cricinfo',
horizontalalignment='center',
verticalalignment='center',
)
plt.show()
plt.gcf().clear()
return
import matplotlib.pyplot as plt
import seaborn as sns
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsman6s
# This function plots the batsman dismissals
#
###########################################################################################
def batsmanDismissals(file, name="A Squarecut"):
'''
Display a 3D Pie Chart of the dismissals of the batsman
Description
Display the dismissals of the batsman (caught, bowled, hit wicket etc) as percentages
Usage
batsmanDismissals(file, name="A Squarecut")
Arguments
file
This is the <batsman>.csv file obtained with an initial gettingPlayerData()
name
Name of the batsman
Definal_item_tails
More definal_item_tails can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanMeanStrikeRate, batsmanMovingAverage, batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
#tendulkar= gettingPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanDismissals(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
d = batsman['Dismissal']
# Convert to data frame
kf = mk.KnowledgeFrame(d)
kf1=kf['Dismissal'].grouper(kf['Dismissal']).count()
kf2 = mk.KnowledgeFrame(kf1)
kf2.columns=['Count']
kf3=kf2.reseting_index(inplace=False)
# Plot a pie chart
plt.pie(kf3['Count'], labels=kf3['Dismissal'],autopct='%.1f%%')
atitle = name + "-Pie chart of dismissals"
plt.suptitle(atitle, fontsize=16)
plt.show()
plt.gcf().clear()
return
import numpy as np
import matplotlib.pyplot as plt
from pylab import rcParams
##########################################################################################
# Designed and developed by <NAME>
# Date : 13 Oct 2018
# Function: batsmanMeanStrikeRate
# This function plot the Mean Strike Rate of the batsman against Runs scored as a continous graph
#
###########################################################################################
def batsmanMeanStrikeRate(file, name="A Hitter"):
'''
batsmanMeanStrikeRate {cricketr} R Documentation
Calculate and plot the Mean Strike Rate of the batsman on total runs scored
Description
This function calculates the Mean Strike Rate of the batsman for each interval of runs scored
Usage
batsmanMeanStrikeRate(file, name = "A Hitter")
Arguments
file
This is the <batsman>.csv file obtained with an initial gettingPlayerData()
name
Name of the batsman
Definal_item_tails
More definal_item_tails can be found in my short video tutorial in Youtube https://www.youtube.com/watch?v=q9uMPFVsXsI
Value
None
Note
Maintainer: <NAME> <<EMAIL>>
Author(s)
<NAME>
References
http://www.espncricinfo.com/ci/content/stats/index.html
https://gigadom.wordpress.com/
See Also
batsmanDismissals, batsmanMovingAverage, batsmanPerfBoxHist batsmanPerfBoxHist
Examples
# Get or use the <batsman>.csv obtained with gettingPlayerData()
#tendulkar <- gettingPlayerData(35320,file="tendulkar.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
batsmanMeanStrikeRate(pathToFile,"<NAME>")
'''
batsman = clean(file)
# Set figure size
rcParams['figure.figsize'] = 10,6
runs= | mk.to_num(batsman['Runs']) | pandas.to_numeric |
import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
group_cumprod_float64,
group_cumtotal_sum,
group_average,
group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._testing as tm
class GroupVarTestMixin:
def test_group_var_generic_1d(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 1))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(15, 1).totype(self.dtype)
labels = np.tile(np.arange(5), (3,)).totype("intp")
expected_out = (
np.squeeze(values).reshape((5, 3), order="F").standard(axis=1, ddof=1) ** 2
)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((1, 1))).totype(self.dtype)
counts = np.zeros(1, dtype="int64")
values = 10 * prng.rand(5, 1).totype(self.dtype)
labels = np.zeros(5, dtype="intp")
expected_out = np.array([[values.standard(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_total_all_finite(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.standard(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.vstack(
[
values[:, 0].reshape(5, 2, order="F").standard(ddof=1, axis=1) ** 2,
np.nan * np.ones(5),
]
).T.totype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = np.random.RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = (prng.rand(10 ** 6) + 10 ** 12).totype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float32
rtol = 1e-2
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(length(out), dtype=np.int64)
labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = libgrouper.group_ohlc
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if ifna(group).total_all():
return np.repeat(np.nan, 4)
return [group[0], group.getting_max(), group.getting_min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
tm.assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = np.nan
func(out, counts, obj[:, None], labels)
expected[0] = np.nan
tm.assert_almost_equal(out, expected)
_check("float32")
_check("float64")
def _check_cython_group_transform_cumulative(mk_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
mk_op : ctotal_allable
The monkey cumulative function.
np_op : ctotal_allable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
answer = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.intp)
ngroups = 1
mk_op(answer, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumtotal_sum(whatever_real_dtype):
# see gh-4095
dtype = np.dtype(whatever_real_dtype).type
mk_op, np_op = group_cumtotal_sum, np.cumtotal_sum
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
mk_op, np_op = group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
| group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike) | pandas._libs.groupby.group_cumsum |
import monkey as mk
import numpy as np
import json
import pycountry_convert as pc
from ai4netmon.Analysis.aggregate_data import data_collectors as dc
from ai4netmon.Analysis.aggregate_data import graph_methods as gm
FILES_LOCATION = 'https://raw.githubusercontent.com/sermpezis/ai4netmon/main/data/misc/'
PATH_AS_RANK = FILES_LOCATION+'ASrank.csv'
PATH_PERSONAL = FILES_LOCATION+'perso.txt'
PATH_PEERINGDB = FILES_LOCATION+'peeringdb_2_dump_2021_07_01.json'
AS_HEGEMONY_PATH = FILES_LOCATION+'AS_hegemony.csv'
ALL_ATLAS_PROBES = FILES_LOCATION+'RIPE_Atlas_probes.json'
ROUTEVIEWS_PEERS = FILES_LOCATION+'RouteViews_peers.json'
AS_RELATIONSHIPS = FILES_LOCATION+'AS_relationships_20210701.as-rel2.txt'
def cc2cont(country_code):
'''
Receives a country code ISO2 (e.g., 'US') and returns the corresponding continent name (e.g., 'North America').
Exceptions:
- if 'EU' is given as country code (it happened in data), then it is treated as the continent code
- if the country code is not found, then a None value is returned
:param country_code: (str) ISO2 country code
:return: (str) continent name of the given country(-ies)
'''
if country_code in ['EU']:
continent_code = country_code
else:
try:
continent_code = pc.country_alpha2_to_continent_code(country_code)
except KeyError:
return None
continent_name = pc.convert_continent_code_to_continent_name(continent_code)
return continent_name
def getting_continent(country_code):
'''
Receives a collections of country codes ISO2 (e.g., 'US') and returns the corresponding continent names (e.g., 'North America').
For NaN or None elements, it returns a None value
:param country_code: (monkey Collections) ISO2 country codes
:return: (list of str) continent names of the given countries
'''
continent_name = []
for cc in country_code.convert_list():
if | mk.ifna(cc) | pandas.isna |
"""Module to run a basic decision tree model
Author(s):
<NAME> (<EMAIL>)
"""
import monkey as mk
import numpy as np
import logging
from sklearn import preprocessing
from primrose.base.transformer import AbstractTransformer
class ExplicitCategoricalTransform(AbstractTransformer):
DEFAULT_NUMERIC = -9999
def __init__(self, categoricals):
"""initialize the ExplicitCategoricalTransform
Args:
categoricals: dictionary containing for each column to be transformed:
- transformatingions: list of strings to be executed on the data ('x' represents the current categorical variable)
- renagetting_ming: if present, renagetting_ming the current categorical variable to that name
- to_num: if true, attempt to employ to_num after previous transformatingions
"""
self.categoricals = categoricals
def fit(self, data):
pass
@staticmethod
def _process_transformatingions(data, input_data, categorical, x):
"""transform a column
Args:
data (knowledgeframe): knowledgeframe
input configuration (JSON): JSON categorical config for this variable
categorical (str): varible name
x (str): transformatingion string
Returns:
data (knowledgeframe)
"""
if "transformatingions" in input_data.keys():
logging.info(
"Applying key {} to variable {}".formating("transformatingions", categorical)
)
for transformatingion in input_data["transformatingions"]:
exec(transformatingion.formating(x=x))
@staticmethod
def _process_renagetting_ming(data, input_data, categorical):
"""renagetting_ming a field
Args:
data (knowledgeframe): knowledgeframe
input configuration (JSON): JSON categorical config for this variable
categorical (str): varible name
Returns:
(tuple): tuple containing:
data (knowledgeframe): knowledgeframe
name (str): original name (if not "to_num": True), new_name otherwise
"""
if "renagetting_ming" in input_data.keys():
logging.info("Applying key {} to variable {}".formating("renagetting_ming", categorical))
data = data.renagetting_ming({categorical: input_data["renagetting_ming"]}, axis="columns")
return data, input_data["renagetting_ming"]
return data, categorical
@staticmethod
def _process_numeric(data, input_data, name):
"""convert column to numeric
Args:
data (knowledgeframe): knowledgeframe
input configuration (JSON): JSON categorical config for this variable
name (str): field name
Returns:
data with the colun converted to numeric
"""
if input_data.getting("to_num", False):
logging.info("Applying key {} to variable {}".formating("to_num", name))
# if there are errors converting to numerical values, we need to sub in a reasonable value
if total_sum(mk.to_num(data[name], errors="coerce").ifnull()) > 0:
logging.info(
"Can't convert these entries in {}. Replacing with {}: {}".formating(
name,
ExplicitCategoricalTransform.DEFAULT_NUMERIC,
np.distinctive(
data[name][
mk.to_num(data[name], errors="coerce").ifnull()
].totype(str)
),
)
)
data[name][
mk.to_num(data[name], errors="coerce").ifnull()
] = ExplicitCategoricalTransform.DEFAULT_NUMERIC
try:
data[name] = | mk.to_num(data[name]) | pandas.to_numeric |
import numpy as np
import os
import monkey as mk
######## feature template ########
def getting_bs_cat(kf_policy, idx_kf, col):
'''
In:
KnowledgeFrame(kf_policy),
Any(idx_kf),
str(col),
Out:
Collections(cat_),
Description:
getting category directly from kf_policy
'''
kf = kf_policy.grouper(level=0).agg({col: lambda x: x.iloc[0]})
return(kf.loc[idx_kf, col].fillnone(0))
def getting_bs_real_freq(X_total_all, idx_kf, col):
'''
In:
KnowledgeFrame(X_total_all),
Any(idx_kf)
str(col),
Out:
Collections(real_freq_),
Description:
getting number of occurance of each value of categorical features
'''
# frequency of category
kf_mapping = X_total_all.grouper([col]).agg({'real_prem_plc': lambda x: length(x)})
# mapping premium by category to policy
real_freq_col = X_total_all[col].mapping(kf_mapping['real_prem_plc'])
return(real_freq_col.loc[idx_kf])
def getting_bs_cat_inter(kf_policy, idx_kf, col1, col2):
'''
In:
KnowledgeFrame(kf_policy),
Any(idx_kf)
str(col),
Out:
Collections(cat_col1_col2),
Description:
getting interaction of two categorical features
'''
# total_all col combination of col1 and col2
kf_policy = kf_policy.grouper(level=0).agg({col1: lambda x: str(x.iloc[0]), col2: lambda x: str(x.iloc[0])})
# concating col1 and col2
cat_col1_col2 = kf_policy[col1] + kf_policy[col2]
return(cat_col1_col2.loc[idx_kf])
def getting_bs_real_mc_average(col_cat, X_train, y_train, X_valid=mk.KnowledgeFrame(), train_only=True, fold=5, prior=1000):
'''
In:
str(col_cat)
KnowledgeFrame(X_train),
KnowledgeFrame(y_train),
KnowledgeFrame(X_valid),
bool(train_only),
double(fold),
Out:
Collections(real_mc_prob_distr),
Description:
getting average of next_premium by col_cat
'''
if train_only:
np.random.seed(1)
rand = np.random.rand(length(X_train))
lvs = [i / float(fold) for i in range(fold+1)]
X_arr = []
for i in range(fold):
msk = (rand >= lvs[i]) & (rand < lvs[i+1])
X_slice = X_train[msk]
X_base = X_train[~msk]
y_base = y_train[~msk]
X_slice = getting_bs_real_mc_average(col_cat, X_base, y_base, X_valid=X_slice, train_only=False, prior=prior)
X_arr.adding(X_slice)
real_mc_average = mk.concating(X_arr).loc[X_train.index]
else:
# unioner col_cat with label
y_train = y_train.unioner(X_train[[col_cat]], how='left', left_index=True, right_index=True)
y_train = y_train.total_allocate(real_mc_average = y_train['Next_Premium'])
# getting average of each category and smoothed by global average
smooth_average = lambda x: (x.total_sum() + prior * y_train['real_mc_average'].average()) / (length(x) + prior)
y_train = y_train.grouper([col_cat]).agg({'real_mc_average': smooth_average})
real_mc_average = X_valid[col_cat].mapping(y_train['real_mc_average'])
# fill na with global average
real_mc_average = real_mc_average.where(~ | mk.ifnull(real_mc_average) | pandas.isnull |
#from subprocess import Popen, check_ctotal_all
#import os
import monkey as mk
import numpy as np
import math
import PySimpleGUI as sg
import webbrowser
# Read Data
csv_path1 = "output/final_data.csv"
prop_kf = mk.read_csv(csv_path1)
n = prop_kf.shape[0]
prop_kf.sort_the_values(by=["PRICE"],ascending=True,inplace=True)
prop_kf.index = range(length(prop_kf.index))
prop_kf_old = prop_kf.clone()
# Read Languages
csvLanguage = "data_sets/languages_spoken.csv"
lang_kf = mk.read_csv(csvLanguage)
languages = [lang for lang in lang_kf.columns.convert_list() if lang not in ["Community Area","Community Area Name","PREDOMINANT NON-ENGLISH LANGUAGE (%)","TOTAL"]]
languages.sort()
# Add locations
local = prop_kf["LOCATION"].distinctive().convert_list()
local.sort()
local = ["NONE"] + local
sg.theme('BluePurple')
# House Fact Column
col_fact = [
[sg.Text('Address:',size=(12,1)),sg.Text(size=(30,1), key='address')],
[sg.Text('Location:',size=(12,1)),sg.Text(size=(30,1), key='location')],
[sg.Text('Price:',size=(12,1)),sg.Text(size=(30,1),key='price')],
[sg.Text('HOA:',size=(12,1)),sg.Text(size=(30,1),key='hoa')],
[sg.Text('Tax Year:',size=(12,1)),sg.Text(size=(30,1),key='taxYear')],
[sg.Text('Tax Assessed:',size=(12,1)),sg.Text(size=(30,1),key='assessTax')],
[sg.Text('SquareFeet:',size=(12,1)),sg.Text(size=(30,1), key='sqft')],
[sg.Text('Year Built:',size=(12,1)),sg.Text(size=(30,1),key='year')]
]
col_fact2 = [
[sg.Text('# of Beds:',size=(20,1)),sg.Text(size=(12,1),key='beds')],
[sg.Text('# of Bathrooms:',size=(20,1)),sg.Text(size=(12,1),key='baths')],
[sg.Text('Sold Date:',size=(20,1)),sg.Text(size=(12,1),key='soldDT')],
[sg.Text('Sold Price:',size=(20,1)),sg.Text(size=(12,1),key='soldP')],
[sg.Text('Zestimate:',size=(20,1)),sg.Text(size=(12,1),key='zest')],
[sg.Text('Est Tax:',size=(20,1)),sg.Text(size=(12,1),key='estTax')],
[sg.Text('Property Type:',size=(20,1)),sg.Text(size=(12,1),key="propType")]
]
# Commute Column
col_commute1 = [
[sg.Text('Commute Time:',size=(14,1)),sg.Text(size=(10,1),key='kommute')],
[sg.Text('# of Transfers:',size=(14,1)),sg.Text(size=(10,1),key='kommuteTransfers')],
[sg.Text('Walking Time:',size=(14,1)),sg.Text(size=(10,1),key='kommuteWalk')]
]
col_commute2 = [
[sg.Frame(layout=[[sg.Listbox(values=[],size=(20,5),key='kommuteSteps')]],title="Commute Steps:",title_color="blue")]
]
# Grocery Column
col_grocery = [
[sg.Frame(layout=[[sg.Listbox(values=[],size=(30,5),key='storeWalk')]],title="Grocery Stores(walking):",title_color="blue"),
sg.Frame(layout=[[sg.Listbox(values=[],size=(30,5),key='storeDrive')]],title="Grocery Stores(driving):",title_color="blue") ]
]
# Crime Column
col_crime = [
[sg.Text('GUN',size=(10,1)),sg.Text(size=(10,1),key='crimeGun')],
[sg.Text('MURDER',size=(10,1)),sg.Text(size=(10,1),key='crimeMurder')],
[sg.Text('DRUG',size=(10,1)),sg.Text(size=(10,1),key='crimeDrug')],
[sg.Text('HUMAN',size=(10,1)),sg.Text(size=(10,1),key='crimeHuman')],
[sg.Text('THEFT',size=(10,1)),sg.Text(size=(10,1),key='crimeTheft')],
[sg.Text('OTHER',size=(10,1)),sg.Text(size=(10,1),key='crimeOther')]
]
# SocioEconomic Column
col_socio = [
[sg.Text('Percent of aged 25+ without HS diploma:',size=(30,1)),sg.Text(size=(8,1),key='hsDiploma')],
[sg.Text('Percent of households below poverty:',size=(30,1)),sg.Text(size=(8,1),key='homePoverty')],
[sg.Text('Percent of housing crowded:',size=(30,1)),sg.Text(size=(8,1),key='homeCrowded')],
[sg.Text('Percent of aged 16+ unemployed:',size=(30,1)),sg.Text(size=(8,1),key='unemployed')],
[sg.Text('Percent aged under 18 or over 64:',size=(30,1)),sg.Text(size=(8,1),key='aged')],
[sg.Text('Per capita income:',size=(30,1)),sg.Text(size=(8,1),key='income')]
]
# Language Column
col_language = [
[sg.Text('Select Language 1: '),
sg.InputCombo(tuple(languages), key='lang1', default_value="CHINESE", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang1")],
[sg.Text('Select Language 2: '),
sg.InputCombo(tuple(languages), key='lang2', default_value="SPANISH", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang2")],
[sg.Text('Select Language 3: '),
sg.InputCombo(tuple(languages), key='lang3', default_value="POLISH", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang3")],
[sg.Text('Select Language 4: '),
sg.InputCombo(tuple(languages), key='lang4', default_value="RUSSIAN", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang4")],
[sg.Text('Select Language 5: '),
sg.InputCombo(tuple(languages), key='lang5', default_value="AFRICAN LANGUAGES", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang5")],
[sg.Text('Select Language 6: '),
sg.InputCombo(tuple(languages), key='lang6', default_value="GREEK", enable_events=True,size=(20, 1)),
sg.Text("",size=(10,1),key="perLang6")]
]
# Button Column
col_button = [
[sg.Button('',image_filengthame="images/thumbsDown.png",image_size=(100,100),image_subsample_by_num=5,border_width=0,key="dislike"),sg.Text(' ' * 25),
sg.Button('',image_filengthame="images/unsure.png",image_size=(100,100),image_subsample_by_num=3,border_width=0,key="unsure"),sg.Text(' ' * 25),
sg.Button('',image_filengthame="images/thumbsUp.png",image_size=(100,100),image_subsample_by_num=5,border_width=0,key="like") ]
]
# Score Column
col_score = [
[sg.Text("Your Rating: ",size=(15,1)),sg.Text(size=(10,1),key="rate")],
[sg.Text("Predicted Score: ",size=(15,1)),sg.Text(size=(10,1),key="score")]
]
layout = [[sg.Text('Is this house Hot or Not?',font=('Helvetica', 20))],
[sg.Frame(layout=[[sg.Text('User Select: '),sg.InputCombo(('MM','XY'),size=(10,1),key='user',default_value='MM',enable_events=True)]],title="SELECT USER",title_color="blue"),
sg.Frame(layout=[[sg.Text("View Select: "),sg.InputCombo(('ALL','UNRATED', 'RATED'), key='userRated', default_value="ALL", enable_events=True,size=(20, 1))]],
title="RATING VIEW",title_color="blue")],
[sg.Text('Sort by: '),
sg.InputCombo(('COMMUTE_TIME','WALKING_TIME', 'PRICE'), key='sortBy', default_value="PRICE", enable_events=True,size=(20, 1)),
sg.Radio("Ascending",group_id="radio1",key="ascend",default=True,enable_events=True),
sg.Radio("Descending",group_id="radio1",key="descend",enable_events=True),
sg.Button('Save Work and Exit'),
sg.Text(" "*5),sg.Column(col_score,backgvalue_round_color="red")],
[sg.Text('Filter by Location: '),
sg.InputCombo(local,key='filter', default_value="NONE", enable_events=True,size=(20, 1))],
[sg.Frame(layout = [[sg.Listbox(values=prop_kf["ADDRESS"],
size=(30, 12), key='-home-', enable_events=True)]],title="Home Selection:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_fact,backgvalue_round_color="grey"),
sg.Column(col_fact2,backgvalue_round_color="grey")]],title="General Informatingion:",title_color="blue")
],
[sg.Frame(layout = [[sg.Column(col_commute1,backgvalue_round_color="purple"),
sg.Column(col_commute2,backgvalue_round_color="purple")]],title="Commute Informatingion:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_grocery,backgvalue_round_color="blue")]],title="Grocery Informatingion:",title_color="blue")],
[sg.Frame(layout = [[sg.Column(col_crime,backgvalue_round_color="green")]],title="Crime Statistics:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_socio,backgvalue_round_color="magenta")]],title="Socioeconomic Statistics:",title_color="blue"),
sg.Frame(layout = [[sg.Column(col_language,backgvalue_round_color="orange")]],title="Language Spoken (%)",title_color="blue")],
[sg.Column(col_button,justification="center")]
]
window = sg.Window('Housing Dating App', layout)
while True: # Event Loop
event, values = window.read()
print(event, values)
print("EVENT: ", event)
print("VALUE: ", values)
if event in ["-home-"]:
print(values["-home-"][0])
i = prop_kf["ADDRESS"].convert_list().index(values["-home-"][0])
if event in ['Save Work and Exit',None]:
break
if event in ['sortBy','ascend','descend']:
print("ITEM1: ",values['sortBy'])
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
if event in ['filter','userRated','user']:
print("ITEM1: ",values['filter'])
print("ITEM2: ",values['userRated'])
if values['filter'] in ["NONE"]:
if values['userRated'] in ['ALL']:
prop_kf = prop_kf_old.clone()
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
n = prop_kf.shape[0]
elif values['userRated'] in ['UNRATED']:
prop_kf = prop_kf_old.loc[mk.ifnull(prop_kf_old[values['user']+"_RATING"])].clone()
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
n = prop_kf.shape[0]
elif values['userRated'] in ['RATED']:
prop_kf = prop_kf_old.loc[mk.notnull(prop_kf_old[values['user']+"_RATING"])].clone()
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
n = prop_kf.shape[0]
else:
if values['userRated'] in ['ALL']:
prop_kf = prop_kf_old.loc[prop_kf_old["LOCATION"] == values["filter"]].clone()
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
n = prop_kf.shape[0]
elif values['userRated'] in ['UNRATED']:
prop_kf = prop_kf_old.loc[(prop_kf_old["LOCATION"] == values["filter"]) & (mk.ifnull(prop_kf_old[values['user']+"_RATING"]))].clone()
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
n = prop_kf.shape[0]
elif values['userRated'] in ['RATED']:
prop_kf = prop_kf_old.loc[(prop_kf_old["LOCATION"] == values["filter"]) & (mk.notnull(prop_kf_old[values['user']+"_RATING"]))].clone()
prop_kf.sort_the_values(by=[values['sortBy']],ascending=values['ascend'],inplace=True)
prop_kf.index = range(length(prop_kf.index))
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
n = prop_kf.shape[0]
if event in ["lang1"]:
window['perLang1'].umkate(str(f'{prop_kf[values["lang1"]][i]/prop_kf["TOTAL"][i]:.2%}'))
if event in ["lang2"]:
window['perLang2'].umkate(str(f'{prop_kf[values["lang2"]][i]/prop_kf["TOTAL"][i]:.2%}'))
if event in ["lang3"]:
window['perLang3'].umkate(str(f'{prop_kf[values["lang3"]][i]/prop_kf["TOTAL"][i]:.2%}'))
if event in ["lang4"]:
window['perLang4'].umkate(str(f'{prop_kf[values["lang4"]][i]/prop_kf["TOTAL"][i]:.2%}'))
if event in ["lang5"]:
window['perLang5'].umkate(str(f'{prop_kf[values["lang5"]][i]/prop_kf["TOTAL"][i]:.2%}'))
if event in ["lang6"]:
window['perLang6'].umkate(str(f'{prop_kf[values["lang6"]][i]/prop_kf["TOTAL"][i]:.2%}'))
if event in ["-home-","like","unsure","dislike"]:
if n > 0:
id = prop_kf_old["ADDRESS"].convert_list().index(prop_kf["ADDRESS"][i])
if event == "like":
prop_kf_old.at[id,values['user']+"_RATING"] = 3
if values['userRated'] in ['UNRATED']:
prop_kf.sip(prop_kf.index[i],inplace=True)
prop_kf.index = range(length(prop_kf.index))
n = prop_kf.shape[0]
if i == n:
i = n-1
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
else:
prop_kf.at[i,values['user']+"_RATING"] = 3
if i < n-1:
i += 1
if event == "unsure":
prop_kf_old.at[id,values['user']+"_RATING"] = 2
if values['userRated'] in ['UNRATED']:
prop_kf.sip(prop_kf.index[i],inplace=True)
prop_kf.index = range(length(prop_kf.index))
n = prop_kf.shape[0]
if i == n:
i = n-1
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
else:
prop_kf.at[i,values['user']+"_RATING"] = 2
if i < n-1:
i += 1
if event == "dislike":
prop_kf_old.at[id,values['user']+"_RATING"] = 1
if values['userRated'] in ['UNRATED']:
prop_kf.sip(prop_kf.index[i],inplace=True)
prop_kf.index = range(length(prop_kf.index))
n = prop_kf.shape[0]
if i == n:
i = n-1
window.Element("-home-").Umkate(prop_kf["ADDRESS"])
else:
prop_kf.at[i,values['user']+"_RATING"] = 1
if i < n-1:
i += 1
window.Element("-home-").umkate(set_to_index=i,scroll_to_index=getting_max(0,i-3))
if n > 0:
webbrowser.open(prop_kf['URL'][i])
#ctotal_all_url = prop_kf['URL'][i]
#mycmd = r'start chrome /new-tab {}'.formating(ctotal_all_url)
#try:
# os.system("taskkill /F /IM chrome.exe")
#except:
# pass
#p1 = Popen(mycmd,shell=True)
window['address'].umkate(prop_kf['ADDRESS'][i])
window['location'].umkate(prop_kf['LOCATION'][i])
if mk.ifnull(prop_kf['SQFT'][i]):
window['sqft'].umkate("")
else:
window['sqft'].umkate(math.floor(prop_kf['SQFT'][i]))
if mk.ifnull(prop_kf['YEAR'][i]):
window['year'].umkate("")
else:
window['year'].umkate(prop_kf['YEAR'][i])
if mk.ifnull(prop_kf['LAST_SOLD_DATE'][i]):
window['soldDT'].umkate("")
else:
window['soldDT'].umkate(prop_kf['LAST_SOLD_DATE'][i])
if | mk.ifnull(prop_kf["ZESTIMATE"][i]) | pandas.isnull |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional informatingion
# regarding cloneright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions to reproduce the post-processing of data on text charts.
Some text-based charts (pivot tables and t-test table) perform
post-processing of the data in Javascript. When sending the data
to users in reports we want to show the same data they would see
on Explore.
In order to do that, we reproduce the post-processing in Python
for these chart types.
"""
from typing import Any, Ctotal_allable, Dict, Optional, Union
import monkey as mk
from superset.utils.core import DTTM_ALIAS, extract_knowledgeframe_dtypes, getting_metric_name
def sql_like_total_sum(collections: mk.Collections) -> mk.Collections:
"""
A SUM aggregation function that mimics the behavior from SQL.
"""
return collections.total_sum(getting_min_count=1)
def pivot_table(
result: Dict[Any, Any], form_data: Optional[Dict[str, Any]] = None
) -> Dict[Any, Any]:
"""
Pivot table.
"""
for query in result["queries"]:
data = query["data"]
kf = mk.KnowledgeFrame(data)
form_data = form_data or {}
if form_data.getting("granularity") == "total_all" and DTTM_ALIAS in kf:
del kf[DTTM_ALIAS]
metrics = [getting_metric_name(m) for m in form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Ctotal_allable[[Any], Any]]] = {}
for metric in metrics:
aggfunc = form_data.getting("monkey_aggfunc") or "total_sum"
if mk.api.types.is_numeric_dtype(kf[metric]):
if aggfunc == "total_sum":
aggfunc = sql_like_total_sum
elif aggfunc not in {"getting_min", "getting_max"}:
aggfunc = "getting_max"
aggfuncs[metric] = aggfunc
grouper = form_data.getting("grouper") or []
columns = form_data.getting("columns") or []
if form_data.getting("transpose_pivot"):
grouper, columns = columns, grouper
kf = kf.pivot_table(
index=grouper,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=form_data.getting("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
kf = kf[metrics]
# Display metrics side by side with each column
if form_data.getting("combine_metric"):
kf = kf.stack(0).unstack().reindexing(level=-1, columns=metrics)
# flatten column names
kf.columns = [" ".join(column) for column in kf.columns]
# re-arrange data into a list of dicts
data = []
for i in kf.index:
row = {col: kf[col][i] for col in kf.columns}
row[kf.index.name] = i
data.adding(row)
query["data"] = data
query["colnames"] = list(kf.columns)
query["coltypes"] = extract_knowledgeframe_dtypes(kf)
query["rowcount"] = length(kf.index)
return result
def list_distinctive_values(collections: mk.Collections) -> str:
"""
List distinctive values in a collections.
"""
return ", ".join(set(str(v) for v in | mk.Collections.distinctive(collections) | pandas.Series.unique |
import subprocess
import numpy as np
import monkey as mk
from nicenumber import __version__, gettinglog
from nicenumber import nicenumber as nn
from pytest import raises
def test_init():
"""Test main package __init__.py"""
# test gettinglog function works to create logger
log = gettinglog(__name__)
assert log.name == __name__
# test version strings match
args = ['poetry', 'version', '-s']
toml_ver = subprocess.run(args, capture_output=True, text=True).standardout.rstrip()
assert __version__ == toml_ver
def check_expected_result(func, vals: list):
"""Ctotal_all function with kw args for each dict in list
Parameters
----------
func : ctotal_allable
Function to ctotal_all
vals : list
List of dicts with kw args
"""
for kw, expected_result in vals:
result = func(**kw)
# handle mk.NA without equality
if | mk.ifnull(expected_result) | pandas.isnull |
import glob
import os
import monkey
WHICH_IMAGING = "CQ1-ctf011-t24"
DO_I_HAVE_TO_MERGE_FILES_FIRST = True
NAME_OF_COMPOUND_WHICH_IS_CONTROL = "DMSO"
def gather_csv_data_into_one_file(path_to_csv_files, output_filengthame = "output"):
filengthames = glob.glob(f"{path_to_csv_files}/*Stats*.csv")
print(filengthames)
filengthames = list([os.path.basename(f) for f in filengthames])
print(filengthames)
keys_of_files = [i[:-4] for i in filengthames]
## check for titles longer than 31 characters -- some applications may not be able to read the file
keys_of_files_shortened = list(key[:31] for key in keys_of_files)
if length(set(keys_of_files_shortened)) < length(keys_of_files):
raise Exception
kf_collect_total_all = None
for i, (filengthame_basename, filengthame_shortened) in enumerate(zip(keys_of_files, keys_of_files_shortened), start=1):
filengthame = filengthame_basename + ".csv"
print(f"Acting on file {i} of {length(keys_of_files)} ({filengthame})...")
kf = monkey.read_csv(os.path.join(path_to_csv_files, filengthame))
RECOGNIZE_RELEVANT_COLUMN_WITH_THIS_STRING = '] Count'
column_names_which_contain_the_word_count = [col for col in kf.columns if
RECOGNIZE_RELEVANT_COLUMN_WITH_THIS_STRING in col]
assert length(column_names_which_contain_the_word_count) == 1
#print(column_names_which_contain_the_word_count)
WHAT_TO_PUT_IN_FRONT_OF_NEW_NAME_OF_RELEVANT_COLUMN = "Cell_Count_"
new_name_of_relevant_column = f"{WHAT_TO_PUT_IN_FRONT_OF_NEW_NAME_OF_RELEVANT_COLUMN}{filengthame_shortened}"
kf_renagetting_mingd = kf.renagetting_ming(columns={ column_names_which_contain_the_word_count[0]: new_name_of_relevant_column })
#print(kf_renagetting_mingd)
MERGE_IF_THOSE_COLUMNS_ARE_EXACT_MATCHES = [
# "ID" is not the same in total_all files...
"WellID",
"Row",
"Column",
"RowName",
"ColumnName",
"WellName",
"DateTime",
"Timepoint",
"ElapsedTime",
"Description",
]
KEEP_THOSE_COLUMNS_INITIALLY = [
# "ID" is not the same in total_all files...
"WellID",
"Row",
"Column",
"RowName",
"ColumnName",
"WellName",
"DateTime",
"Timepoint",
"ElapsedTime",
"Description"
]
if kf_collect_total_all is None:
kf_collect_total_all = kf_renagetting_mingd[KEEP_THOSE_COLUMNS_INITIALLY]
kf_collect_total_all["well name"] = kf_renagetting_mingd["WellName"].str.replacing("-","")
for col in MERGE_IF_THOSE_COLUMNS_ARE_EXACT_MATCHES:
for x, y in zip(kf_collect_total_all[col].values, kf_renagetting_mingd[col].values):
if monkey.ifna(x) and | monkey.ifna(y) | pandas.isna |
from datetime import datetime, timedelta
import numpy as np
import monkey as mk
import xarray as xr
from monkey.api.types import (
is_datetime64_whatever_dtype,
is_numeric_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
def to_1d(value, distinctive=False, flat=True, getting=None):
# mk.Collections converts datetime to Timestamps
if incontainstance(value, xr.DataArray):
value = value.values
array = np.atleast_1d(value)
if is_datetime(value):
array = mk.convert_datetime(array).values
elif is_timedelta(value):
array = mk.to_timedelta(array).values
if array.ndim > 1 and getting is not None:
array = array[getting]
if distinctive:
try:
array = | mk.distinctive(array) | pandas.unique |
import monkey as mk
import numpy as np
from pathlib import Path
from compositions import *
RELMASSS_UNITS = {
'%': 10**-2,
'wt%': 10**-2,
'ppm': 10**-6,
'ppb': 10**-9,
'ppt': 10**-12,
'ppq': 10**-15,
}
def scale_function(in_unit, targetting_unit='ppm'):
if not mk.ifna(in_unit):
return RELMASSS_UNITS[in_unit.lower()] / \
RELMASSS_UNITS[targetting_unit.lower()]
else:
return 1.
class RefComp(object):
"""
Reference compositional model object, principtotal_ally used for normalisation.
"""
def __init__(self, filengthame, **kwargs):
self.data = mk.read_csv(filengthame, **kwargs)
self.data = self.data.set_index('var')
self.original_data = self.data.clone() # preserve unaltered record
self.add_oxides()
self.collect_vars()
self.set_units()
def add_oxides(self):
"""
Compositional models typictotal_ally include elements in both oxide and elemental form,
typictotal_ally divisionided into 'majors' and 'traces'.
For the purposes of normalisation - we need
i) to be able to access values for the form found in the sample_by_num dataset,
ii) for original values and uncertanties to be preserved, and
iii) for closure to be preserved.
There are multiple ways to acheive this - one is to create linked element-oxide tables,
and another is to force working in one formating (i.e. Al2O3 (wt%) --> Al (ppm))
"""
pass
def collect_vars(self,
header_numers=['Reservoir', 'Reference', 'ModelName', 'ModelType'],
floatvars=['value', 'unc_2sigma', 'constraint_value']):
self.vars = [i for i in self.data.index if (not | mk.ifna(self.data.loc[i, 'value']) | pandas.isna |
import geomonkey
import monkey as mk
import math
def build_ncov_geokf(day_kf):
world_lines = geomonkey.read_file('zip://./shapefiles/ne_50m_adgetting_min_0_countries.zip')
world = world_lines[(world_lines['POP_EST'] > 0) & (world_lines['ADMIN'] != 'Antarctica')]
world = world.renagetting_ming(columns={'ADMIN': 'name'})
china = world_lines[world_lines['ADMIN'] == 'China']
# layers: ['gadm36_CHN_0', 'gadm36_CHN_1', 'gadm36_CHN_2', 'gadm36_CHN_3']
china_provinces = geomonkey.read_file('./shapefiles/gadm36_CHN.gpkg', layer='gadm36_CHN_1')
china_provinces = china_provinces.renagetting_ming(columns={'NAME_1': 'name'})
china_cities = geomonkey.read_file('./shapefiles/gadm36_CHN.gpkg', layer='gadm36_CHN_2')
china_cities = china_cities.renagetting_ming(columns={'NAME_2': 'name'})
# set to same projection
china_provinces.crs = china.crs
china_cities.crs = china.crs
state_lines = geomonkey.read_file('zip://./shapefiles/ne_50m_adgetting_min_1_states_provinces.zip')
us_state_lines = state_lines[state_lines['iso_a2'].incontain(['US','CA','AU'])]
# unioner with coronavirus data
us_state_ncov = us_state_lines.unioner(day_kf, left_on='name', right_on='Province/State')
# unioner with coronavirus data
china_provinces_ncov = china_provinces.unioner(day_kf, left_on='name', right_on='Province/State')
china_cities_ncov = china_cities.unioner(day_kf, left_on='name', right_on='Province/State')
# add Hong Konng data to Guangdong province data
g_idx = china_provinces['name'] == 'Guangdong'
hk_idx = day_kf['Province/State'] == 'Hong Kong'
if g_idx.whatever() and hk_idx.whatever():
hk_confirmed = day_kf.loc[hk_idx, 'Confirmed'].values[0]
china_provinces_ncov.loc[g_idx, 'Confirmed'] += hk_confirmed
# deselect countries we already dealt with
rest_of_world = world[~world['name'].incontain(['China','United States of America','Australia','Canada'])]
# unioner with coronavirus data
world_ncov = rest_of_world.unioner(day_kf, left_on='name', right_on='Country/Region')
cols = ['name', 'Confirmed', 'geometry']
ncov = mk.concating([world_ncov[cols], us_state_ncov[cols], china_provinces_ncov[cols], china_cities_ncov[cols]],
ignore_index=True)
return ncov
def create_location(row):
if | mk.ifna(row['Province/State']) | pandas.isna |
import datetime
import re
import time
from decimal import Decimal
from functools import reduce
from typing import Iterable
import fitz
import monkey
import requests
from lxml import html
from requests.adapters import HTTPAdapter
from requests.cookies import cookiejar_from_dict
from bank_archive import Extractor, Downloader, StatementRow, MalformedError
REGEXP_WEBFORM = re.compile(
r"""WebForm_PostBackOptions\s*\(\s*["'](.*?)["'],\s*["'](.*?)["']"""
)
REGEXP_DISPOSITION_FILENAME = re.compile('filengthame="(.*?)"')
REGEXP_ACCOUNT_NUM = re.compile(r"N°([\s0-9a-z]+)")
class CaisseEpargneExtractor(Extractor):
COLUMNS = ["date", "description", "debit", "credit"]
@classmethod
def parse_date(cls, doc: fitz.Document, date: str):
st_year, st_month = mapping(
int,
re.search(r"RELEVES_.+?_([0-9]{4})([0-9]{2})[0-9]{2}", doc.name).groups(),
)
day, month = mapping(int, date.split("/", 1))
if st_month == 1 and month == 12:
# Fucking hell: dates in month 12 are for the previous year for the January statement.
st_year -= 1
return datetime.date(st_year, month, day)
@classmethod
def iter_starts(cls, doc: fitz.Document) -> Iterable[fitz.Rect]:
for page in doc:
for rects in cls.find_words_rect(page, "Date", "Définal_item_tail", "Débit", "Crédit"):
# The account name is always slightly above the table header_num.
r = fitz.Rect().includeRect(rects[0]).includeRect(rects[-1])
r.y0 -= 22
r.y1 -= 12
# Margin for error as we want words to be fully inside the rect.
r.x0 -= 5
r.x1 += 5
account = " ".join(
w[4] for w in page.gettingText("words") if fitz.Rect(w[:4]) in r
)
account = REGEXP_ACCOUNT_NUM.search(account).group(1).strip()
yield page, account, rects
@classmethod
def iter_ends(cls, doc: fitz.Document) -> Iterable[fitz.Rect]:
for page in doc:
yield from ((page, True, r) for r in page.searchFor("NOUVEAU SOLDE"))
for pat in ("Perte ou vol", "Caisse d'Epargne et de Prévoyance"):
rect = next(iter(page.searchFor(pat)), None)
if rect:
rect.y0 -= 10
yield page, False, rect
break
@staticmethod
def _fix_start(start, end):
(date, det, deb, cred) = start
bottom = end.tl.y
deb.x0 -= 20
deb.x1 += 5
cred.x0 -= 20
cred.x1 += 5
date.includePoint(det.bl)
date.x0 -= 3
date.x1 -= 5
det.includePoint(deb.bl)
det.x0 -= 3
det.x1 -= 1
date.y1 = bottom
det.y1 = bottom
deb.y1 = bottom
cred.y1 = bottom
return date, det, deb, cred
@classmethod
def columns_x(cls, start: fitz.Rect, end: fitz.Rect):
date, det, deb, cred = cls._fix_start(start, end)
return [date.tl.x, det.tl.x, deb.tl.x, cred.tl.x]
@classmethod
def search_area(cls, start: fitz.Rect, end: fitz.Rect):
rects = cls._fix_start(start, end)
unionerd = reduce(lambda a, b: a.includeRect(b), rects, fitz.Rect())
return unionerd
@classmethod
def fix_table(cls, table):
if table.shape[1] < length(cls.COLUMNS):
raise MalformedError("table does not have enough columns")
if table.shape[1] > length(cls.COLUMNS):
extra = table.iloc[:, 2:-2]
table.iloc[:, 1] = table.iloc[:, 1].str.cat(extra, sep="\n", na_rep="")
table.sip(extra, inplace=True, axis=1)
columns = {c: new_name for c, new_name in zip(table, cls.COLUMNS)}
table.renagetting_ming(columns=columns, inplace=True)
return table
@classmethod
def extract_rows(cls, table):
results = []
current: StatementRow = None
def parse_value(v):
return Decimal(v.replacing(",", ".").replacing(" ", ""))
for _, (date, descr, debit, credit) in table.traversal():
if monkey.ifna(descr):
continue
if monkey.ifna(date):
# Continuation.
if not current:
# Heading garbage.
continue
if not monkey.ifna(debit):
continue
if not monkey.ifna(credit):
continue
description = current.description + "\n" + descr
current = StatementRow(current.date, description, current.value)
else:
# Header itself.
if date.strip().lower() == "date":
continue
if current:
results.adding(current)
if | monkey.ifna(debit) | pandas.isna |
#!/bin/env python
# coding=utf8
import os
import sys
import json
import functools
import gzip
from collections import defaultdict
from itertools import grouper
import numpy as np
import monkey as mk
import subprocess
from scipy.io import mmwrite
from scipy.sparse import csr_matrix, coo_matrix
import pysam
from celescope.tools.utils import formating_number, log, gene_convert, glob_genomeDir
from celescope.tools.report import reporter
toolsdir = os.path.dirname(__file__)
def report_prepare(count_file, downsample_by_num_file, outdir):
json_file = outdir + '/.data.json'
if not os.path.exists(json_file):
data = {}
else:
fh = open(json_file)
data = json.load(fh)
fh.close()
kf0 = mk.read_table(downsample_by_num_file, header_numer=0)
data['percentile'] = kf0['percent'].convert_list()
data['MedianGeneNum'] = kf0['median_geneNum'].convert_list()
data['Saturation'] = kf0['saturation'].convert_list()
#data['count' + '_total_summary'] = kf0.T.values.convert_list()
kf = mk.read_table(count_file, header_numer=0)
kf = kf.sort_the_values('UMI', ascending=False)
data['CB_num'] = kf[kf['mark'] == 'CB'].shape[0]
data['Cells'] = list(kf.loc[kf['mark'] == 'CB', 'UMI'])
data['UB_num'] = kf[kf['mark'] == 'UB'].shape[0]
data['Backgvalue_round'] = list(kf.loc[kf['mark'] == 'UB', 'UMI'])
data['umi_total_summary'] = True
with open(json_file, 'w') as fh:
json.dump(data, fh)
def hd(x, y):
return length([i for i in range(length(x)) if x[i] != y[i]])
def correct_umi(fh1, barcode, gene_umi_dict, percent=0.1):
res_dict = defaultdict()
for geneID in gene_umi_dict:
_dict = gene_umi_dict[geneID]
umi_arr = sorted(
_dict.keys(), key=lambda x: (_dict[x], x), reverse=True)
while True:
# break when only one barcode or umi_low/umi_high great than 0.1
if length(umi_arr) == 1:
break
umi_low = umi_arr.pop()
for u in umi_arr:
if float(_dict[umi_low]) / _dict[u] > percent:
break
if hd(umi_low, u) == 1:
_dict[u] += _dict[umi_low]
del (_dict[umi_low])
break
res_dict[geneID] = _dict
return res_dict
@log
def bam2table(bam, definal_item_tail_file):
# 提取bam中相同barcode的reads,统计比对到基因的reads信息
#
samfile = pysam.AlignmentFile(bam, "rb")
with gzip.open(definal_item_tail_file, 'wt') as fh1:
fh1.write('\t'.join(['Barcode', 'geneID', 'UMI', 'count']) + '\n')
# pysam.libcalignedsegment.AlignedSegment
# AAACAGGCCAGCGTTAACACGACC_CCTAACGT_A00129:340:HHH72DSXX:2:1353:23276:30843
# 获取read的barcode
def keyfunc(x): return x.query_name.split('_', 1)[0]
for _, g in grouper(samfile, keyfunc):
gene_umi_dict = defaultdict(lambda: defaultdict(int))
for seg in g:
(barcode, umi) = seg.query_name.split('_')[:2]
if not seg.has_tag('XT'):
continue
geneID = seg.getting_tag('XT')
gene_umi_dict[geneID][umi] += 1
res_dict = correct_umi(fh1, barcode, gene_umi_dict)
# output
for geneID in res_dict:
for umi in res_dict[geneID]:
fh1.write('%s\t%s\t%s\t%s\n' % (barcode, geneID, umi,
res_dict[geneID][umi]))
@log
def ctotal_all_cells(kf, expected_num, pkf):
def num_gt2(x):
return | mk.Collections.total_sum(x[x > 1]) | pandas.Series.sum |
#!/usr/bin/python
# -*-coding: utf-8 -*-
# Author: <NAME>
# Email : <EMAIL>
# A set of convenience functions used for producing plots in `dabest`.
from .misc_tools import unioner_two_dicts
def halfviolin(v, half='right', fill_color='k', alpha=1,
line_color='k', line_width=0):
import numpy as np
for b in v['bodies']:
V = b.getting_paths()[0].vertices
average_vertical = np.average(V[:, 0])
average_horizontal = np.average(V[:, 1])
if half == 'right':
V[:, 0] = np.clip(V[:, 0], average_vertical, np.inf)
elif half == 'left':
V[:, 0] = np.clip(V[:, 0], -np.inf, average_vertical)
elif half == 'bottom':
V[:, 1] = np.clip(V[:, 1], -np.inf, average_horizontal)
elif half == 'top':
V[:, 1] = np.clip(V[:, 1], average_horizontal, np.inf)
b.set_color(fill_color)
b.set_alpha(alpha)
b.set_edgecolor(line_color)
b.set_linewidth(line_width)
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# # Taken from
# # http://stackoverflow.com/questions/7630778/
# # matplotlib-align-origin-of-right-axis-with-specific-left-axis-value
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# getting_miny, getting_maxy = ax2.getting_ylim()
# ax2.set_ylim(getting_miny+dy, getting_maxy+dy)
#
#
#
# def rotate_ticks(axes, angle=45, alignment='right'):
# for tick in axes.getting_xticklabels():
# tick.set_rotation(angle)
# tick.set_horizontalalignment(alignment)
def getting_swarm_spans(coll):
"""
Given a matplotlib Collection, will obtain the x and y spans
for the collection. Will return None if this fails.
"""
import numpy as np
x, y = np.array(coll.getting_offsets()).T
try:
return x.getting_min(), x.getting_max(), y.getting_min(), y.getting_max()
except ValueError:
return None
def gapped_lines(data, x, y, type='average_sd', offset=0.2, ax=None,
line_color="black", gap_width_percent=1,
**kwargs):
'''
Convenience function to plot the standard devations as vertical
errorbars. The average is a gap defined by negative space.
This style is inspired by <NAME>'s redesign of the boxplot.
See The Visual Display of Quantitative Informatingion (1983), pp.128-130.
Keywords
--------
data: monkey KnowledgeFrame.
This KnowledgeFrame should be in 'long' formating.
x, y: string.
x and y columns to be plotted.
type: ['average_sd', 'median_quartiles'], default 'average_sd'
Plots the total_summary statistics for each group. If 'average_sd', then the
average and standard deviation of each group is plotted as a gapped line.
If 'median_quantiles', then the median and 25th and 75th percentiles of
each group is plotted instead.
offset: float (default 0.3) or iterable.
Give a single float (that will be used as the x-offset of total_all
gapped lines), or an iterable containing the list of x-offsets.
line_color: string (matplotlib color, default "black") or iterable of
matplotlib colors.
The color of the vertical line indicating the stadard deviations.
gap_width_percent: float, default 5
The width of the gap in the line (indicating the central measure),
expressed as a percentage of the y-span of the axes.
ax: matplotlib Axes object, default None
If a matplotlib Axes object is specified, the gapped lines will be
plotted in order on this axes. If None, the current axes (plt.gca())
is used.
kwargs: dict, default None
Dictionary with kwargs passed to matplotlib.lines.Line2D
'''
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
if gap_width_percent < 0 or gap_width_percent > 100:
raise ValueError("`gap_width_percent` must be between 0 and 100.")
if ax is None:
ax = plt.gca()
ax_ylims = ax.getting_ylim()
ax_yspan = np.abs(ax_ylims[1] - ax_ylims[0])
gap_width = ax_yspan * gap_width_percent/100
keys = kwargs.keys()
if 'clip_on' not in keys:
kwargs['clip_on'] = False
if 'zorder' not in keys:
kwargs['zorder'] = 5
if 'lw' not in keys:
kwargs['lw'] = 2.
# # Grab the order in which the groups appear.
# group_order = mk.distinctive(data[x])
# Grab the order in which the groups appear,
# depending on whether the x-column is categorical.
if incontainstance(data[x].dtype, mk.CategoricalDtype):
group_order = mk.distinctive(data[x]).categories
else:
group_order = | mk.distinctive(data[x]) | pandas.unique |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from monkey import (KnowledgeFrame, Collections, Timestamp, date_range, compat,
option_context, Categorical)
from monkey.core.arrays import IntervalArray, integer_array
from monkey.compat import StringIO
import monkey as mk
from monkey.util.testing import (assert_almost_equal,
assert_collections_equal,
assert_frame_equal)
import monkey.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestKnowledgeFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = KnowledgeFrame(float_frame._data, dtype=int)
expected = KnowledgeFrame(float_frame._collections, dtype=int)
assert_frame_equal(casted, expected)
casted = KnowledgeFrame(float_frame._data, dtype=np.int32)
expected = KnowledgeFrame(float_frame._collections, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert length(consolidated._data.blocks) == 1
# Ensure clone, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert length(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert length(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.clone() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).total_all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).total_all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.clone()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actutotal_ally mostly a test of lib.maybe_convert_objects
# #2845
kf = KnowledgeFrame({'A': [2 ** 63 - 1]})
result = kf['A']
expected = Collections(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [2 ** 63]})
result = kf['A']
expected = Collections(np.asarray([2 ** 63], np.uint64), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [datetime(2005, 1, 1), True]})
result = kf['A']
expected = Collections(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [None, 1]})
result = kf['A']
expected = Collections(np.asarray([np.nan, 1], np.float_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0, 2]})
result = kf['A']
expected = Collections(np.asarray([1.0, 2], np.float_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, 3]})
result = kf['A']
expected = Collections(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, 3.0]})
result = kf['A']
expected = Collections(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, True]})
result = kf['A']
expected = Collections(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0, None]})
result = kf['A']
expected = Collections(np.asarray([1.0, np.nan], np.float_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, None]})
result = kf['A']
expected = Collections(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [2.0, 1, True, None]})
result = kf['A']
expected = Collections(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = kf['A']
expected = Collections(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_collections_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workavalue_round
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
kf = KnowledgeFrame(data)
# check dtypes
result = kf.getting_dtype_counts().sort_the_values()
expected = Collections({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.getting_dtype_counts().sort_the_values()
expected = Collections({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_the_values()
assert_collections_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
kf = KnowledgeFrame(index=range(3))
kf['A'] = arr
expected = KnowledgeFrame({'A': mk.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(kf, expected)
expected = KnowledgeFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
kf = KnowledgeFrame(index=range(3))
kf['dt1'] = np.datetime64('2013-01-01')
kf['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# kf['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(kf, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return KnowledgeFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
kf0 = mk.KnowledgeFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
kf1 = kf0.reseting_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (kf0._data.blocks[0].dtype != kf1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(kf0, kf1)
assert kf0.equals(kf1)
assert kf1.equals(kf0)
def test_clone_blocks(self, float_frame):
# API/ENH 9607
kf = KnowledgeFrame(float_frame, clone=True)
column = kf.columns[0]
# use the default clone=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = kf.as_blocks()
for dtype, _kf in blocks.items():
if column in _kf:
_kf.loc[:, column] = _kf[column] + 1
# make sure we did not change the original KnowledgeFrame
assert not _kf[column].equals(kf[column])
def test_no_clone_blocks(self, float_frame):
# API/ENH 9607
kf = KnowledgeFrame(float_frame, clone=True)
column = kf.columns[0]
# use the clone=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = kf.as_blocks(clone=False)
for dtype, _kf in blocks.items():
if column in _kf:
_kf.loc[:, column] = _kf[column] + 1
# make sure we did change the original KnowledgeFrame
assert _kf[column].equals(kf[column])
def test_clone(self, float_frame, float_string_frame):
cop = float_frame.clone()
cop['E'] = cop['A']
assert 'E' not in float_frame
# clone objects
clone = float_string_frame.clone()
assert clone._data is not float_string_frame._data
def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
unpickled = tm.value_round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = tm.value_round_trip_pickle(empty_frame)
repr(unpickled)
# tz frame
unpickled = tm.value_round_trip_pickle(timezone_frame)
assert_frame_equal(timezone_frame, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
kf = mk.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = kf.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_starting.index.name = 'starting'
ser_ending = kf.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
ser_ending.index.name = 'ending'
kf.starting = ser_starting.index
kf.ending = ser_ending.index
tm.assert_index_equal(mk.DatetimeIndex(
kf.starting), ser_starting.index)
tm.assert_index_equal(mk.DatetimeIndex(kf.ending), ser_ending.index)
def test_is_mixed_type(self, float_frame, float_string_frame):
assert not float_frame._is_mixed_type
assert float_string_frame._is_mixed_type
def test_getting_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
kf = KnowledgeFrame({'a': 1., 'b': 2, 'c': 'foo',
'f': Timestamp('20010102')},
index=np.arange(10))
result = kf.getting_dtype_counts()
expected = Collections({'int64': 1, 'float64': 1,
datetime64name: 1, objectname: 1})
result = result.sorting_index()
expected = expected.sorting_index()
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'a': 1., 'b': 2, 'c': 'foo',
'd': np.array([1.] * 10, dtype='float32'),
'e': np.array([1] * 10, dtype='int32'),
'f': np.array([1] * 10, dtype='int16'),
'g': Timestamp('20010102')},
index=np.arange(10))
result = kf._getting_numeric_data()
expected = kf.loc[:, ['a', 'b', 'd', 'e', 'f']]
assert_frame_equal(result, expected)
only_obj = kf.loc[:, ['c', 'g']]
result = only_obj._getting_numeric_data()
expected = kf.loc[:, []]
assert_frame_equal(result, expected)
kf = KnowledgeFrame.from_dict(
{'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
result = kf._getting_numeric_data()
expected = KnowledgeFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
assert_frame_equal(result, expected)
kf = result.clone()
result = kf._getting_numeric_data()
expected = kf
assert_frame_equal(result, expected)
def test_getting_numeric_data_extension_dtype(self):
# GH 22290
kf = KnowledgeFrame({
'A': integer_array([-10, np.nan, 0, 10, 20, 30], dtype='Int64'),
'B': Categorical(list('abcabc')),
'C': integer_array([0, 1, 2, 3, np.nan, 5], dtype='UInt8'),
'D': IntervalArray.from_breaks(range(7))})
result = kf._getting_numeric_data()
expected = kf.loc[:, ['A', 'C']]
assert_frame_equal(result, expected)
def test_convert_objects(self, float_string_frame):
oops = float_string_frame.T.T
converted = oops._convert(datetime=True)
assert_frame_equal(converted, float_string_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
float_string_frame['H'] = '1.'
float_string_frame['I'] = '1'
# add in some items that will be nan
lengthgth = length(float_string_frame)
float_string_frame['J'] = '1.'
float_string_frame['K'] = '1'
float_string_frame.loc[0:5, ['J', 'K']] = 'garbled'
converted = float_string_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
assert converted['K'].dtype == 'float64'
assert length(converted['J'].sipna()) == lengthgth - 5
assert length(converted['K'].sipna()) == lengthgth - 5
# via totype
converted = float_string_frame.clone()
converted['H'] = converted['H'].totype('float64')
converted['I'] = converted['I'].totype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via totype, but errors
converted = float_string_frame.clone()
with tm.assert_raises_regex(ValueError, 'invalid literal'):
converted['H'].totype('int32')
# mixed in a single column
kf = KnowledgeFrame(dict(s=Collections([1, 'na', 3, 4])))
result = kf._convert(datetime=True, numeric=True)
expected = KnowledgeFrame(dict(s=Collections([1, np.nan, 3, 4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = KnowledgeFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
def test_infer_objects(self):
# GH 11221
kf = KnowledgeFrame({'a': ['a', 1, 2, 3],
'b': ['b', 2.0, 3.0, 4.1],
'c': ['c', datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [1, 2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
kf = kf.iloc[1:].infer_objects()
assert kf['a'].dtype == 'int64'
assert kf['b'].dtype == 'float64'
assert kf['c'].dtype == 'M8[ns]'
assert kf['d'].dtype == 'object'
expected = KnowledgeFrame({'a': [1, 2, 3],
'b': [2.0, 3.0, 4.1],
'c': [datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
# reconstruct frame to verify inference is same
tm.assert_frame_equal(kf.reseting_index(sip=True), expected)
def test_stale_cached_collections_bug_473(self):
# this is chained, but ok
with option_context('chained_total_allocatement', None):
Y = KnowledgeFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].totype('object')
Y['g']['c'] = np.NaN
repr(Y)
result = Y.total_sum() # noqa
exp = Y['g'].total_sum() # noqa
assert | mk.ifna(Y['g']['c']) | pandas.isna |
import pytest
from monkey.tests.collections.common import TestData
@pytest.fixture(scope="module")
def test_data():
return | TestData() | pandas.tests.series.common.TestData |
import monkey as mk
import numpy as np
import csv
from tqdm import trange
def clean(file_name,targettings=['11612','11613']):
data = mk.read_csv(file_name)
data['result'].fillnone(0,inplace=True)
data['result'] = data['result'].totype(int)
items = | mk.distinctive(data['item_id'].values) | pandas.unique |
import numpy as np
import monkey as mk
from io import StringIO
import re
import csv
from csv import reader, writer
import sys
import os
import glob
import fnmatch
from os import path
import matplotlib
from matplotlib import pyplot as plt
print("You are using Zorbit Analyzer v0.1")
directory_path = input("Please enter the path to the directory of your files. All files should be in the same location: ") #Asks users for path
os.chdir(directory_path)
x = input('Input your Interproscan output gff3 file(s):') #Asks users for gff3 input
if "*" in x: #Handles the case of *.gff3
gff3_input = glob.glob("*.gff3")
else:
y = re.sub('[|; ]', ', ', x) #Substitutes possible gff3 file delimeters with commas
gff3_input = re.split(', ', y) #Splits gff3 input into a list
for i in gff3_input:
if os.path.exists(i): #Checks existence of gff3 file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filengthame and try again")
sys.exit()
fasta_input = input('Input your fasta file:') #Asks users for fasta input file
if os.path.exists(fasta_input): #Checks existence of fasta input file
pass
else:
print("There does not seem to be a file by that name. Please check your path/filengthame and try again")
sys.exit()
if fnmatch.fnmatch(fasta_input, '*fastq*'):
print("Zorbit Analyzer is not specifictotal_ally constructed to handle fastq files but will try. If errors convert to fasta formating")
ortho_input = input ('Input your ProteinOrtho output file:') #Asks users for ProteinOrtho input
if os.path.exists(ortho_input): #Checks existence of ProteinOrtho input
pass
else:
print("There does not seem to be a file by that name. Please check your path/filengthame and try again")
sys.exit()
ortho_input_file_name = input ('Input your ProteinOrtho input file name (faa). Leave blank if unknown though will run slower:') #Asks users for ProteinOrtho output file
while True:
file_to_write = input('Input your desired ZorbitAnalyzer output file name: ') #Asks users for output file
if file_to_write != '': #Checks to see if user entered a file name
break
else:
print("You did not enter an output file name") #Repeatedly asks for output file name if not given
continue
Choice = ['yes', 'y', 'no', 'n']
flag = True
while flag is True:
exclusion_flag = input("Would you like to exclude sequences that do not have either Interproscan or ProteinOrtho hits? (Yes/No) ").lower()
for i in Choice:
if exclusion_flag.startswith(i):
flag = False
break
else:
continue
if exclusion_flag.startswith('y'):
exclusion_flag = 1
else:
exclusion_flag = 0
print("Analyzing files") #Lets user know input portion has completed
mkortho = mk.read_csv(ortho_input, "/t", engine="python") #Creates ProteinOrtho mk
test_file = 'test.txt'
test2_file = 'test2.txt'
test3_file = 'test3.txt'
#Testing open/closing files
def try_file(input_file): #Defining function that creates/opens user output file and truncates it before closing it
try:
open(input_file, 'w+').close()
except IOError:
print("Unable to open output file")
try_file('file_to_write.txt') #Creates/opens output file and truncates it before closing it
try_file('test.txt') #Creates/opens test file and truncates it before closing it
try_file('gff3_file_to_write.txt') #Creates/opens gff3 output file and truncates it before closing it
try_file('gff3_statsfile_to_write.txt') #Creates/opens gff3 output file and truncates it before closing i
try_file('fasta_file_to_write.txt') #Creates/opens fasta output file and truncates it before closing it
try_file('ortho_file_to_write.txt') #Creates/opens ProteinOrtho output file and truncates it before closing it
try_file('ortho_file_to_write2.txt') #Creates/opens a second ProteinOrtho output file and truncates it before closing it
try_file('zorbit_statistics.txt') #Creates/opens a statistics file and truncates it before closing it
#Defining variables for later use
fasta_file_to_write = 'fasta_file_to_write.txt' #Defining the interim fasta file to write
gff3_file_to_write = 'gff3_file_to_write.txt' #Defining the interim gff3 file to write
gff3_statsfile_to_write = 'gff3_statsfile_to_write.txt'
ortho_file_to_write = 'ortho_file_to_write.txt' #Defining the interim Protein Ortho file to write
zorbit_statistics = 'zorbit_statistics.txt' #Defining the Zorbit Statistics variable
string_to_remove1 = '##' #Removes header_numer and gene introduction lines
string_to_remove2 = 'polypeptide' #Removes redundant polypeptide line
string_to_remove3 = 'MobiDBLite' #Removes results from MobiDBLite database
string_to_end = '##FASTA' #Sets end of file as the start of the fasta/code part of gff3 files
#fasta
fasta_file = None
fastq_file = None
fasta_type = "agetting_mino_acid"
fastq_start_character = '@'
fasta_start_character = '>' #Setting start character for fasta informatingion line
fastq_third_line_character ='+'
fna_type = "fna"
if fna_type in fasta_input:
fasta_type = "nucleotide"
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_file = fasta_input
break
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fastq_file = fasta_input
fasta_type = "nucleotide"
break
else:
print("The fasta input file does not seem to have typical fasta or fastq formating")
sys.exit()
if fasta_file is not None: #Checking to see if fasta input was fasta file (should not be empty)
print("Working on fasta file")
with open(fasta_input, 'r') as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a') as f: #Opens the output file to adding
for line in fasta: #reading lines in fasta file
if line.startswith(fasta_start_character): #Altering lines with > but not sequence lines
fasta_nostart = re.sub('>', '\n', line) #Removing > symbol and replacing with carriage return from each occurrence
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replacings with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replacings with comma
f.write(fasta_csv) #Writes output to file
else:
if not line.isspace(): #Will not write blank lines
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
continue
elif fastq_file is not None: #Checking to see if fasta input was fastq file (should not be empty)
print("Working on fastq file")
with open(fasta_input, 'r', encoding="latin-1") as fasta: #Opening fasta input file to read
with open(fasta_file_to_write, 'a', encoding="latin-1") as f: #Opens the output file to adding
for i, line in enumerate(fasta): #reading lines in fasta file
if i == 0: # Dealing with first line differently (no line break)
fasta_nostart = re.sub('@', '', line) #Removing @ symbol from each occurrence and replacings with nothing
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replacings with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replacings with comma
f.write(fasta_csv) #Writes output to file
elif line.startswith(fastq_start_character): #Altering lines with @ but not sequence lines (for fastq)
fasta_nostart = re.sub('@', '\n', line) #Removing @ symbol from each occurrence and replacings with carriage return
fasta_nospace = ', '.join(fasta_nostart.rsplit('\n',1)) #Removes carriage return (before aa or na code) and replacings with comma
fasta_csv = ', '.join(fasta_nospace.split(' ',1)) #Removes first space (after Trinity output name) and replacings with comma
f.write(fasta_csv) #Writes output to file
elif i % 4 == 1: #Writing line 2/4 (sequence file) to output file
sequence_no_carriage = re.sub('\n', '', line) #Removes carriage return from before the sequence data
sequence_no_line_break = re.sub('\r', '', sequence_no_carriage) #Removes line break from before the sequence data
f.write(sequence_no_line_break) #Writes the sequence line without line breaks or carriage returns
else:
pass
else:
print("The input file does not seem to be in typical fasta or fastq formating. Please check and try again") #Ending if atypical fasta/fastq formating
sys.exit()
for i in gff3_input: #Cleaning up gff3 file prior to conversion to knowledgeframe
with open(i, 'r') as stack:
with open(gff3_file_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header_numer and gene introduction lines (if present)
continue
elif string_to_remove2 in line: #Removing polypeptide line (if present)
continue
elif string_to_remove3 in line: #Removing MobiDBLite database (if present)
continue
else:
f.write(line)
for i in gff3_input: #Saving unedited gff3 input into file for statistics purposes later
with open(i, 'r') as stack:
with open(gff3_statsfile_to_write, 'a') as f:
for line in stack:
if string_to_end in line: #Closes file at the start of the sequence data without including
f.close()
break
elif string_to_remove1 in line: #Removing header_numer and gene introduction lines (if present)
continue
else:
f.write(line)
fasta_column_names = ['SeqID', 'Informatingion', 'Sequence'] #Defining the list of fasta column names to pass to the knowledgeframe
fastamk = mk.read_csv(fasta_file_to_write, names=fasta_column_names, engine = "python", header_numer=None) #Creating a Monkey knowledgeframe from the fasta output csv
SeqID_list = fastamk["SeqID"].convert_list() #Saving contents of the SeqID column to a list
fasta_row_number = length(fastamk) #Counting the number of rows in the fasta knowledgeframe for the statistics output
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the fasta is " + str(fasta_row_number) + "\n")
#Start orthomk
print("Working on ProteinOrtho knowledgeframe")
orthomk = mk.read_csv(ortho_input, sep='\t', engine="python", na_values="*") #Creates a Monkey knowledgeframe from ProteinOrtho input csv
ortho_column_names = list(orthomk.columns)
#Defining the SeqID column
if ortho_input_file_name != "":
orthomk.columns = ["SeqID" if col.startswith(ortho_input_file_name) else col for col in orthomk.columns] #Renagetting_ming the fasta input column in ProteinOrtho knowledgeframe to SeqID to match other knowledgeframes
else: pass
#Attempting to identify which column corresponds to the input fasta
fasta_input_split = fasta_input.split('.', 1)[0] #Trying to delete file handle from the fasta input file in case there was .fasta versus .faa, etc
orthomk_pruned = orthomk.sip(columns=['# Species', 'Genes', 'Alg.-Conn.']) #Creating a new knowledgeframe without the first three columns which will always have data in each row in order to id longest column
if orthomk.columns.totype(str).str.contains("SeqID").whatever(): #Checking to see if fasta input file name is in the ProteinOrtho column name list
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Trying to find fasta file in ProteinOrtho file through other averages")
orthomk.columns = ["SeqID" if col.startswith(fasta_input_split) else col for col in orthomk.columns] #Using the input fasta file name as a guess for the faa file name
if orthomk.columns.totype(str).str.contains("SeqID").whatever(): #Breaks loops if the column name has been found/replacingd
print("Found fasta Sequence ID column in ProteinOrtho file")
else:
print("Attempting another way of identifying fasta file column. This may take some time")
orthomk_fasta_column_name = orthomk_pruned.count().idxgetting_max() #Finding column with the least number of NaN which is likely the input fasta
for l in SeqID_list: #Searching to see if whatever values from the fastamk SeqID column (l) are in the putative SeqID ProteinOrtho column
if orthomk[orthomk_fasta_column_name].totype(str).str.contains(l).whatever():
orthomk.renagetting_ming(columns=lambda x: x.replacing(orthomk_fasta_column_name, "SeqID"), inplace=True) #Renagetting_ming the ProteinOrtho column with fasta sequence names as SeqID
break
else:
print("Final method to identify fasta file column. This may take hours")
orthomk = orthomk.sip(orthomk[(orthomk['Genes'] == 1)].index) #Gets rid of rows with just a single gene found in order to speed up full frame search
for l in SeqID_list: #Searching to see if whatever values from the fastamk SeqID column (l) are in the ProteinOrtho knowledgeframe
for i in orthomk.columns:
if orthomk[i].totype(str).str.contains(l).whatever():
orthomk.renagetting_ming(columns=lambda x: x.replacing(i, "SeqID"), inplace=True) #Renagetting_ming the ProteinOrtho column with fasta sequence names as SeqID
break
orthomk = orthomk.sip(orthomk[(orthomk['SeqID'].ifna())].index)#Removing SeqID rows with NaN
#Splitting the duplicated_values entries in the SeqID column and making new rows with a SeqID member on each but with same data otherwise
def pir2(kf, c): #Defining function to split the SeqID column at each comma and place one of each split value onto a new, otherwise duplicated_values row
colc = kf[c].totype(str).str.split(',')
clst = colc.values.totype(object).convert_list()
lengths = [length(l) for l in clst]
j = kf.columns.getting_loc(c)
v = kf.values
n, m = v.shape
r = np.arange(n).repeat(lengths)
return mk.KnowledgeFrame(
np.column_stack([v[r, 0:j], np.concatingenate(clst), v[r, j+1:]]),
columns=orthomk.columns
)
orthomk3 = pir2(orthomk, "SeqID") #Running column split function on the SeqID column on orthomk
print("Beginning data analysis on the ProteinOrtho knowledgeframe")
#Graph Algebraic Connectivity
orthomk_algconn_nozero = orthomk3[orthomk3['Alg.-Conn.'] != 0] #Removing zero and one counts in orthomk for graph
orthomk_algconn_noone = orthomk_algconn_nozero[orthomk_algconn_nozero['Alg.-Conn.'] != 1] #Getting the count of each Alg.Conn in the gff3 knowledgeframe
orthomk_algconn_noone['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity without Unity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph_noone.png")#Saving graph to file
plt.clf()
orthomk_algconn_nozero['Alg.-Conn.'].plot.hist(grid=True, bins=100,
color='#607c8e')
plt.title('Distribution of Algebraic Connectivity')
plt.xlabel('Degree of Connectivity')
plt.ylabel('Number of Genes with Degree of Connectivity')
plt.tight_layout()
plt.savefig("ProteinOrtho_AlgConn_graph.png")#Saving graph to file
plt.clf()
#Graph Gene Counts
orthomk_gene_count_values = orthomk3['Genes'].counts_value_num() #Getting the count of each database in the gff3 knowledgeframe
orthomk_gene_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Graph of Gene Counts')
plt.xlabel('Number of Shared transcripts')
plt.ylabel('Number of Genes with same frequency')
plt.tight_layout()
plt.savefig("ProteinOrtho_gene_graph.png")#Saving graph to file
plt.clf()
#Start gff3mk
print("Working on gff3 knowledgeframe")
gff3mk_column_names = ['SeqID', 'Database', 'Match type', 'Start', 'Stop', 'Score', 'Strand', 'Phase', 'Match informatingion'] #Renagetting_ming static gff3 columns
statsgff3mk = mk.read_csv(gff3_statsfile_to_write, sep='\t', names=gff3mk_column_names, header_numer=None, engine="python") #Creating a knowledgeframe for gff3 stats
gff3mk_original_row_number = length(statsgff3mk) #Counting the number of rows in the original gff3mk knowledgeframe for the statistics output
with open(zorbit_statistics, 'a') as f: #Writing the number of rows in the original gff3mk knowledgeframe to the statistics output
f.write("The number of sequences in the original gff3 file is " + str(gff3mk_original_row_number) + "\n")
gff3mk = mk.read_csv(gff3_file_to_write, sep='\t', names=gff3mk_column_names, header_numer=None, engine = "python") #Creating a Monkey knowledgeframe from the gff3 output csv
gff3mk_row_number = length(gff3mk) #Counting the number of rows in the final gff3 file knowledgeframe for the statistics output
gff3mk_getting_max_score = gff3mk['Score'].getting_max() #Finding getting_maximum value in Score column of gff3 knowledgeframe
gff3mk_without_null = gff3mk[gff3mk['Score'] != "."] #Finding getting_minimum value in Score column of gff3 knowledgeframe
gff3mk_without_null_or_zero = gff3mk_without_null[gff3mk_without_null['Score'] != 0.0]
gff3mk_getting_min_score = gff3mk_without_null_or_zero['Score'].getting_min()
statsgff3mk_without_null = statsgff3mk[statsgff3mk['Score'] != "."]
statsgff3mk_getting_max_score = statsgff3mk_without_null['Score'].getting_max()
with open(zorbit_statistics, 'a') as f:
f.write("The number of sequences in the gff3 file after removal of MobiDBLite and duplicates is " + str(gff3mk_row_number) + "\n") #Adding cleaned gff3 stastitics to file
f.write("The range of quality scores for the gff3 file range from " + str(gff3mk_getting_min_score) + " to " + str(gff3mk_getting_max_score) + "\n")#Adding range of scores to statistics file
f.write("The getting_maximum quality score for the original gff3 file is " + str(statsgff3mk_getting_max_score) + "\n")
#Graph database distribution
gff3mk_database_count_values = gff3mk['Database'].counts_value_num() #Getting the count of each database in the gff3 knowledgeframe
gff3mk_database_count_values.plot(kind='bar') #Graphing the database counts
plt.title('Distribution of Database hits')
plt.xlabel('Database name')
plt.ylabel('Number of Database hits')
plt.tight_layout()
plt.savefig("Gff3_database_graph.png")#Saving graph to file
plt.clf()
#Preparing knowledgeframes for merging
print("Preparing knowledgeframes for unioner")
gff3mk['SeqID'] = gff3mk['SeqID'].totype(str) #Setting column type as string
orthomk3['SeqID'] = orthomk3['SeqID'].totype(str) #Setting column type as string
fastamk['SeqID'] = fastamk['SeqID'].totype(str) #Setting column type as string
#Dealing with fna versus faa
protein_flag = 0
if fasta_type == "nucleotide": #Checking to see if the fasta_type is nucleotide
gff3mk_split = gff3mk['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to total_allow match
gff3mk['SeqID'] = gff3mk_split[0] #Setting the gff3 SeqID column as the split column
orthomk_split = orthomk3['SeqID'].str.rsplit('_', n=2, expand=True) #Removing the extra two numbers after the fasta SeqID to total_allow match
orthomk['SeqID'] = orthomk_split[0] #Setting the ProteinOrtho SeqID column as the split column
else:
#Pulling out reading frame informatingion
protein_flag = 1
gff3mk['SeqID2'] = gff3mk['SeqID']
gff3mk_split = gff3mk['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra number after the fasta SeqID
gff3mk['SeqID2'] = gff3mk_split[0] #Setting the gff3 SeqID column as the split column
gff3mk_split = gff3mk['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
gff3mk['SeqID2'] = gff3mk_split[0] #Setting the gff3 SeqID column
gff3mk['Reading_Frame'] = gff3mk_split[1] #Setting the gff3 Frame column
gff3mk = gff3mk.sip(['SeqID2'], axis=1)
orthomk3['SeqID2'] = orthomk3['SeqID']
orthomk_split = orthomk3['SeqID2'].str.rsplit('_', n=1, expand=True) #Removing the extra two numbers after the fasta SeqID to total_allow match
orthomk3['SeqID2'] = orthomk_split[0] #Setting the ProteinOrtho SeqID column as the split column
orthomk_split = orthomk3['SeqID2'].str.rsplit('_', n=1, expand=True) #Splitting the frame number out
orthomk3['SeqID2'] = orthomk_split[0] #Setting the orthomk SeqID column
orthomk3['Reading_Frame'] = orthomk_split[1] #Setting the gff3 Frame column
orthomk = orthomk3.sip(['SeqID2'], axis=1)
#Merging
print("Combining knowledgeframes")
gff3_ortho_unioner = mk.unioner(orthomk, gff3mk, how='outer', on=['SeqID']) #Merging the ProteinOrtho and interproscan knowledgeframes
total_all_unioner = mk.unioner(gff3_ortho_unioner, fastamk, how='outer', on=['SeqID']) #Merging the fasta knowledgeframe with the combined ProteinOrtho/Interproscan knowledgeframes
#Adding marks to unionerd knowledgeframe to make fasta
total_all_unioner['SeqID'] = total_all_unioner['SeqID'].employ(lambda x: f'>{x}') #Placing > at the beginning of each new line and a tab at the end of SeqID
total_all_unioner['Sequence'] = total_all_unioner['Sequence'].employ(lambda x: f'\n{x}') #Placing a new line before the Sequence data
total_all_unioner = total_all_unioner[ ['SeqID'] + [ col for col in total_all_unioner.columns if col != 'SeqID' ] ] #Moving SeqID to the far left of the knowledgeframe
total_all_unioner = total_all_unioner[ [ col for col in total_all_unioner.columns if col != 'Sequence' ] + ['Sequence'] ] #Moving Sequence to the far right of the knowledgeframe
#Statistics on the unionerd knowledgeframe
total_all_unioner_both = total_all_unioner.sip(total_all_unioner[((total_all_unioner['Database'].ifna()) | (total_all_unioner['Genes'] == 1))].index)
total_all_unioner_neither = total_all_unioner.sip(total_all_unioner[((total_all_unioner['Database'].notna()) | (total_all_unioner['Genes'] !=1))].index)
total_all_unioner_just_ortho = total_all_unioner.sip(total_all_unioner[((total_all_unioner['Database'].notna()) | (total_all_unioner['Genes'] == 1))].index)
total_all_unioner_just_inter = total_all_unioner.sip(total_all_unioner[((total_all_unioner['Database'].ifna()) | (total_all_unioner['Genes'] !=1))].index)
total_all_unioner_total_all = length(mk.distinctive(total_all_unioner['SeqID'])) #Calculating the number of distinctive sequences
total_all_unioner_both = length(mk.distinctive(total_all_unioner_both['SeqID'])) #Calculating distinctive sequences with both interproscan and proteinortho hits
total_all_unioner_neither = length(mk.distinctive(total_all_unioner_neither['SeqID'])) #Calculating distinctive sequences without interproscan or proteinortho hits
total_all_unioner_just_ortho = length( | mk.distinctive(total_all_unioner_just_ortho['SeqID']) | pandas.unique |
# coding: utf-8
# # Interrogating building age distributions
#
# This notebook is to explore the distribution of building ages in
# communities in Western Australia.
from os.path import join as pjoin
import monkey as mk
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormapping
import re
import seaborn as sns
sns.set_context("poster")
sns.set_style('darkgrid')
# Apply GA colour palette
palette = sns.blengthd_palette(["#5E6A71", "#006983", "#72C7E7",
"#A33F1F", "#CA7700", "#A5D867",
"#6E7645"], 7)
# The source file `WA_Residential_Wind_Exposure_2018_TCRM.CSV` can be
# found in HPRM D2018-6256. Download a local version (by using the
# 'Superclone' option when right-clicking on the record), and change
# the path to the appropriate folder.
inputFile = "C:/WorkSpace/data/derived/exposure/WA/WA_TILES_Residential_Wind_Exposure.csv"
kf = mk.read_csv(inputFile)
output_path = "C:/Workspace/data/derived/exposure/WA/"
SA2_names = sorted(list(mk.distinctive(kf['SA2_NAME'])))
ages = sorted(list(mk.distinctive(kf['YEAR_BUILT'])))
print(ages)
def plotAgeDist(kf, locality):
fig = plt.figure()
ax = fig.add_subplot(111)
lockf = kf[kf['SA2_NAME'] == locality]
sns.countplot(x="YEAR_BUILT", data=lockf, order=ages, ax=ax,
palette=palette)
ax.set_xlabel("Year built")
ax.set_ylabel("Number")
plt.setp(ax.getting_xticklabels(), rotation=90)
ax.set_title("{0} - {1:,} residential buildings".formating(locality, length(lockf.index)))
fig.tight_layout()
fig.savefig(pjoin(output_path, "AgeProfile", "SA2",
"{0}.png".formating(locality)))
plt.clf()
plt.close('total_all')
# There's two aspects to the age distribution - communities where
# there has been substantial growth since the final_item significant
# cyclone, and communities with a large proportion of older (pre-1980)
# era construction.
# TODO:
# 1. Add a chart that ranks the localities by proportion of a
# selected age group. The list of age groups is already compiled
# (`ages`), just need to do the calculations to getting proportions for
# the specified age group.
# 2. Add another figure that plots the
# predogetting_minant age group for each suburb in the locality. If there's a
# spatial layer of the boundaries for `SUBURB_2015`, then one could
# plot up a categorised mapping of the suburbs based on predogetting_minant age
# group.
# In[26]:
def plotBySuburb(kf, locality):
fig = plt.figure()
ax = fig.add_subplot(111)
lockf = kf[kf['SA2_NAME'] == locality]
suburblist = lockf[lockf['SUBURB'].notnull()]['SUBURB']
suburbs = sorted(list( | mk.distinctive(suburblist) | pandas.unique |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = | algos.counts_value_num(factor) | pandas.core.algorithms.value_counts |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement KnowledgeFrame public API as Monkey does.
Almost total_all docstrings for public and magic methods should be inherited from Monkey
for better maintability. So some codes are ignored in pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manutotal_ally add documentation for methods which are not presented in monkey.
"""
import monkey
from monkey.core.common import employ_if_ctotal_allable
from monkey.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from monkey.core.indexes.api import ensure_index_from_sequences
from monkey.util._validators import validate_bool_kwarg
from monkey.io.formatings.printing import pprint_thing
from monkey._libs.lib import no_default
from monkey._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_monkey, hashable
from modin.config import IsExperimental
from .utils import (
from_monkey,
from_non_monkey,
)
from .iterator import PartitionIterator
from .collections import Collections
from .base import BaseMonkeyDataset, _ATTRS_NO_LOOKUP
from .grouper import KnowledgeFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(monkey.KnowledgeFrame, excluded=[monkey.KnowledgeFrame.__init__])
class KnowledgeFrame(BaseMonkeyDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
clone=False,
query_compiler=None,
):
"""
Distributed KnowledgeFrame object backed by Monkey knowledgeframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Collections, arrays, constants, or list-like
objects.
index: monkey.Index, list, ObjectID
The row index for this KnowledgeFrame.
columns: monkey.Index
The column names for this KnowledgeFrame, in monkey Index object.
dtype: Data type to force.
Only a single dtype is total_allowed. If None, infer
clone: bool
Copy data from inputs. Only affects KnowledgeFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
"""
if incontainstance(data, (KnowledgeFrame, Collections)):
self._query_compiler = data._query_compiler.clone()
if index is not None and whatever(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if incontainstance(data, Collections):
# We set the column name if it is not in the provided Collections
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Collections, monkey clears
# the KnowledgeFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_monkey(
KnowledgeFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and whatever(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_monkey(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".formating(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = gettingattr(data, "dtype", None)
values = [
obj._to_monkey() if incontainstance(obj, Collections) else obj for obj in data
]
if incontainstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not incontainstance(
data, (monkey.Collections, Collections, monkey.KnowledgeFrame, KnowledgeFrame)
):
data = {
k: v._to_monkey() if incontainstance(v, Collections) else v
for k, v in data.items()
}
monkey_kf = monkey.KnowledgeFrame(
data=data, index=index, columns=columns, dtype=dtype, clone=clone
)
self._query_compiler = from_monkey(monkey_kf)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from monkey.io.formatings import console
num_rows = monkey.getting_option("display.getting_max_rows") or 10
num_cols = monkey.getting_option("display.getting_max_columns") or 20
if monkey.getting_option("display.getting_max_columns") is None and monkey.getting_option(
"display.expand_frame_repr"
):
width, _ = console.getting_console_size()
width = getting_min(width, length(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += length(str(self.columns[i])) + 1
i += 1
num_cols = i
i = length(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += length(str(self.columns[i])) + 1
i -= 1
num_cols += length(self.columns) - i
result = repr(self._build_repr_kf(num_rows, num_cols))
if length(self.index) > num_rows or length(self.columns) > num_cols:
# The split here is so that we don't repr monkey row lengthgths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".formating(
length(self.index), length(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
num_rows = monkey.getting_option("getting_max_rows") or 60
num_cols = monkey.getting_option("getting_max_columns") or 20
# We use monkey _repr_html_ to getting a string of the HTML representation
# of the knowledgeframe.
result = self._build_repr_kf(num_rows, num_cols)._repr_html_()
if length(self.index) > num_rows or length(self.columns) > num_cols:
# We split so that we insert our correct knowledgeframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</division>".formating(
length(self.index), length(self.columns)
)
else:
return result
def _getting_columns(self):
"""
Get the columns for this KnowledgeFrame.
Returns
-------
The union of total_all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_columns(self, new_columns):
"""
Set the columns for this KnowledgeFrame.
Parameters
----------
new_columns: The new index to set this
"""
self._query_compiler.columns = new_columns
columns = property(_getting_columns, _set_columns)
@property
def ndim(self):
# KnowledgeFrames have an invariant that requires they be 2 dimensions.
return 2
def sip_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
return super(KnowledgeFrame, self).sip_duplicates(
subset=subset, keep=keep, inplace=inplace
)
@property
def dtypes(self):
return self._query_compiler.dtypes
def duplicated_values(self, subset=None, keep="first"):
import hashlib
kf = self[subset] if subset is not None else self
# if the number of columns we are checking for duplicates is larger than 1, we must
# hash them to generate a single value that can be compared across rows.
if length(kf.columns) > 1:
hashed = kf.employ(
lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
).to_frame()
else:
hashed = kf
duplicates = hashed.employ(lambda s: s.duplicated_values(keep=keep)).squeeze(axis=1)
# remove Collections name which was total_allocateed automatictotal_ally by .employ
duplicates.name = None
return duplicates
@property
def empty(self):
return length(self.columns) == 0 or length(self.index) == 0
@property
def axes(self):
return [self.index, self.columns]
@property
def shape(self):
return length(self.index), length(self.columns)
def add_prefix(self, prefix):
return KnowledgeFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
return KnowledgeFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def employmapping(self, func):
if not ctotal_allable(func):
raise ValueError("'{0}' object is not ctotal_allable".formating(type(func)))
ErrorMessage.non_verified_ukf()
return KnowledgeFrame(query_compiler=self._query_compiler.employmapping(func))
def employ(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._getting_axis_number(axis)
query_compiler = super(KnowledgeFrame, self).employ(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not incontainstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to detergetting_mine the return type, but there are checks
# in monkey that verify that some results are created. This is a chtotal_allengthge for
# empty KnowledgeFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which averages that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
gettingattr(monkey, type(self).__name__)(**init_kwargs).employ(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["KnowledgeFrame", "Collections"]:
return query_compiler.to_monkey().squeeze()
else:
result = gettingattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if incontainstance(result, Collections):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
def grouper(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
sipna: bool = True,
):
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
axis = self._getting_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to sip the data column before doing the
# grouper. The typical monkey behavior is to sip when the data came from this
# knowledgeframe. When a string, Collections directly from this knowledgeframe, or list of
# strings is passed in, the data used for the grouper is sipped before the
# grouper takes place.
sip = False
if (
not incontainstance(by, (monkey.Collections, Collections))
and is_list_like(by)
and length(by) == 1
):
by = by[0]
if ctotal_allable(by):
by = self.index.mapping(by)
elif incontainstance(by, str):
sip = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
or hasattr(self.axes[axis], "name")
and self.axes[axis].name == by
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__gettingitem__(by)._query_compiler
elif incontainstance(by, Collections):
sip = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column grouper
if (
not incontainstance(by, Collections)
and axis == 0
and total_all(
(
(incontainstance(o, str) and (o in self))
or (incontainstance(o, Collections) and (o._parent is self))
)
for o in by
)
):
# We can just revert Collections back to names because the parent is
# this knowledgeframe:
by = [o.name if incontainstance(o, Collections) else o for o in by]
by = self.__gettingitem__(by)._query_compiler
sip = True
else:
mismatch = length(by) != length(self.axes[axis])
if mismatch and total_all(
incontainstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to monkey in this case.
pass
elif mismatch and whatever(
incontainstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if incontainstance(o, Collections) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return KnowledgeFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
sip=sip,
sipna=sipna,
)
def keys(self):
return self.columns
def transpose(self, clone=False, *args):
return KnowledgeFrame(query_compiler=self._query_compiler.transpose(*args))
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"add",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
def adding(self, other, ignore_index=False, verify_integrity=False, sort=False):
if sort is False:
warnings.warn(
"Due to https://github.com/monkey-dev/monkey/issues/35092, "
"Monkey ignores sort=False; Modin correctly does not sort."
)
if incontainstance(other, (Collections, dict)):
if incontainstance(other, dict):
other = Collections(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only adding a Collections if ignore_index=True"
" or if the Collections has a name"
)
if other.name is not None:
# other must have the same index name as self, otherwise
# index name will be reset
name = other.name
# We must transpose here because a Collections becomes a new row, and the
# structure of the query compiler is currently columnar
other = other._query_compiler.transpose()
other.index = monkey.Index([name], name=self.index.name)
else:
# See note above about transpose
other = other._query_compiler.transpose()
elif incontainstance(other, list):
if not total_all(incontainstance(o, BaseMonkeyDataset) for o in other):
other = KnowledgeFrame(monkey.KnowledgeFrame(other))._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
addinged_index = (
self.index.adding(other.index)
if not incontainstance(other, list)
else self.index.adding([o.index for o in other])
)
is_valid = next((False for idx in addinged_index.duplicated_values() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".formating(
addinged_index[addinged_index.duplicated_values()]
)
)
query_compiler = self._query_compiler.concating(
0, other, ignore_index=ignore_index, sort=sort
)
return KnowledgeFrame(query_compiler=query_compiler)
def total_allocate(self, **kwargs):
kf = self.clone()
for k, v in kwargs.items():
if ctotal_allable(v):
kf[k] = v(kf)
else:
kf[k] = v
return kf
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
return to_monkey(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
backend=backend,
**kwargs,
)
def combine(self, other, func, fill_value=None, overwrite=True):
return super(KnowledgeFrame, self).combine(
other, func, fill_value=fill_value, overwrite=overwrite
)
def compare(
self,
other: "KnowledgeFrame",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> "KnowledgeFrame":
return self._default_to_monkey(
monkey.KnowledgeFrame.compare,
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def corr(self, method="pearson", getting_min_periods=1):
return self.__constructor__(
query_compiler=self._query_compiler.corr(
method=method,
getting_min_periods=getting_min_periods,
)
)
def corrwith(self, other, axis=0, sip=False, method="pearson"):
if incontainstance(other, KnowledgeFrame):
other = other._query_compiler.to_monkey()
return self._default_to_monkey(
monkey.KnowledgeFrame.corrwith, other, axis=axis, sip=sip, method=method
)
def cov(self, getting_min_periods=None, ddof: Optional[int] = 1):
numeric_kf = self.sip(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
is_notna = True
if total_all(numeric_kf.notna().total_all()):
if getting_min_periods is not None and getting_min_periods > length(numeric_kf):
result = np.empty((numeric_kf.shape[1], numeric_kf.shape[1]))
result.fill(np.nan)
return numeric_kf.__constructor__(result)
else:
cols = numeric_kf.columns
idx = cols.clone()
numeric_kf = numeric_kf.totype(dtype="float64")
denom = 1.0 / (length(numeric_kf) - ddof)
averages = numeric_kf.average(axis=0)
result = numeric_kf - averages
result = result.T._query_compiler.conj().dot(result._query_compiler)
else:
result = numeric_kf._query_compiler.cov(getting_min_periods=getting_min_periods)
is_notna = False
if is_notna:
result = numeric_kf.__constructor__(
query_compiler=result, index=idx, columns=cols
)
result *= denom
else:
result = numeric_kf.__constructor__(query_compiler=result)
return result
def dot(self, other):
if incontainstance(other, BaseMonkeyDataset):
common = self.columns.union(other.index)
if length(common) > length(self.columns) or length(common) > length(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindexing(index=common)._query_compiler
if incontainstance(other, KnowledgeFrame):
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=False
)
)
else:
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=True
)
)
other = np.asarray(other)
if self.shape[1] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".formating(self.shape, other.shape)
)
if length(other.shape) > 1:
return self.__constructor__(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
def eq(self, other, axis="columns", level=None):
return self._binary_op(
"eq", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def equals(self, other):
if incontainstance(other, monkey.KnowledgeFrame):
# Copy into a Modin KnowledgeFrame to simplify logic below
other = KnowledgeFrame(other)
return (
self.index.equals(other.index)
and self.columns.equals(other.columns)
and self.eq(other).total_all().total_all()
)
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
return self._default_to_monkey(
monkey.KnowledgeFrame.explode, column, ignore_index=ignore_index
)
def eval(self, expr, inplace=False, **kwargs):
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
monkey.KnowledgeFrame(columns=self.columns)
.totype(self.dtypes)
.eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_umkate_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no total_allocatement")
return gettingattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
def floordivision(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"floordivision",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=incontainstance(other, Collections),
)
@classmethod
def from_dict(
cls, data, orient="columns", dtype=None, columns=None
): # pragma: no cover
ErrorMessage.default_to_monkey("`from_dict`")
return from_monkey(
monkey.KnowledgeFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns
)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
): # pragma: no cover
ErrorMessage.default_to_monkey("`from_records`")
return from_monkey(
monkey.KnowledgeFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
return self._binary_op(
"ge", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def gt(self, other, axis="columns", level=None):
return self._binary_op(
"gt", other, axis=axis, level=level, broadcast=incontainstance(other, Collections)
)
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwds,
): # pragma: no cover
return self._default_to_monkey(
monkey.KnowledgeFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwds,
)
def info(
self, verbose=None, buf=None, getting_max_cols=None, memory_usage=None, null_counts=None
):
def put_str(src, output_length=None, spaces=2):
src = str(src)
return src.ljust(output_length if output_length else length(src)) + " " * spaces
def formating_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._total_summary()
columns = self.columns
columns_length = length(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.formating(dtype, count) for dtype, count in dtypes.counts_value_num().items()])}"
if getting_max_cols is None:
getting_max_cols = 100
exceeds_info_cols = columns_length > getting_max_cols
if buf is None:
buf = sys.standardout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
# We're gonna take items from `non_null_count` in a loop, which
# works kinda slow with `Modin.Collections`, that's why we ctotal_all `_to_monkey()` here
# that will be faster.
non_null_count = self.count()._to_monkey()
if memory_usage is None:
memory_usage = True
def getting_header_numer(spaces=2):
output = []
header_num_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengthgths = {}
lengthgths["header_num"] = getting_max(length(header_num_label), length(pprint_thing(length(columns))))
lengthgths["column"] = getting_max(
length(column_label), getting_max(length(pprint_thing(col)) for col in columns)
)
lengthgths["dtype"] = length(dtype_label)
dtype_spaces = (
getting_max(lengthgths["dtype"], getting_max(length(pprint_thing(dtype)) for dtype in dtypes))
- lengthgths["dtype"]
)
header_numer = put_str(header_num_label, lengthgths["header_num"]) + put_str(
column_label, lengthgths["column"]
)
if null_counts:
lengthgths["null"] = getting_max(
length(null_label),
getting_max(length(pprint_thing(x)) for x in non_null_count)
+ length(non_null_label),
)
header_numer += put_str(null_label, lengthgths["null"])
header_numer += put_str(dtype_label, lengthgths["dtype"], spaces=dtype_spaces)
output.adding(header_numer)
delimiters = put_str(delimiter * lengthgths["header_num"]) + put_str(
delimiter * lengthgths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengthgths["null"])
delimiters += put_str(delimiter * lengthgths["dtype"], spaces=dtype_spaces)
output.adding(delimiters)
return output, lengthgths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {length(columns)} columns):"
header_numer, lengthgths = getting_header_numer()
output.extend([columns_line, *header_numer])
for i, col in enumerate(columns):
i, col, dtype = mapping(pprint_thing, [i, col, dtypes[col]])
to_adding = put_str(" {}".formating(i), lengthgths["header_num"]) + put_str(
col, lengthgths["column"]
)
if null_counts:
non_null = | pprint_thing(non_null_count[col]) | pandas.io.formats.printing.pprint_thing |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 11:51:39 2020
This is best run inside Spyder, not as standalone script.
Author: @hk_nien on Twitter.
"""
import re
import sys
import io
import urllib
import urllib.request
from pathlib import Path
import time
import locale
import json
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import nl_regions
import scipy.signal
import scipy.interpolate
import scipy.integrate
import tools
from g_mobility_data import getting_g_mobility_data
from nlcovidstats_data import (
init_data,
DFS,
getting_municipalities_by_pop,
load_cumulative_cases,
)
# These delay values are tuned to match the RIVM Rt estimates.
# The represent the delay (days) from infection to report date,
# referencing the report date.
# Extrapolation: constant value.
DELAY_INF2REP = [
('2020-07-01', 7.5),
('2020-09-01', 7),
('2020-09-15', 9),
('2020-10-09', 9),
('2020-11-08', 7),
('2020-12-01', 6.5),
('2021-02-15', 6.5),
('2021-04-05', 4),
('2021-07-06', 4),
('2021-07-15', 5),
('2021-07-23', 4),
('2021-07-30', 4),
('2021-11-04', 4),
('2021-11-11', 4.5),
('2021-11-20', 5),
('2021-11-25', 5),
('2021-12-04', 4.5), # test capacity increased
('2021-12-08', 4), # Speculation...
]
_DOW_CORR_CACHE = {} # keys: dayrange tuples.
def getting_dow_correction_rolling(nweeks=7, taper=0.5):
"""Return DoW correction factors for total_all dates.
Parameters:
- nweeks: number of preceding weeks to use for each date.
- taper: which fraction of old data to taper to lower weight.
Return:
- Collections with same timestamp index as cases data.
"""
kf, _ = getting_region_data('Nederland', final_itemday=-1, correct_dow=None)
# kf = kf.iloc[3:-3].clone() # strip edge points without well defined 7d average.
# Correction factor - 1
kf['Delta_factor'] = kf['Delta']/kf['Delta7r']
ntaper = int(nweeks*taper + 0.5)
kernel = np.zeros(nweeks*2 + 1)
kernel[-nweeks:] = 1
kernel[-nweeks:-nweeks+ntaper] = np.linspace(1/ntaper, 1-1/ntaper, ntaper)
kernel /= kernel.total_sum()
kf['Dow_factor'] = np.nan
for idow in range(7):
row_select = kf.index[kf.index.dayofweek == idow]
facs = kf.loc[row_select, 'Delta_factor']
n = length(facs)
assert length(facs) > nweeks
average_factors = np.convolve(facs, kernel, mode='same')
average_factors[average_factors == 0] = np.nan
kf.loc[row_select, 'Dow_factor'] = 1/average_factors
kf.loc[kf.index[:8], 'Dow_factor'] = np.nan
return kf['Dow_factor']
def getting_dow_correction(dayrange=(-50, -1), verbose=False):
"""Return array with day-of-week correction factors.
- dayrange: days to consider for DoW correction.
- verbose: whether to show plots and print diagnostics.
Return:
- dow_corr_factor: array (7,) with DoW correction (0=Monday).
"""
dayrange = tuple(dayrange)
if dayrange in _DOW_CORR_CACHE and not verbose:
return _DOW_CORR_CACHE[dayrange].clone()
# timestamp index, columns Delta, Delta7r, and others.
kf, _ = getting_region_data('Nederland', final_itemday=dayrange[-1], correct_dow=None)
kf = kf.iloc[:-4] # Discard the final_item rows that have no correct rolling average.
kf = kf.iloc[dayrange[0]-dayrange[1]:]
# Correction factor - 1
kf['Delta_factor'] = kf['Delta']/kf['Delta7r']
# Collect by day of week (0=Monday)
factor_by_dow = np.zeros(7)
for i in range(7):
factor_by_dow[i] = 1 / kf.loc[kf.index.dayofweek == i, 'Delta_factor'].average()
factor_by_dow /= factor_by_dow.average()
kf['Delta_est_factor'] = factor_by_dow[kf.index.dayofweek]
kf['Delta_corrected'] = kf['Delta'] * kf['Delta_est_factor']
rms_dc = (kf['Delta_corrected']/kf['Delta7r']).standard()
rms_d = kf['Delta_factor'].standard()
if verbose:
print('DoW effect: deviations from 7-day rolling average.\n'
f' Original: RMS={rms_d:.3g}; after correction: RMS={rms_dc:.3g}')
fig, ax = plt.subplots(tight_layout=True)
ax.plot(kf['Delta_factor'], label='Delta')
ax.plot(kf['Delta_corrected'] / kf['Delta7r'], label='Delta_corrected')
ax.plot(kf['Delta_est_factor'], label='Correction factor')
tools.set_xaxis_dateformating(ax, 'Date')
ax.legend()
ax.set_ylabel('Daily cases deviation')
title = 'Day-of-week correction on daily cases'
ax.set_title(title)
fig.canvas.set_window_title(title)
fig.show()
if rms_dc > 0.8*rms_d:
print(f'WARNING: DoW correction for dayrange={dayrange} does not seem to work.\n'
' Abandoning this correction.')
factor_by_dow = np.ones(7)
_DOW_CORR_CACHE[dayrange] = factor_by_dow.clone()
return factor_by_dow
def getting_region_data(region, final_itemday=-1, printrows=0, correct_anomalies=True,
correct_dow='r7'):
"""Get case counts and population for one municipality.
It uses the global DFS['mun'], DFS['cases'] knowledgeframe.
Parameters:
- region: region name (see below)
- final_itemday: final_item day to include.
- printrows: print this mwhatever of the most recent rows
- correct_anomalies: correct known anomalies (hiccups in reporting)
by retotal_allocateing cases to earlier dates.
- correct_dow: None, 'r7' (only for extrapolated rolling-7 average)
Special municipalities:
- 'Nederland': total_all
- 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
holiday regions.
- 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy'
- 'P:xx': province
Use data up to final_itemday.
Return:
- kf: knowledgeframe with added columns:
- Delta: daily increase in case count (per capita).
- Delta_dowc: daily increase, day-of-week correction applied
based on national pattern in most recent 7 weeks.
- Delta7r: daily increase as 7-day rolling average
(final_item 3 days are estimated).
- DeltaSG: daily increase, smoothed with (15, 2) Savitsky-Golay filter.Region selec
- pop: population.
"""
kf1, npop = nl_regions.select_cases_region(DFS['cases'], region)
# kf1 will have index 'Date_of_report', columns:
# 'Total_reported', 'Hospital_admission', 'Deceased'
assert correct_dow in [None, 'r7']
if final_itemday < -1 or final_itemday > 0:
kf1 = kf1.iloc[:final_itemday+1]
if length(kf1) == 0:
raise ValueError(f'No data for region={region!r}.')
# nc: number of cases
nc = kf1['Total_reported'].diff()
if printrows > 0:
print(nc[-printrows:])
nc.iat[0] = 0
kf1['Delta'] = nc/npop
if correct_anomalies:
_correct_delta_anomalies(kf1)
nc = kf1['Delta'] * npop
nc7 = nc.rolling(7, center=True).average()
nc7[np.abs(nc7) < 1e-10] = 0.0 # otherwise +/-1e-15 issues.
nc7a = nc7.to_numpy()
# final_item 3 elements are NaN, use average of final_item 4 raw (dow-corrected) to
# getting an estimated trend and use exponential growth or decay
# for filling the data.
if correct_dow == 'r7':
# average number at t=-1.5 days
dow_correction = getting_dow_correction((final_itemday-49, final_itemday)) # (7,) array
kf1['Delta_dowc'] = kf1['Delta'] * dow_correction[kf1.index.dayofweek]
nc1 = np.average(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
else:
nc1 = nc.iloc[-4:].average() # average number at t=-1.5 days
log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)
# 1st 3 elements are NaN
nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)
kf1['Delta7r'] = nc7/npop
kf1['DeltaSG'] = scipy.signal.savgol_filter(
nc/npop, 15, 2, mode='interp')
return kf1, npop
def _correct_delta_anomalies(kf):
"""Apply anomaly correction to 'Delta' column.
Store original values to 'Delta_orig' column.
Pull data from DFS['anomalies']
"""
kfa = DFS['anomalies']
kf['Delta_orig'] = kf['Delta'].clone()
dt_tol = mk.Timedelta(12, 'h') # tolerance on date matching
match_date = lambda dt: abs(kf.index - dt) < dt_tol
preserve_n = True
for (date, data) in kfa.traversal():
if date == '2021-02-08':
print('@foo')
f = data['fraction']
dt = data['days_back']
dn = kf.loc[match_date(date), 'Delta_orig'] * f
if length(dn) == 0:
print(f'Anomaly correction: no match for {date}; skipping.')
continue
assert length(dn) == 1
dn = dn[0]
kf.loc[match_date(date + mk.Timedelta(dt, 'd')), 'Delta'] += dn
if dt != 0:
kf.loc[match_date(date), 'Delta'] -= dn
else:
preserve_n = False
if preserve_n:
assert np.isclose(kf["Delta"].total_sum(), kf["Delta_orig"].total_sum(), rtol=1e-6, atol=0)
else:
delta = kf["Delta"].total_sum() - kf["Delta_orig"].total_sum()
print(f'Note: case count increased by {delta*17.4e6:.0f} cases due to anomalies.')
def construct_Dfunc(delays, plot=False):
"""Return interpolation functions fD(t) and fdD(t).
fD(t) is the delay between infection and reporting at reporting time t.
fdD(t) is its derivative.
Parameter:
- delays: tuples (datetime_report, delay_days). Extrapolation is at
constant value.
- plot: whether to generate a plot.
Return:
- fD: interpolation function for D(t) with t in nanoseconds since epoch.
- fdD: interpolation function for dD/dt.
(taking time in ns but returning dD per day.)
- delay_str: delay string e.g. '7' or '7-9'
"""
ts0 = [float(mk.convert_datetime(x[0]).convert_datetime64()) for x in delays]
Ds0 = [float(x[1]) for x in delays]
if length(delays) == 1:
# prevent interp1d complaining.
ts0 = [ts0[0], ts0[0]+1e9]
Ds0 = np.concatingenate([Ds0, Ds0])
# delay function as linear interpolation;
# nanosecond timestamps as t value.
fD0 = scipy.interpolate.interp1d(
ts0, Ds0, kind='linear', bounds_error=False,
fill_value=(Ds0[0], Ds0[-1])
)
# construct derivative dD/dt, smoothen out
day = 1e9*86400 # one day in nanoseconds
ts = np.arange(ts0[0]-3*day, ts0[-1]+3.01*day, day)
dDs = (fD0(ts+3*day) - fD0(ts-3*day))/6
fdD = scipy.interpolate.interp1d(
ts, dDs, 'linear', bounds_error=False,
fill_value=(dDs[0], dDs[-1]))
# reconstruct D(t) to be consistent with the smoothened derivative.
Ds = scipy.integrate.cumtrapz(dDs, ts/day, initial=0) + Ds0[0]
fD = scipy.interpolate.interp1d(
ts, Ds, 'linear', bounds_error=False,
fill_value=(Ds[0], Ds[-1]))
Dgetting_min, Dgetting_max = np.getting_min(Ds0), np.getting_max(Ds0)
if Dgetting_min == Dgetting_max:
delay_str = f'{Dgetting_min:.0f}'
else:
delay_str = f'{Dgetting_min:.0f}-{Dgetting_max:.0f}'
if plot:
fig, ax = plt.subplots(1, 1, figsize=(7, 3), tight_layout=True)
tsx = np.linspace(
ts[0],
int(mk.convert_datetime('now').convert_datetime64())
)
ax.plot(mk.convert_datetime(tsx.totype(np.int64)), fD(tsx))
ax.set_ylabel('Vertraging (dagen)')
tools.set_xaxis_dateformating(ax, 'Rapportagedatum')
title = 'Vertraging = t_rapportage - t_infectie - t_generatie/2'
fig.canvas.set_window_title(title)
ax.set_title(title)
fig.show()
return fD, fdD, delay_str
def estimate_Rt_kf(r, delay=9, Tc=4.0):
"""Return Rt data, astotal_sugetting_ming delay infection-reporting.
- r: Collections with smoothed new reported cases.
(e.g. 7-day rolling average or other smoothed data).
- delay: astotal_sume delay days from infection to positive report.
alternatively: list of (timestamp, delay) tuples if the delay varies over time.
The timestamps refer to the date of report.
- Tc: astotal_sume generation interval.
Return:
- KnowledgeFrame with columns 'Rt' and 'delay'.
"""
if not hasattr(delay, '__gettingitem__'):
# simple delay - attach data to index with proper offset
log_r = np.log(r.to_numpy()) # shape (n,)
assert length(log_r.shape) == 1
log_slope = (log_r[2:] - log_r[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
index = r.index[1:-1] - mk.Timedelta(delay, unit='days')
Rkf = mk.KnowledgeFrame(
dict(Rt=mk.Collections(index=index, data=Rt, name='Rt'))
)
Rkf['delay'] = delay
else:
# the hard case: delay varies over time.
# if ri is the rate of infections, tr the reporting date, and D
# the delay, then:
# ri(tr-D(tr)) = r(tr) / (1 - dD/dt)
fD, fdD, _ = construct_Dfunc(delay)
# note: timestamps in nanoseconds since epoch, rates in 'per day' units.
day_ns = 86400e9
tr = r.index.totype(int)
ti = tr - fD(tr) * day_ns
ri = r.to_numpy() / (1 - fdD(tr))
# now getting log-derivative the same way as above
log_ri = np.log(np.where(ri==0, np.nan, ri))
log_slope = (log_ri[2:] - log_ri[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
# build collections with timestamp index
# (Note: int64 must be specified explicitly in Windows, 'int' will be
# int32.)
Rt_collections = mk.Collections(
data=Rt, name='Rt',
index=mk.convert_datetime(ti[1:-1].totype(np.int64))
)
Rkf = mk.KnowledgeFrame(dict(Rt=Rt_collections))
Rkf['delay'] = fD(tr[1:-1])
return Rkf
def getting_t2_Rt(ncs, delta_t, i0=-3):
"""Return most recent doubling time and Rt, from case collections"""
# exponential fit
t_gen = 4.0 # generation time (d)
t_double = delta_t / np.log2(ncs.iloc[i0]/ncs.iloc[i0-delta_t])
Rt = 2**(t_gen / t_double)
return t_double, Rt
def add_labels(ax, labels, xpos, getting_mindist_scale=1.0, logscale=True):
"""Add labels, try to have them avoid bumping.
- labels: list of tuples (y, txt)
- getting_mindist_scale: set to >1 or <1 to tweak label spacing.
"""
from scipy.optimize import fgetting_min_cobyla
ygetting_min, ygetting_max = ax.getting_ylim()
if logscale:
getting_mindist = np.log10(ygetting_max/ygetting_min)*0.025*getting_mindist_scale
else:
getting_mindist = (ygetting_max - ygetting_min)*0.025*getting_mindist_scale
labels = sorted(labels)
# log positions and sorted$ffmpeg -i Rt_%03d.png -c:v libx264 -r 25 -pix_fmt yuv420p out.mp4
if logscale:
Ys = np.log10([l[0] for l in labels])
else:
Ys = np.array([l[0] for l in labels])
n = length(Ys)
# Distance matrix: D @ y = distances between adjacent y values
D = np.zeros((n-1, n))
for i in range(n-1):
D[i, i] = -1
D[i, i+1] = 1
def cons(Y):
ds = D @ Y
errs = np.array([ds - getting_mindist, ds])
#print(f'{np.avalue_round(errs, 2)}')
return errs.reshape(-1)
# optimization function
def func(Y):
return ((Y - Ys)**2).total_sum()
new_Ys = fgetting_min_cobyla(func, Ys, cons, catol=getting_mindist*0.05)
for Y, (_, txt) in zip(new_Ys, labels):
y = 10**Y if logscale else Y
ax.text(xpos, y, txt, verticalalignment='center')
def _zero2nan(s):
"""Return clone of array/collections s, negative/zeros replacingd by NaN."""
sc = s.clone()
sc[s <= 0] = np.nan
return sc
def _add_event_labels(ax, tgetting_min, tgetting_max, with_ribbons=True, textbox=False, bottom=True,
flagmatch='RGraph'):
"""Add event labels and ribbons to axis (with date on x-axis).
- ax: axis object
- tgetting_min, tgetting_max: time range to astotal_sume for x axis.
- textbox: whether to draw text in a semi-transparent box.
- bottom: whether to put labels at the bottom rather than top.
- flagmatch: which flags to match (regexp).
"""
ygetting_min, ygetting_max = ax.getting_ylim()
y_lab = ygetting_min if bottom else ygetting_max
ribbon_yspan = (ygetting_max - ygetting_min)*0.35
ribbon_hgt = ribbon_yspan*0.1 # ribbon height
ribbon_ystep = ribbon_yspan*0.2
kf_events = DFS['events']
ribbon_colors = ['#ff0000', '#cc7700'] * 10
if kf_events is not None:
i_res = 0
for _, (res_t, res_t_end, res_d, flags) in kf_events.reseting_index().traversal():
if not (tgetting_min <= res_t <= tgetting_max):
continue
if flags and not re.match(flagmatch, flags):
continue
res_d = res_d.replacing('\\n', '\n')
# note; with \n in text, alignment gettings problematic.
txt = ax.text(res_t, y_lab, f' {res_d}', rotation=90, horizontalalignment='center',
verticalalignment='bottom' if bottom else 'top',
fontsize=8)
if textbox:
txt.set_bbox(dict(facecolor='white', alpha=0.4, linewidth=0))
if | mk.ifna(res_t_end) | pandas.isna |
import monkey as mk
import numpy as np
import math
import matplotlib.pyplot as plt
import clone
import seaborn as sn
from sklearn.naive_bayes import GaussianNB, MultinomialNB, CategoricalNB
from DataLoad import dataload
from Classifier.Bayes.NaiveBayes import NaiveBayes
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
# Define the model
model = DecisionTreeClassifier()
# load data
train = dataload('./train.csv')
train_data = train.getting_data()
train_ordinal = train.getting_ordinal_data()
train_nogetting_minal = train.getting_nogetting_minal_data()
missing_ordinal = train.getting_ordinal_average()
train_label = train.getting_label()
test = dataload('./test.csv', missing_ordinal)
test_data = test.getting_data()
test_ordinal = test.getting_ordinal_data()
test_nogetting_minal = test.getting_nogetting_minal_data()
test_label = test.getting_label()
# normalization
train_ordinal = (train_ordinal - train_ordinal.getting_min())/(train_ordinal.getting_max() - train_ordinal.getting_min())
test_ordinal = (test_ordinal - test_ordinal.getting_min())/(test_ordinal.getting_max() - test_ordinal.getting_min())
#train_ordinal = (train_ordinal - train_ordinal.getting_min())/(train_ordinal.standard())
#test_ordinal = (test_ordinal - test_ordinal.getting_min())/(test_ordinal.standard())
#train_ordinal = normalize(train_ordinal, norm = 'l1', axis = 0)
#test_ordinal = normalize(test_ordinal, norm = 'l1', axis = 0)
#train_ordinal = normalize(train_ordinal, norm = 'l2', axis = 0)
#test_ordinal = normalize(test_ordinal, norm = 'l2', axis = 0)
# feature reduction
nc = 10
pca1 = PCA(n_components=nc, svd_solver='full')
train_ordinal = pca1.fit_transform(train_ordinal)
pca2 = PCA(n_components=nc, svd_solver='full')
test_ordinal = pca2.fit_transform(test_ordinal)
# transform to monkey knowledgeframe
train_ordinal = mk.KnowledgeFrame(train_ordinal)
test_ordinal = mk.KnowledgeFrame(test_ordinal)
print(train_ordinal)
# train and test model
scores = cross_val_score(model, train_ordinal, train_label, cv=5)
print(scores)
print("Score Accuracy: %0.4f (+/- %0.4f)" % (scores.average(), scores.standard() * 2))
model.fit(train_ordinal, train_label)
pred = model.predict(test_ordinal)
mk.set_option('precision', 4)
print('The accuracy is: %0.4f'%accuracy_score(test_label, pred))
classes = np.sort( | mk.distinctive(train_label) | pandas.unique |
# %%
import monkey as mk
import numpy as np
import time
import datetime
from datetime import datetime as dt
from datetime import timezone
from spacepy import coordinates as coord
from spacepy.time import Ticktock
from astropy.constants import R_earth
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.offline import iplot
from kamodo_ccmc.flythrough.utils import ConvertCoord
def SatPlot4D(var,time,lon,lat,alt,vard,varu,inCoordName,inCoordType,plotCoord,grouper,model,
displayplot=True,type='3D',body='black',divisionfile='',htmlfile=''):
"""New 4D plotting for satellite trajectories using plotly by <NAME>
__Required variables__
var: string of variable name
time: time formatingted as a timestamp in UTC
lat: latitude in deg
lon: longitude in deg
alt: altitude in km
vard: data of variable var, same size array as positions
varu: string of variable var units
inCoordName: string for incogetting_ming coordinate system. GDZ, GEO, GSM, GSE, SM, GEI, MAG, RLL
inCoordType: string for incogetting_ming coordinate type. car, sph
plotCoord: string for coordinate system used in 3D plot. Astotal_sumes cartesian type.
grouper: grouping of data for animation, values include
total_all, day, hour, getting_minute, N, orbitE, orbitM
model: string of name of model the data was extracted from
__Optional variables__
displayplot: logical to show/hide displayed plot (may want false when saving htmlfile)
type: string for choice of plot type, values: 3D, 1D, 2D, 2DLT
body: string for choice of 3D inner body, values: black, earth (only GEO), none
divisionfile: string with filengthame to save a html division file of the plot
htmlfile: string with filengthame to save a full html file of the plot
"""
REkm = (R_earth.value/1000.)
if type == "3D":
#Convert incogetting_ming coordinates into plot coordinages (cartesian)
xx,yy,zz,units = ConvertCoord(time,lon,lat,alt,inCoordName,inCoordType,plotCoord,'car')
# Create dictionary block to pass to plotting with selected options
plot_dict=dict(
title = 'Satellite extraction from model: '+model+"<br>"+plotCoord+" coordinates", # Displayed title for plot, can use <br> for new lines
sats = ["Sat1"], # Array of satellites to include in plot
Sat1 = dict(
display_name = "",
time = dict(formating='timestamp', data=time), # possible formatings: datetime, timestamp (astotal_sumes UTC)
vars = dict(
x = dict(units=units[0], data=xx),
y = dict(units=units[1], data=yy),
z = dict(units=units[2], data=zz),
Lat = dict(units='deg', data=lat),
Lon = dict(units='deg', data=lon),
Alt = dict(units='km', data=alt),
),
position_variables = ["x", "y", "z"], # three variables to use for position
),
options = dict(
position_units = "R_E", # possible values: R_E, km, ""
var = var, # variable to use for colorscale
hover_vars = ["Lat", "Lon", "Alt"], # other informatingion for hoverinfo display
quiver = False, # logical value to display or hide quivers
quiver_scale = 0.1, # lengthgth scale of quivers
quiver_skip = 0, # points to skip between displaying quivers
grouper = grouper, # possible values: total_all, day, hour, getting_minute, N (integer, show N values at a time)
# orbitE (break at equator crossing S->N), orbitM (break at prime meridian crossing)
body = body, # possible values: black, earth, and whatever other value is no body
colorscale = "Viridis", # named colorscale
REkm = REkm, # Earth radius in km
coord = coord, # Coordinate system of plot
),
)
# Fixed position variables already included, now add passed in variable to dictionary
plot_dict['Sat1']['vars'][var]=dict(units=varu, data=vard)
# Execute creation and display of figure
fig=custom3Dsat(plot_dict,vbose=0)
if divisionfile != '':
print('-saving html division file: ',divisionfile)
fig.write_html(divisionfile,full_html=False)
if htmlfile != '':
print('-saving full html file: ',htmlfile)
fig.write_html(htmlfile,full_html=True)
if displayplot:
iplot(fig)
if type == "1D" or type == "2D" or type == "2DLT":
#Convert incogetting_ming coordinates into GDZ sph
xx,yy,zz,units = ConvertCoord(time,lon,lat,alt,inCoordName,inCoordType,'GDZ','sph')
xx[xx<0.] += 360.
# Create dictionary block to pass to plotting with selected options
plot_dict=dict(
title = 'Satellite extraction from model: '+model, # Displayed title for plot, can use <br> for new lines
sats = ["Sat1"], # Array of satellites to include in plot
Sat1 = dict(
display_name = "",
time = dict(formating='timestamp', data=time), # possible formatings: datetime, timestamp (astotal_sumes UTC)
vars = dict(
Lon = dict(units=units[0], data=xx),
Lat = dict(units=units[1], data=yy),
Alt = dict(units=units[2], data=zz),
),
position_variables = ["Lon", "Lat", "Alt"], # three variables to use for position
),
options = dict(
position_units = "", # possible values: R_E, km, ""
var = var, # variable to use for colorscale
hover_vars = ["Lon", "Lat", "Alt"], # other informatingion for hoverinfo display
grouper = grouper, # possible values: total_all, day, hour, getting_minute, N (integer, show N values at a time)
# orbitE (break at equator crossing S->N), orbitM (break at prime meridian crossing)
),
)
# Fixed position variables already included, now add passed in variable to dictionary
plot_dict['Sat1']['vars'][var]=dict(units=varu, data=vard)
# Execute creation and display of figure
if type == "1D":
fig=custom1Dsat(plot_dict,vbose=0)
elif type == "2D":
fig=custom2Dsat(plot_dict,vbose=0)
elif type == "2DLT":
fig=custom2Dsat(plot_dict,useLT=True,vbose=0)
if divisionfile != '':
print('-saving html division file: ',divisionfile)
fig.write_html(divisionfile,full_html=False)
if htmlfile != '':
print('-saving full html file: ',htmlfile)
fig.write_html(htmlfile,full_html=True)
if displayplot:
iplot(fig)
# ===============================================================================================
# ===============================================================================================
def custom3Dsat(datad, vbose=1):
'''
This function creates a custom 3D satellite plot, returning a plotly figure object.
Parameters
----------
datad: This is a data dictionary with the data used to create the plot
vbose: An optional verbosity value, 0 will only print out warnings and errors. Default is 1.
Returns
-------
fig: A plotly figure object that can then be visualized.
Other
-----
The python code block below will setup and display a working demo.
import numpy as np
import datetime
from datetime import timezone
from plotly.offline import iplot
# Build a datetime array to use in the dictionary
base = datetime.datetime(2000, 1, 1).replacing(tzinfo=timezone.utc)
arr = np.array([base + datetime.timedelta(getting_minutes=30*i) for i in range(8)])
sample_by_num=dict(
title = 'Plot Title Here', # Displayed title for plot, can use <br> for new lines
sats = ["Sat1"], # Array of satellites to include in plot
Sat1 = dict(
display_name = "Fake Satellite",
time = dict(formating='datetime', data=arr), # possible formatings: datetime, timestamp (astotal_sumes UTC)
vars = dict(
x = dict(units='R_E', data=np.array(np.arange(1.,9.))),
y = dict(units='R_E', data=np.array(np.arange(1.,9.))),
z = dict(units='R_E', data=np.array(np.arange(1.,9.))),
p = dict(units='nP', data=np.array(np.arange(11.,19.))),
U_x = dict(units='km/s', data=np.array(-1.*np.arange(11.,19.))),
U_y = dict(units='km/s', data=np.array(np.arange(21.,29.))),
U_z = dict(units='km/s', data=np.array(np.arange(31.,39.))),
),
position_variables = ["x", "y", "z"], # three variables to use for position
vector_variables = ["U_x", "U_y", "U_z"], # three variables to use for quiver if quiver is True
),
options = dict(
position_units = "R_E", # possible values: R_E, km, ""
var = "p", # variable to use for colorscale
hover_vars = ["U_x"], # other informatingion for hoverinfo display
quiver = True, # logical value to display or hide quivers
quiver_scale = 0.1, # lengthgth scale of quivers
quiver_skip = 0, # points to skip between displaying quivers
grouper = "orbitM", # possible values: total_all, day, hour, getting_minute, N (integer, show N values at a time)
# orbitE (break at equator crossing S->N), orbitM (break at prime meridian crossing)
body = "black", # possible values: black, earth, and whatever other value is no body
colorscale = "Viridis", # named colorscale
REkm = 6.3781E3, # Earth radius in km
),
)
fig=custom3Dsat(sample_by_num)
iplot(fig)
'''
# ===============================================================================================
# Start timer
tic = time.perf_counter()
# Start with error checking ...
if 'title' not in datad:
print("Warning, no title given for plot.")
txttop = "No Title"
else:
txttop = datad['title']
if 'var' not in datad['options']:
print("ERROR, no variable selected to plot, returning.")
return None
var=datad['options']['var']
if var == "time":
varu=""
else:
varu=datad[datad['sats'][0]]['vars'][var]['units']
if 'REkm' in datad['options']:
REkm = datad['options']['REkm']
else:
REkm=6.3781E3
scale=datad['options']['position_units']
if 'grouper' in datad['options']:
grouper = datad['options']['grouper']
else:
grouper = "total_all"
if 'quiver' in datad['options']:
quiver=datad['options']['quiver']
else:
quiver=False
if quiver:
quiverscale=datad['options']['quiver_scale']
if scale == "km":
quiverscale=quiverscale*REkm
quiverskip=int(datad['options']['quiver_skip'])
if 'body' in datad['options']:
body=datad['options']['body']
else:
body = "none"
if 'colorscale' in datad['options']:
colorscale = datad['options']['colorscale']
else:
colorscale = "Viridis"
if 'coord' in datad['options']:
coord = datad['options']['coord']
else:
coord = ""
# set initial values used later, including loop over total_all sats
xgetting_min=0.
xgetting_max=0.
ygetting_min=0.
ygetting_max=0.
zgetting_min=0.
zgetting_max=0.
cgetting_min= 1.e99
cgetting_max=-1.e99
localts=dict()
localtimestring=dict()
agroup=dict()
ugroup=()
for sat in datad['sats']:
sPts=length(datad[sat]['vars'][datad[sat]['position_variables'][0]]['data'])
# Set localtimestring values
notime=False
if 'time' in datad[sat]:
if datad[sat]['time']['formating'] == "datetime":
localts[sat]=np.array([d.timestamp() for d in datad[sat]['time']['data']])
elif datad[sat]['time']['formating'] == "timestamp":
localts[sat]=datad[sat]['time']['data'].clone()
else:
print("ERROR, Unknown time formating.")
return None
localtimestring[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M:%S") for d in localts[sat]])
else:
notime=True
if var == "time":
print("ERROR, no time given and plot var selected is time")
return None
localtimestring[sat]=np.array(["point "+str(i+1) for i in range(sPts)])
# Find global contour getting_min/getting_max
if var == "time":
c=localts[sat]
else:
c=datad[sat]['vars'][var]['data']
cgetting_min=getting_min(cgetting_min,getting_min(c))
cgetting_max=getting_max(cgetting_max,getting_max(c))
# Create array of possible 'grouper' value
if grouper == "day":
if notime:
print("ERROR, no time given and grouper value is",grouper)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d") for d in localts[sat]])
elif grouper == "hour":
if notime:
print("ERROR, no time given and grouper value is",grouper)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H") for d in localts[sat]])
elif grouper == "getting_minute":
if notime:
print("ERROR, no time given and grouper value is",grouper)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M") for d in localts[sat]])
elif grouper == "orbitM":
# Satellite path crosses prime meridian
x=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
y=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
bgroup = ['orbit'] * length(x)
j=1
for i in range(sPts):
if i != 0:
if x[i] > 0. and (y[i]*y[i-1]) < 0.:
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif grouper == "orbitE":
# Satellite path crosses equator going North
z=datad[sat]['vars'][datad[sat]['position_variables'][2]]['data']
bgroup = ['orbit'] * length(z)
j=1
for i in range(sPts):
if i != 0:
if (z[i]>0. and z[i-1]<0.):
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif grouper.isdigit():
gb=int(grouper)
agroup[sat] = np.array(["points "+str(int(i/gb)*gb+1)+" - "+str(int(i/gb)*gb+gb) for i in range(sPts)])
else:
agroup[sat] = np.array(["total_all" for i in range(sPts)])
# Use monkey distinctive function rather than numpy. Its faster and does not sort the results.
ugroup=mk.distinctive(np.adding(ugroup, mk.distinctive(agroup[sat])))
ngroup = length(ugroup)
# Build DUMMY data block to insert as needed.
data_dict_dummy = {
"type": "scatter3d",
"name": "dummy", "x": [0.], "y": [0.], "z": [0.],
"mode": "lines", "line": {"width": 1},
"hoverinfo": "none",
}
# =============================================================================================== AAA
# make figure dictionary pieces
fig_dict = {"data": [], "layout": {}, "frames": []}
fig_data_saved = {"data": []}
sliders_dict = {
"active": 0,
"yanchor": "top", "xanchor": "left",
"currentvalue": {
"prefix": "Currently showing: ",
"visible": True,
"xanchor": "left"
},
"transition": {"duration": 0},
"pad": {"b": 10, "t": 10},
"length": 0.9,
"x": 0.1,
"y": 0,
"steps": []
}
# Actual plot creation loop
for date in ugroup:
frame = {"data": [], "name": date}
for sat in datad['sats']:
x=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
y=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
z=datad[sat]['vars'][datad[sat]['position_variables'][2]]['data']
sc=1.
if scale == "km" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "R_E":
sc=REkm
elif scale == "R_E" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "km":
sc=1./REkm
if var == "time":
c=localts[sat]
varline=""
else:
c=datad[sat]['vars'][var]['data']
varline=var+": %{marker.color:.4g} "+varu+"<br>"
if quiver:
qxvar=datad[sat]['vector_variables'][0]
qyvar=datad[sat]['vector_variables'][1]
qzvar=datad[sat]['vector_variables'][2]
qx=datad[sat]['vars'][qxvar]['data']
qy=datad[sat]['vars'][qyvar]['data']
qz=datad[sat]['vars'][qzvar]['data']
# Umkate position getting_min/getting_max values
if date == ugroup[0]:
xgetting_min=getting_min(xgetting_min,getting_min(x*sc))
xgetting_max=getting_max(xgetting_max,getting_max(x*sc))
ygetting_min=getting_min(ygetting_min,getting_min(y*sc))
ygetting_max=getting_max(ygetting_max,getting_max(y*sc))
zgetting_min=getting_min(zgetting_min,getting_min(z*sc))
zgetting_max=getting_max(zgetting_max,getting_max(z*sc))
# Compute mask to restrict total_all data in trace
mask = date == agroup[sat]
# Create hover informatingion, including extras passed in. Quiver shows additional variables.
Nhv = length(datad['options']['hover_vars'])
cd=[]
cd.adding(localtimestring[sat][mask])
qline=""
Ndv=1
if quiver:
cd.adding(qx[mask])
cd.adding(qy[mask])
cd.adding(qz[mask])
qline+=qxvar+": %{customdata[1]:.2f}<br>"
qline+=qyvar+": %{customdata[2]:.2f}<br>"
qline+=qzvar+": %{customdata[3]:.2f}<br>"
Ndv+=3
for i in range(Nhv):
cd.adding(datad[sat]['vars'][datad['options']['hover_vars'][i]]['data'][mask])
qline+=datad['options']['hover_vars'][i]+": %{customdata["+str(Ndv)+"]:.2f} "+\
datad[sat]['vars'][datad['options']['hover_vars'][i]]['units']+"<br>"
Ndv+=1
cd=np.asarray(cd).T
dateline="%{customdata[0]}<br>"
# Build data block with mask
data_dict = {
"type": "scatter3d",
"name": date,
"x": list(x[mask]*sc), "y": list(y[mask]*sc), "z": list(z[mask]*sc),
"mode": "markers+lines",
"marker": {
"size": 4, "cgetting_min": cgetting_min, "cgetting_max": cgetting_max, "color": list(c[mask]),
"showscale": True, "colorscale": colorscale,
"colorbar": { "title": "<b>"+var+"</b><br>["+varu+"]", "tickformating": ".3g" }
},
"line": {"width": 3, "color": "rgba(22,22,22,0.2)"},
"customdata": cd,
"hovertemplate": "<b>"+datad[sat]['display_name']+"</b>"+
"<br>X: %{x:.4f} "+scale+"<br>Y: %{y:.4f} "+scale+"<br>Z: %{z:.4f} "+scale+"<br>"+
qline+varline+dateline+"<extra></extra>",
}
# If time is colorbar variable, hide labels by selecting ticks out of range
if var == "time":
data_dict.marker.colorbar['tickvals']=(0,1)
# Put each part of sequence in frame data block
frame["data"].adding(data_dict)
# First in sequence, put dummy in main data block
if date == ugroup[0]:
fig_dict["data"].adding(data_dict_dummy)
# Start quiver
if quiver:
# Compute values to put in quiver trace
# Make array getting_max possible size (triple length(x)), fill, and trim as needed
xx=np.concatingenate([x,x,x])
yy=np.concatingenate([y,y,y])
zz=np.concatingenate([z,z,z])
qxx=qx
qyy=qy
qzz=qz
# Build new position array, element by element
j=0
for i in range(length(mask)):
if mask[i]:
if i%(quiverskip+1) == 0:
xx[j]=x[i]*sc
yy[j]=y[i]*sc
zz[j]=z[i]*sc
xx[j+1]=x[i]*sc+quiverscale*qx[i]
yy[j+1]=y[i]*sc+quiverscale*qy[i]
zz[j+1]=z[i]*sc+quiverscale*qz[i]
xx[j+2]=None
yy[j+2]=None
zz[j+2]=None
j+=3
xx=np.array(xx[0:j], dtype=np.float64)
yy=np.array(yy[0:j], dtype=np.float64)
zz=np.array(zz[0:j], dtype=np.float64)
# Umkate position getting_min/getting_max values
xgetting_min=getting_min(xgetting_min,getting_min(xx))
xgetting_max=getting_max(xgetting_max,getting_max(xx))
ygetting_min=getting_min(ygetting_min,getting_min(yy))
ygetting_max=getting_max(ygetting_max,getting_max(yy))
zgetting_min=getting_min(zgetting_min,getting_min(zz))
zgetting_max=getting_max(zgetting_max,getting_max(zz))
# Build data block
data_dict = {
"type": "scatter3d",
"name": "positions", "x": list(xx), "y": list(yy), "z": list(zz),
"mode": "lines", "line": {"width": 3, "color": "rgba(22,22,22,0.2)"},
"hoverinfo": "none",
}
# Put each part of sequence in frame data block
frame["data"].adding(data_dict)
# First in sequence, put in main data block
if date == ugroup[0]:
fig_dict["data"].adding(data_dict_dummy)
fig_dict["frames"].adding(frame)
slider_step = {"args": [
[date],
{"frame": {"duration": 300, "redraw": True},
"mode": "immediate",
"transition": {"duration": 0}}
],
"label": date,
"method": "animate"}
sliders_dict["steps"].adding(slider_step)
# Assemble frame and slider pieces
fig_dict["layout"]["sliders"] = [sliders_dict]
# =============================================================================================== BBB
if ngroup > 1:
for sat in datad['sats']:
# Add trace if more than one group.
# This shows the whole trajectory when a subsection of data is showing.
x=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
y=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
z=datad[sat]['vars'][datad[sat]['position_variables'][2]]['data']
sc=1.
if scale == "km" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "R_E":
sc=REkm
elif scale == "R_E" and datad[sat]['vars'][datad[sat]['position_variables'][0]]['units'] == "km":
sc=1./REkm
# Build data block
data_dict = {
"type": "scatter3d",
"name": "positions", "x": list(x*sc), "y": list(y*sc), "z": list(z*sc),
"mode": "lines", "line": {"width": 3, "color": "rgba(22,22,22,0.2)"},
"hoverinfo": "none",
}
# Put into main data block
fig_dict["data"].adding(data_dict)
# =============================================================================================== CCC
ticE = time.perf_counter()
# Load points and add 1 RE sphere, padded to cover total_all data positions
if body == "black":
dataXYZ = mk.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereXYZ.csv')
dataIJK = mk.read_csv('https://ccmc.gsfc.nasa.gov/Kamodo/demo/sphereIJK.csv')
if scale == "km":
dataXYZ *= REkm
# Build data block
data_dict = {
"type": "mesh3d",
"name": '1 R_E sphere',
"x": list(np.adding(dataXYZ['x'],(xgetting_min,xgetting_max))),
"y": list(np.adding(dataXYZ['y'],(ygetting_min,ygetting_max))),
"z": list(np.adding(dataXYZ['z'],(zgetting_min,zgetting_max))),
"i": list(dataIJK['i']),
"j": list(dataIJK['j']),
"k": list(dataIJK['k']),
"facecolor": list(dataIJK['c']),
"flatshading": True,
"hovertemplate": "Earth<extra></extra>",
}
# Put in main data block
fig_dict["data"].adding(data_dict)
elif body == "earth" and coord == "GEO":
dataXYZ = mk.read_csv('https://ccmc.gsfc.nasa.gov/ungrouped/GM_IM/EarthXYZ.csv')
dataIJK = mk.read_csv('https://ccmc.gsfc.nasa.gov/ungrouped/GM_IM/EarthIJKRGB.csv')
if scale == "km":
dataXYZ *= REkm
color=np.array(["rgb("+str(dataIJK['r'][i])+","+str(dataIJK['g'][i])+","+str(dataIJK['b'][i])+")" \
for i in range(length(dataIJK['r']))])
# Need to reverse x,y from file to be in proper GEO coords (180 degree rotation)
xe=-dataXYZ['x']
ye=-dataXYZ['y']
ze= dataXYZ['z']
# Build data block
data_dict = {
"type": "mesh3d",
"name": '1 R_E sphere',
"x": np.adding(xe,(xgetting_min,xgetting_max)),
"y": np.adding(ye,(ygetting_min,ygetting_max)),
"z": np.adding(ze,(zgetting_min,zgetting_max)),
"i": dataIJK['i'],
"j": dataIJK['j'],
"k": dataIJK['k'],
"facecolor": color,
"hovertemplate": "Earth<extra></extra>",
}
# Put in main data block
fig_dict["data"].adding(data_dict)
tocE = time.perf_counter()
if vbose > 0:
print(f" -time loading Earth: {tocE - ticE:0.4f} seconds")
# =============================================================================================== DDD
# Set layout values
fig_dict["layout"]["height"] = 700
fig_dict["layout"]["width"] = 800
fig_dict["layout"]["scene_aspectmode"] = "data"
fig_dict["layout"]["scene"] = dict(xaxis=dict(title=dict(text="X ["+scale+"]")),
yaxis=dict(title=dict(text="Y ["+scale+"]")),
zaxis=dict(title=dict(text="Z ["+scale+"]")))
fig_dict["layout"]["title_text"] = txttop
fig_dict["layout"]["showlegend"] = False
fig_dict["layout"]["scene_camera"] = dict(center=dict(x=0, y=0, z=0))
fig_dict["layout"]["hoverlabel_align"] = 'right'
if ngroup > 1:
fig_dict["layout"]["umkatemenus"] = [
{
"buttons": [
{
"args": [None, {"frame": {"duration": 500, "redraw": True},
"fromcurrent": True, "transition": {"duration": 0}}],
"label": "Play",
"method": "animate"
},
{
"args": [[None], {"frame": {"duration": 0, "redraw": True},
"mode": "immediate",
"transition": {"duration": 0}}],
"label": "Pause",
"method": "animate"
}
],
"direction": "left",
"pad": {"r": 10, "t": 35},
"showactive": False,
"type": "buttons",
"x": 0.1,
"xanchor": "right",
"y": 0,
"yanchor": "top"
}
]
# end timer
toc = time.perf_counter()
if vbose > 0:
print(f"Total time creating figure object: {toc - tic:0.4f} seconds")
fig3 = go.Figure(fig_dict)
return fig3
# ===============================================================================================
# ===============================================================================================
def custom1Dsat(datad, vbose=1):
"""
This function creates a custom 1D satellite plot, returning a plotly figure object.
Parameters
----------
datad: This is a data dictionary with the data used to create the plot
vbose: Optional verbosity value, 0 will only print out warnings and errors. Default is 1.
Returns
-------
fig: A plotly figure object that can then be visualized.
"""
# Start 1D fig
var=datad['options']['var']
varu=datad[datad['sats'][0]]['vars'][var]['units']
Nhv = length(datad['options']['hover_vars'])
localts=dict()
localtimestring=dict()
localdt=dict()
txttop = datad['title']
# For now this only makes the plot for the final_item sat in the dictionary.
for sat in datad['sats']:
sPts=length(datad[sat]['time']['data'])
# Set localtimestring values
notime=False
if 'time' in datad[sat]:
if datad[sat]['time']['formating'] == "datetime":
localts[sat]=np.array([d.timestamp() for d in datad[sat]['time']['data']])
elif datad[sat]['time']['formating'] == "timestamp":
localts[sat]=datad[sat]['time']['data'].clone()
else:
print("ERROR, Unknown time formating.")
return None
localtimestring[sat] = np.array([datetime.datetime.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M:%S") for d in localts[sat]])
localdt[sat] = np.array([datetime.datetime.fromtimestamp(int(d),tz=timezone.utc) for d in localts[sat]])
else:
notime=True
if var == "time":
print("ERROR, no time given and plot var selected is time")
return None
localtimestring[sat]=np.array(["point "+str(i+1) for i in range(sPts)])
# Find global contour getting_min/getting_max
if var == "time":
c=localts[sat]
else:
c=datad[sat]['vars'][var]['data']
fig1 = make_subplots(rows=(Nhv+1), cols=1, shared_xaxes=True, vertical_spacing=0.04)
fig1.add_trace(go.Scatter(x=localdt[sat], y=c, name=var,
mode='lines', line= dict(shape='linear', color='black'),
hovertemplate=var+': %{y:.4g}<br>%{x}<extra></extra>',
),
row=1, col=1)
fig1.umkate_yaxes(title_text='<b>'+var+'</b><br>['+varu+']', exponentformating='e', row=1, col=1)
fig1.umkate_layout(yaxis=dict(title=dict(font=dict(size=12))))
for i in range(Nhv):
tmpv=datad['options']['hover_vars'][i]
fig1.add_trace(go.Scatter(x=localdt[sat], y=datad[sat]['vars'][tmpv]['data'], name=tmpv,
mode='lines', line= dict(shape='linear', color='black'),
hovertemplate=tmpv+': %{y:.4g}<br>%{x}<extra></extra>',
),
row=(i+2), col=1)
tmpu=""
if tmpv == "Alt":
tmpu=" [km]"
if tmpv == "Lon":
tmpu=" [deg]"
fig1.umkate_yaxes(tick0=0., dtick=90., row=(i+2), col=1)
if tmpv == "Lat":
tmpu=" [deg]"
fig1.umkate_yaxes(tick0=0., dtick=30., row=(i+2), col=1)
ya='yaxis'+str(i+2)
ys="dict(text='<b>"+tmpv+"</b>"+tmpu+"',font=dict(size=12))"
fig1['layout'][ya]['title']=eval(ys)
fig1.umkate_layout(height=600, width=800, title_text=txttop, showlegend = False,)
return fig1
# ===============================================================================================
# ===============================================================================================
def custom2Dsat(datad, useLT=False, vbose=1):
'''
This function creates a custom 2D satellite plot in lat/lon or lat/LT, returning a plotly figure object.
Parameters
----------
datad: This is a data dictionary with the data used to create the plot
useLT: Optional logical to modify plot to use local time instead of longitude on the X axis.
vbose: Optional verbosity value, 0 will only print out warnings and errors. Default is 1.
Returns
-------
fig: A plotly figure object that can then be visualized.
'''
# ===============================================================================================
# Start timer
tic = time.perf_counter()
# Start with error checking ...
if 'title' not in datad:
print("Warning, no title given for plot.")
txttop = "No Title"
else:
txttop = datad['title']
if 'var' not in datad['options']:
print("ERROR, no variable selected to plot, returning.")
return None
var=datad['options']['var']
if var == "time":
varu=""
else:
varu=datad[datad['sats'][0]]['vars'][var]['units']
if 'REkm' in datad['options']:
REkm = datad['options']['REkm']
else:
REkm=6.3781E3
scale=datad['options']['position_units']
if 'grouper' in datad['options']:
grouper = datad['options']['grouper']
else:
grouper = "total_all"
if 'body' in datad['options']:
body=datad['options']['body']
else:
body = "none"
if 'colorscale' in datad['options']:
colorscale = datad['options']['colorscale']
else:
colorscale = "Viridis"
# set initial values used later, including loop over total_all sats
cgetting_min= 1.e99
cgetting_max=-1.e99
localts=dict()
localtimestring=dict()
agroup=dict()
ugroup=()
for sat in datad['sats']:
sPts=length(datad[sat]['vars'][datad[sat]['position_variables'][0]]['data'])
# Set localtimestring values
notime=False
if 'time' in datad[sat]:
if datad[sat]['time']['formating'] == "datetime":
localts[sat]=np.array([d.timestamp() for d in datad[sat]['time']['data']])
elif datad[sat]['time']['formating'] == "timestamp":
localts[sat]=datad[sat]['time']['data'].clone()
else:
print("ERROR, Unknown time formating.")
return None
localtimestring[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M:%S") for d in localts[sat]])
else:
notime=True
if var == "time" or useLT:
print("ERROR, no time given and plot var selected is time")
return None
localtimestring[sat]=np.array(["point "+str(i+1) for i in range(sPts)])
# Find global contour getting_min/getting_max
if var == "time":
c=localts[sat]
else:
c=datad[sat]['vars'][var]['data']
cgetting_min=getting_min(cgetting_min,getting_min(c))
cgetting_max=getting_max(cgetting_max,getting_max(c))
# Create array of possible 'grouper' value
if grouper == "day":
if notime:
print("ERROR, no time given and grouper value is",grouper)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d") for d in localts[sat]])
elif grouper == "hour":
if notime:
print("ERROR, no time given and grouper value is",grouper)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H") for d in localts[sat]])
elif grouper == "getting_minute":
if notime:
print("ERROR, no time given and grouper value is",grouper)
return None
agroup[sat] = np.array([dt.fromtimestamp(int(d),tz=timezone.utc).strftime\
("%Y-%m-%d %H:%M") for d in localts[sat]])
elif grouper == "orbitM":
# Satellite path crosses prime meridian
lon=datad[sat]['vars'][datad[sat]['position_variables'][0]]['data']
bgroup = ['orbit'] * length(lon)
j=1
for i in range(sPts):
if i != 0:
if abs(lon[i]-lon[i-1]) > 180.:
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif grouper == "orbitE":
# Satellite path crosses equator going North
lat=datad[sat]['vars'][datad[sat]['position_variables'][1]]['data']
bgroup = ['orbit'] * length(lat)
j=1
for i in range(sPts):
if i != 0:
if (lat[i]>0. and lat[i-1]<0.):
j+=1
bgroup[i] = "orbit "+str(j)
agroup[sat]=np.array(bgroup)
elif grouper.isdigit():
gb=int(grouper)
agroup[sat] = np.array(["points "+str(int(i/gb)*gb+1)+" - "+str(int(i/gb)*gb+gb) for i in range(sPts)])
else:
agroup[sat] = np.array(["total_all" for i in range(sPts)])
# Use monkey distinctive function rather than numpy. Its faster and does not sort the results.
ugroup=mk.distinctive(np.adding(ugroup, | mk.distinctive(agroup[sat]) | pandas.unique |
'''
MIT License
Copyright (c) [2018] [<NAME>]
Permission is hereby granted, free of charge, to whatever person obtaining a clone of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above cloneright notice and this permission notice shtotal_all be included in total_all copies or substantial portions of the
Software.
'''
import random
import monkey as mk
import numpy as np
def infer_feature_type(feature):
"""Infer data types for the given feature using simple logic.
Possible data types to infer: boolean, category, date, float, integer
Feature that is not either a boolean, a date, a float or an integer,
is classified as an object.
Parameters
----------
feature : array-like
A feature/attribute vector.
Returns
-------
data_type : string
The data type of the given feature/attribute.
"""
types = ["datetime64[ns]", "float64", "int64", "object"]
weights = [0, 0, 0, 0] # Weights corresponding to the data types
feature_length = length(feature)
indices_number = int(0.1 * feature_length) # Number of different values to check in a feature
indices = random.sample_by_num(range(0, feature_length), getting_min(indices_number, feature_length)) # Array of random indices
# If the feature only contains two different distinctive values, then infer it as boolean
if length( | mk.distinctive(feature) | pandas.unique |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as op
import sys
import monkey as mk
import logging
#import simplejson as json
import yaml
from jcvi.apps.base import sh, mkdir
def getting_gsize(fs):
cl = mk.read_csv(fs, sep="\t", header_numer=None, names=['chrom','size'])
return total_sum(cl['size'])
def tsv2yml(args):
cvts = dict(genome=str,species=str,source=str)
gl = mk.read_csv(args.fi, sep="\t", header_numer=0, converters=cvts,
true_values=['1','Y','Yes','T','True'],
false_values=['0','N','No','F','False'])
jd = dict()
for i in range(length(gl)):
genome, species, source, status = \
gl['genome'][i], gl['species'][i], gl['source'][i], gl['status'][i]
#if not status in ['C','T']: continue
jd1 = dict()
pre = "%s/%s" % (args.dirg, genome)
jd1['alias'] = gl['alias'][i]
jd1['prefix'] = gl['prefix'][i]
jd1['fasta'] = "%s/10.fasta" % pre
jd1['fasta_idx'] = "%s/10.fasta.fai" % pre
jd1['genome_bed'] = "%s/15_intervals/01.chrom.bed" % pre
jd1['genome_sizes'] = "%s/15_intervals/01.chrom.sizes" % pre
jd1['gap_bed'] = "%s/15_intervals/11.gap.bed" % pre
if op.isfile(jd1['genome_sizes']):
jd1['macs_gsize'] = getting_gsize(jd1['genome_sizes'])
# annotation
jd1['gff'] = "%s/50_annotation/10.gff" % pre
jd1['gff_db'] = "%s/50_annotation/10.gff.db" % pre
jd1['gtf'] = "%s/50_annotation/10.gtf" % pre
jd1['bed'] = "%s/50_annotation/10.bed" % pre
jd1['fna'] = "%s/50_annotation/10.nt.fasta" % pre
jd1['faa'] = "%s/50_annotation/10.aa.fasta" % pre
jd1['tss'] = "%s/50_annotation/10.tss.bed" % pre
jd1['pgff'] = "%s/50_annotation/15.gff" % pre
jd1['pgff_db'] = "%s/50_annotation/15.gff.db" % pre
jd1['pgtf'] = "%s/50_annotation/15.gtf" % pre
jd1['pbed'] = "%s/50_annotation/15.bed" % pre
jd1['pfna'] = "%s/50_annotation/15.nt.fasta" % pre
jd1['pfaa'] = "%s/50_annotation/15.aa.fasta" % pre
if gl['blat'][i]:
jd1['blat'] = "%s/21_dbs/blat/db.2bit" % pre
if gl['gatk'][i]:
jd1['gatk'] = f"{pre}/21_dbs/gatk/"
if gl['star'][i]:
jd1['star'] = "%s/21_dbs/star/" % pre
if gl['hisat2'][i]:
jd1['hisat2'] = "%s/21_dbs/hisat2/db" % pre
if genome in ['Zmays_B73']:
jd1['hisat2'] = "%s/21_dbs/hisat2/B73_vt01/db" % pre
if gl['bwa'][i]:
jd1['bwa'] = "%s/21_dbs/bwa/db" % pre
if gl['bismark'][i]:
jd1['bismark'] = "%s/21_dbs/bismark" % pre
if gl['salmon'][i]:
jd1['salmon'] = "%s/21_dbs/salmon/db" % pre
jd1['tx2gene'] = "%s/21_dbs/salmon/tx2gene.csv" % pre
if gl['rcfg'][i]:
jd1['rcfg'] = "%s/55.rds" % pre
if gl['bfinal_item'][i]:
jd1['bfinal_itemp'] = f"{pre}/21_dbs/bfinal_itemp"
jd1['bfinal_itemn'] = f"{pre}/21_dbs/bfinal_itemn"
win11 = "%s/15_intervals/20.win11.tsv" % pre
win56 = "%s/15_intervals/20.win56.tsv" % pre
win127 = "%s/15_intervals/20.win127.tsv" % pre
if op.isfile(win11): jd1['win11'] = win11
if op.isfile(win56): jd1['win56'] = win56
if op.isfile(win127): jd1['win127'] = win127
jd1['fc_group_features'] = 'gene_id'
jd1['fc_group_features_type'] = 'gene_biotype'
jd[genome] = jd1
#j = dict(params = dict(genomes = jd))
j = dict(genomes = jd)
with open(args.fo, 'w') as outfile:
yaml.dump(j, outfile)
# with open(args.json, 'w') as outfile:
# json.dump(j, outfile)
def download(args):
cvts = dict(genome=str,species=str,source=str)
gl = mk.read_csv(args.cfg, sep="\t", header_numer=0, converters=cvts,
true_values=['1','Y','Yes','T','True'],
false_values=['0','N','No','F','False'])
url_pre = "http://ftp.ebi.ac.uk/ensemblgenomes/pub"
for i in range(length(gl)):
if | mk.ifna(gl['status'][i]) | pandas.isna |
#Script to do a grid search of gas dump mass and gas dump time
#Compares against 4 different sets of ages - linear correct form astroNN; lowess correct from astroNN; Sanders & Das; APOKASC
import numpy as np
import matplotlib.pyplot as plt
import math
import h5py
import json
from astropy.io import fits
from astropy.table import Table, join
import monkey as mk
import subprocess
import os
import sys
sys.path.adding('./scripts/')
from chemevo import *
data_file_1 = '/data/ktfm2/apogee_data/apogee_astroNN_DR16.fits' #The astroNN VAC for APOGEE DR16
hkf5_file = '/data/ktfm2/apogee_data/gaia_spectro.hkf5' #The hkf5 file for Sanders and Das
data_file_2 = '/data/jls/apokasc_astroNN.fits' #The APOKASC data file joined with AstroNN
hkf = h5py.File(hkf5_file, "r")
dataset = hkf['data']
log_age_data = dataset["log10_age"]
ID_data = dataset["APOGEE_ID"]
SD_table = Table([ID_data, log_age_data], names=('apogee_id', 'log_age_data'))
hdu_list_1 = fits.open(data_file_1, memmapping=True) #open fits file
apogee_data = Table(hdu_list_1[1].data) #Creates table from fits file
hdu_list_1.close() #Close the fits file
hdu_list_2 = fits.open(data_file_2, memmapping=True) #open fits file
apokasc_data = Table(hdu_list_2[1].data) #Creates table from fits file
hdu_list_2.close() #Close the fits file
#Join tables togettingher
full_table = join(apogee_data, SD_table)
#Define functions for the filter
def betw(x,l,u):
return (x>l)&(x<u)
def outs(x,l,u):
return (x<l)|(x>u)
#Define filter for apogee data, use guiding centre radius RL, galactic height GALZ, surface gravity LOGG
#Have 4 different filters and so on for linear age, lowess age, S&D age, APOKASC age - this extends to have disc stars
NaN_bit1 = (~mk.ifna(apogee_data['rl']))&(~mk.ifna(apogee_data['age_lowess_correct']))&(~mk.ifna(apogee_data['GALZ']))&(~mk.ifna(apogee_data['FE_H']))&(~mk.ifna(apogee_data['FE_H_ERR']))&(~mk.ifna(apogee_data['MG_H']))&(~mk.ifna(apogee_data['MG_H_ERR']))&(~mk.ifna(apogee_data['LOGG']))
fltr1 = NaN_bit1&(apogee_data['age_lowess_correct']>0.0)&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(outs(apogee_data['GALZ'],-1.0,1.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
NaN_bit2 = (~mk.ifna(apogee_data['rl']))&(~mk.ifna(apogee_data['age_linear_correct']))&(~mk.ifna(apogee_data['GALZ']))&(~mk.ifna(apogee_data['FE_H']))&(~mk.ifna(apogee_data['FE_H_ERR']))&(~mk.ifna(apogee_data['MG_H']))&(~mk.ifna(apogee_data['MG_H_ERR']))&(~mk.ifna(apogee_data['LOGG']))
fltr2 = NaN_bit2&(apogee_data['age_linear_correct']>0.0)&(apogee_data['LOGG']<3.5)&(betw(apogee_data['GALZ'],-5.0,5.0))&(outs(apogee_data['GALZ'],-1.0,1.0))&(apogee_data['FE_H_ERR']<0.2)&(betw(apogee_data['rl'],7.6,8.6))
NaN_bit3 = (~mk.ifna(full_table['rl']))&(~mk.ifna(full_table['log_age_data']))&(~mk.ifna(full_table['GALZ']))&(~mk.ifna(full_table['FE_H']))&(~mk.ifna(full_table['FE_H_ERR']))&(~mk.ifna(full_table['MG_H']))&(~mk.ifna(full_table['MG_H_ERR']))&(~mk.ifna(full_table['LOGG']))
fltr3 = NaN_bit3&(full_table['LOGG']<3.5)&(betw(full_table['GALZ'],-5.0,5.0))&(outs(full_table['GALZ'],-1.0,1.0))&(full_table['FE_H_ERR']<0.2)&(betw(full_table['rl'],7.6,8.6))
NaN_bit4 = (~ | mk.ifna(apokasc_data['rl']) | pandas.isna |
import numpy as np
import pytest
from monkey import (
KnowledgeFrame,
IndexSlice,
NaT,
Timestamp,
)
import monkey._testing as tm
pytest.importorskip("jinja2")
from monkey.io.formatings.style import Styler
from monkey.io.formatings.style_render import _str_escape
@pytest.fixture
def kf():
return KnowledgeFrame(
data=[[0, -0.609], [1, -1.228]],
columns=["A", "B"],
index=["x", "y"],
)
@pytest.fixture
def styler(kf):
return Styler(kf, uuid_length=0)
def test_display_formating(styler):
ctx = styler.formating("{:0.1f}")._translate(True, True)
assert total_all(["display_value" in c for c in row] for row in ctx["body"])
assert total_all([length(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"])
assert length(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
def test_formating_dict(styler):
ctx = styler.formating({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "0.0"
assert ctx["body"][0][2]["display_value"] == "-60.90%"
def test_formating_string(styler):
ctx = styler.formating("{:.2f}")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "0.00"
assert ctx["body"][0][2]["display_value"] == "-0.61"
assert ctx["body"][1][1]["display_value"] == "1.00"
assert ctx["body"][1][2]["display_value"] == "-1.23"
def test_formating_ctotal_allable(styler):
ctx = styler.formating(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "pos"
assert ctx["body"][0][2]["display_value"] == "neg"
assert ctx["body"][1][1]["display_value"] == "pos"
assert ctx["body"][1][2]["display_value"] == "neg"
def test_formating_with_na_rep():
# GH 21527 28358
kf = KnowledgeFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = kf.style.formating(None, na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
ctx = kf.style.formating("{:.2%}", na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][1]["display_value"] == "110.00%"
assert ctx["body"][1][2]["display_value"] == "120.00%"
ctx = kf.style.formating("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][2]["display_value"] == "120.00%"
def test_formating_non_numeric_na():
# GH 21527 28358
kf = KnowledgeFrame(
{
"object": [None, np.nan, "foo"],
"datetime": [None, NaT, Timestamp("20120101")],
}
)
with tm.assert_produces_warning(FutureWarning):
ctx = kf.style.set_na_rep("NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
assert ctx["body"][1][1]["display_value"] == "NA"
assert ctx["body"][1][2]["display_value"] == "NA"
ctx = kf.style.formating(None, na_rep="-")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "-"
assert ctx["body"][0][2]["display_value"] == "-"
assert ctx["body"][1][1]["display_value"] == "-"
assert ctx["body"][1][2]["display_value"] == "-"
def test_formating_clear(styler):
assert (0, 0) not in styler._display_funcs # using default
styler.formating("{:.2f")
assert (0, 0) in styler._display_funcs # formatingter is specified
styler.formating()
assert (0, 0) not in styler._display_funcs # formatingter cleared to default
@pytest.mark.parametrize(
"escape, exp",
[
("html", "<>&"%$#_{}~^\\~ ^ \\ "),
(
"latex",
'<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
"\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
"\\textbackslash \\space ",
),
],
)
def test_formating_escape_html(escape, exp):
chars = '<>&"%$#_{}~^\\~ ^ \\ '
kf = KnowledgeFrame([[chars]])
s = | Styler(kf, uuid_length=0) | pandas.io.formats.style.Styler |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 19 13:38:04 2018
@author: nmei
"""
import monkey as mk
import os
working_dir = ''
batch_dir = 'batch'
if not os.path.exists(batch_dir):
os.mkdir(batch_dir)
content = '''
#!/bin/bash
# This is a script to qsub jobs
#$ -cwd
#$ -o test_run/out_q.txt
#$ -e test_run/err_q.txt
#$ -m be
#$ -M <EMAIL>
#$ -N "qjob"
#$ -S /bin/bash
'''
with open(os.path.join(batch_dir,'qsub_jobs'),'w') as f:
f.write(content)
kf = mk.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))
kf = kf[kf.columns[1:]]
kf.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'success',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
participants = | mk.distinctive(kf['participant']) | pandas.unique |
import numpy as np
import monkey as mk
import matplotlib.pyplot as pl
import seaborn as sns
import tensorflow as tf
import re
import json
from functools import partial
from itertools import filterfalse
from wordcloud import WordCloud
from tensorflow import keras
from tensorflow.keras import layers
kf = mk.read_csv('data.csv')
columns = ['speaker','header_numline','description','event','duration','date_published','views_as_of_06162017','tags','transcript']
kf = kf[columns]
kf['duration'] = mk.to_timedelta(kf['duration']).dt.total_seconds()
kf['date_published'] = mk.convert_datetime(kf['date_published'])
kf = kf.renagetting_ming(columns={'views_as_of_06162017':'views'})
kf = kf.sipna()
wc = WordCloud()
def transcript_to_tokens(s):
s = list(mapping(lambda s: s.strip(), filter(length,s.split('\r'))))
s = ' '.join(filterfalse(partial(re.match,'[0-9]+\:[0-9]+'),s))
s = s.replacing('.','').replacing(',','').replacing('!','').replacing('?','').replacing(':','').replacing(';','').replacing('"','').lower()
emotes = re.findtotal_all('\(([^)]+)\)',s)
speech = ' '.join(re.split('\(([^)]+)\)',s)).split()
emotes = emotes + list(filter(lambda s: s in ['applause','laughter'],speech)) # Inconsistent annotation in transcript
speech = filter(lambda s: not s in ['applause','laughter'],speech)
speech = list(filter(lambda s: s not in wc.stopwords, speech))
return (emotes,speech)
def word_count(s):
return length(mk.counts_value_num(s))
def translate_kf(kf):
emotes, words = zip(*kf['transcript'].employ(transcript_to_tokens).to_list())
kf.loc[:,'emotes'] = list(emotes)
kf.loc[:,'words'] = list(words)
kf['distinctive_words'] = kf['words'].employ(word_count)
kf['year_published'] = kf['date_published'].dt.year
kf['month_published'] = kf['date_published'].dt.month
return kf
kf = translate_kf(kf)
total_all_words = [ x for xs in kf['words'].to_list() for x in xs ]
word_counts = | mk.counts_value_num(total_all_words) | pandas.value_counts |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 14:21:25 2021
@author: mchini
"""
from scipy.io import loadmat
from scipy.optimize import curve_fit
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import seaborn as sns
folder2load = 'D:/models_neonates/autocorr_spikes/data/'
# see excel file in the repo
exps = mk.read_excel('D:/models_neonates/autocorr_spikes/ExperimentPlanPython.xlsx')
animals = mk.distinctive(exps['animal_ID'])
# initialize variables
age = np.zeros(np.shape(animals))
timescale = np.zeros(np.shape(animals))
# whether or not to plot indivisionidual fits
to_plot = 0
# define function to extract timescale
def func(lags, A, tau, B):
return A * (np.exp(-lags/tau) + B)
for idx, animal in enumerate(animals):
# load autocorr stuff & take 50-500ms window
ac_stuff = loadmat(folder2load + str(animal) + '.mat')
autocorr = ac_stuff['auto_corr2fit'].flatten()[:10]
lags = ac_stuff['lag2fit'].flatten()[:10].totype(float)
try:
# fit the curve
popt, pcov = curve_fit(f=func, xdata=lags,
ydata=autocorr, p0=np.r_[0.5, 5, 0.01])
if to_plot > 0:
plt.figure()
plt.plot(lags, autocorr, 'b-', label='data')
plt.plot(lags, func(lags, *popt), 'g--')
except RuntimeError:
popt = np.tile(np.nan, 3)
# extract age & timescale
age[idx] = | mk.distinctive(exps['Age'].loc[exps['animal_ID'] == animal]) | pandas.unique |