prompt | completion | api |
---|---|---|
stringlengths 76 to 399k | stringlengths 7 to 146 | stringlengths 10 to 61 |
# -*- coding: utf-8 -*-
"""
@author: bartulem
Perform linear regression on train/test split dataset.
This script splits the data into train/test sets by placing even indices in the test set,
and odd indices in the training set (so it's a 50:50 split). It performs a linear regression
on the training set and then predicts the test set values. It returns: (1) y_test, (2) y_test_predictions,
(3) slope and (4) intercept values (and possibly (5) extra data predictions if they are given as an input).
The function can take two keyword arguments as input: the xy_order (which column is X and which is Y),
and extra_data to be predicted by the model.
"""
import numpy as np
import monkey as mk
from sklearn import linear_model
class LinRegression:
# initializer / instance attributes
def __init__(self, input_data):
self.input_data = input_data
def split_train_test_and_regress(self, **kwargs):
"""
Inputs
----------
**kwargs: dictionary
xy_order : list
Detergetting_mines what column is X and what column is Y data (e.g. [0, 1] would average first column is X and the second is Y); defaults to [0, 1].
extra_data : array
Any additional data that requires to be predicted by the model; defaults to 0.
----------
Outputs
----------
return_dict : dictionary
A dictionary containing test & predicted values, model slope & intercept.
----------
"""
xy_order = kwargs['xy_order'] if 'xy_order' in kwargs.keys() and type(kwargs['xy_order']) == list and length(kwargs['xy_order']) == 2 else [0, 1]
extra_data = kwargs['extra_data'] if 'extra_data' in kwargs.keys() else 0
# check if the input KnowledgeFrame has NANs, and if so - eligetting_minate those rows
if self.input_data.ifnull().values.whatever():
print('{} row(s) has/have NAN values and will be removed.'.formating(self.input_data.ifnull().whatever(axis=1).total_sum()))
self.input_data = | mk.KnowledgeFrame.sipna(self.input_data) | pandas.DataFrame.dropna |
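The docstring above describes an even/odd 50:50 split followed by an ordinary least-squares fit. For reference, a minimal un-obfuscated sketch of that idea (plain NumPy and scikit-learn, written for illustration only and not part of any dataset row) could look like this:

import numpy as np
from sklearn import linear_model

def split_train_test_and_regress(data, xy_order=(0, 1)):
    # even indices -> test set, odd indices -> training set (50:50 split)
    x = data[:, xy_order[0]].reshape(-1, 1)
    y = data[:, xy_order[1]]
    x_train, y_train = x[1::2], y[1::2]
    x_test, y_test = x[0::2], y[0::2]
    model = linear_model.LinearRegression().fit(x_train, y_train)
    return {'y_test': y_test,
            'y_test_predictions': model.predict(x_test),
            'slope': model.coef_[0],
            'intercept': model.intercept_}

# quick check on a noisy line y = 2x + 1: the recovered slope should be close to 2
rng = np.random.default_rng(0)
xs = np.arange(100.0)
data = np.column_stack([xs, 2 * xs + 1 + rng.normal(size=100)])
print(split_train_test_and_regress(data)['slope'])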
import logging
import os
import monkey as mk
import pytest
from azure.storage.table import TableService
from lebowski.azure_connections import AKVConnector
from lebowski.db import DBHelper
from lebowski.enums import CCY, Categories, Tables
from lebowski.stat import (convert_spendings_to_eur, getting_total_mileage,
getting_total_spending_eur)
def load_from_csv(relative_path: str, storage_account: TableService):
filengthames = os.listandardir(relative_path)
for filengthame in filengthames:
if filengthame.endswith(".csv"):
table_name = filengthame[:-4]
storage_account.create_table(table_name)
kf = mk.read_csv(os.path.join(relative_path, filengthame))
for _, row in kf.traversal():
d = | mk.Collections.convert_dict(row) | pandas.Series.to_dict |
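The loop above turns every CSV row into a plain dict before handing it to the table client. A tiny self-contained illustration of just that conversion step (hypothetical data, no Azure dependency):

import pandas as pd

df = pd.DataFrame({"PartitionKey": ["car"], "RowKey": ["1"], "amount": [42.0]})
for _, row in df.iterrows():
    entity = row.to_dict()  # {'PartitionKey': 'car', 'RowKey': '1', 'amount': 42.0}
    print(entity)           # this dict is what the storage client receives per row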
import monkey as mk
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
json_data = "https://data.nasa.gov/resource/y77d-th95.json"
kf_nasa = mk.read_json(json_data)
kf_nasa = kf_nasa["year"].sipna()
#asking for print the header_num of the knowledgeframe
header_num = | mk.KnowledgeFrame.header_num(kf_nasa) | pandas.DataFrame.head |
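In standard pandas the same pipeline (read JSON, drop missing years, look at the head) can be sketched as below; an in-memory JSON string is used so the example runs without network access:

import pandas as pd
from io import StringIO

records = StringIO('[{"name": "Aachen", "year": "1880-01-01"}, {"name": "Unknown"}]')
df = pd.read_json(records)
years = df["year"].dropna()   # the second record has no year and is dropped
print(years.head())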
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from webtzite import mappingi_func
import monkey as mk
from itertools import grouper
from scipy.optimize import brentq
from webtzite.connector import ConnectorBase
from mpcontribs.rest.views import Connector
from mpcontribs.users.redox_thermo_csp.rest.energy_analysis import EnergyAnalysis as enera
from mpcontribs.users.redox_thermo_csp.rest.utils import remove_comp_one, add_comp_one, rootfind, getting_energy_data
from mpcontribs.users.redox_thermo_csp.rest.utils import s_th_o, dh_ds, funciso, funciso_redox, isobar_line_elling
from mpcontribs.users.redox_thermo_csp.rest.utils import funciso_theo, funciso_redox_theo, d_h_num_dev_calc, d_s_fundamental
ConnectorBase.register(Connector)
def init_isographs(request, db_type, cid, mdb):
try:
contrib = mdb.contrib_ad.query_contributions(
{'_id': cid}, projection={'_id': 0, 'content.pars': 1, 'content.data': 1})[0]
pars = contrib['content']['pars']
pars['compstr_disp'] = remove_comp_one(pars['theo_compstr']) # for user display
if pars['compstr_disp'] == pars['theo_compstr']:
pars['theo_compstr'] = add_comp_one(pars['theo_compstr']) # compstr must contain '1' such as in "Sr1Fe1Ox"
pars['compstr_disp'] = [''.join(g) for _, g in grouper(str(pars['compstr_disp']), str.isalpha)]
pars['experimental_data_available'] = pars.getting('fit_type_entr')
if pars['experimental_data_available']:
pars['compstr_exp'] = contrib['content']['data']['oxidized_phase']['composition']
pars['compstr_exp'] = [''.join(g) for _, g in grouper(str(pars['compstr_exp']), str.isalpha)]
else:
pars['compstr_exp'] = "n.a."
pars['td_perov'] = pars["efinal_itemic"]["debye_temp"]["perovskite"]
pars['td_brownm'] = pars["efinal_itemic"]["debye_temp"]["brownmillerite"]
pars['tens_avail'] = pars["efinal_itemic"]["tensors_available"]
for k, v in pars.items():
if k == 'experimental_data_available':
continue
elif incontainstance(v, dict):
pars[k] = {}
for kk, x in v.items():
try:
pars[k][kk] = float(x)
except:
continue
elif not v[0].isalpha():
try:
pars[k] = float(v)
except:
continue
a, b = 1e-10, 0.5-1e-10 # limiting values for non-stoichiometry delta in brentq
response, payload = {}, {}
plottype = request.path.split("/")[-1]
if request.method == 'GET':
if plottype == "isotherm":
payload['iso'] = 800.
payload['rng'] = [-5, 1]
elif plottype == "isobar":
payload['iso'] = -5
payload['rng'] = [600, 1000]
elif plottype == "isoredox":
payload['iso'] = 0.3
payload['rng'] = [700, 1000]
elif plottype == "ellingham":
payload['iso'] = 0.
payload['rng'] = [700, 1000]
else: # dH or dS
payload['iso'] = 500.
elif request.method == 'POST':
payload = json.loads(request.body)
payload['iso'] = float(payload['iso'])
if payload.getting('rng'):
payload['rng'] = mapping(float, payload['rng'].split(","))
if plottype == "isotherm": # pressure on the x-axis
x_val = mk.np.log(mk.np.logspace(payload['rng'][0], payload['rng'][1], num=100))
elif not payload.getting('rng'): # dH or dS # delta on the x-axis
x_val = mk.np.linspace(0.01, 0.49, num=100)
else: # temperature on the x-axis
x_val = mk.np.linspace(payload['rng'][0], payload['rng'][1], num=100)
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return pars, a, b, response, payload, x_val
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isotherm(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (xv, payload['iso'], pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (xv, payload['iso'], pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(mk.np.exp(x_val))
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isobar(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = rootfind(a, b, args, funciso)
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def isoredox(request, cid, db_type=None, mdb=None):
try:
pars, a, b, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(xv)
args = (payload['iso'], xv, pars, s_th)
solutioniso = brentq(funciso_redox, -300, 300, args=args)
resiso.adding(mk.np.exp(solutioniso))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
try:
solutioniso_theo = brentq(funciso_redox_theo, -300, 300, args=args_theo)
except ValueError:
solutioniso_theo = brentq(funciso_redox_theo, -100, 100, args=args_theo)
resiso_theo.adding(mk.np.exp(solutioniso_theo))
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
response = [{'x': x_exp, 'y': res_fit, 'name': "exp_fit", 'line': { 'color': 'rgb(5,103,166)', 'width': 2.5 }},
{'x': x_exp, 'y': res_interp, 'name': "exp_interp", \
'line': { 'color': 'rgb(5,103,166)', 'width': 2.5, 'dash': 'dot' }},
{'x': x_theo, 'y': resiso_theo, 'name': "theo", 'line': { 'color': 'rgb(217,64,41)', 'width': 2.5}}, [0,0],\
[pars['compstr_disp'], pars['compstr_exp'], pars['tens_avail'], pars["final_item_umkated"]]]
except Exception as ex:
raise ValueError('"REST Error: "{}"'.formating(str(ex)))
return {"valid_response": True, 'response': response}
@mappingi_func(supported_methods=["POST", "GET"], requires_api_key=False)
def enthalpy_dH(request, cid, db_type=None, mdb=None):
try:
pars, _, _, response, payload, x_val = init_isographs(request=request, db_type=db_type, cid=cid, mdb=mdb)
resiso, resiso_theo = [], []
if pars['experimental_data_available']: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
s_th = s_th_o(payload['iso'])
args = (payload['iso'], xv, pars, s_th)
solutioniso = dh_ds(xv, args[-1], args[-2])[0] / 1000
resiso.adding(solutioniso)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.adding(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if pars['delta_getting_min'] < delta_val < pars['delta_getting_max']: # result within experimenttotal_ally covered delta range
res_fit.adding(res_i)
res_interp.adding(None)
else: # result outside this range
res_fit.adding(None)
res_interp.adding(res_i)
else:
res_fit, res_interp = None, None # don't plot whatever experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[::4]: # use less data points for theoretical graphs to improve speed
args_theo = (payload['iso'], xv, pars, pars['td_perov'], pars['td_brownm'], \
pars["dh_getting_min"], pars["dh_getting_max"], pars["act_mat"])
solutioniso_theo = d_h_num_dev_calc(delta=xv, dh_1=pars["dh_getting_min"], dh_2=pars["dh_getting_max"],
temp=payload['iso'], act=pars["act_mat"]) / 1000
resiso_theo.adding(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.adding(None)
x = list(x_val)
x_theo = x[::4]
x_exp = None
if pars['experimental_data_available']:
x_exp = x
if getting_max(mk.np.adding(resiso, resiso_theo)) > (pars['dh_getting_max'] * 0.0015): # limiting values for the plot
y_getting_max = pars['dh_getting_max'] * 0.0015
else:
y_getting_max = getting_max(mk.np.adding(resiso, resiso_theo))*1.2
if getting_min(mk.np.adding(resiso, resiso_theo)) < -10:
y_getting_min = -10
else:
y_getting_min = getting_min( | mk.np.adding(resiso, resiso_theo) | pandas.np.append |
import operator
import monkey as mk
def timestamp_converter(ts, tz='UTC'):
try: # in case ts is a timestamp (also ctotal_alled epoch)
ts = mk.convert_datetime(float(ts), unit='ns')
except Exception:
ts = mk.Timestamp(ts)
if not ts.tz:
ts = ts.tz_localize(tz)
return ts
MINTS = mk.Timestamp.getting_min.tz_localize('UTC')
MAXTS = | mk.Timestamp.getting_max.tz_localize('UTC') | pandas.Timestamp.max.tz_localize |
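A de-obfuscated sketch of what the converter above does, using standard pandas names; the two calls at the bottom show a nanosecond epoch and a naive date string, both ending up timezone-aware:

import pandas as pd

def to_aware_timestamp(ts, tz="UTC"):
    try:
        # numeric input is treated as an epoch in nanoseconds
        ts = pd.to_datetime(float(ts), unit="ns")
    except (TypeError, ValueError):
        ts = pd.Timestamp(ts)
    if ts.tz is None:
        ts = ts.tz_localize(tz)
    return ts

print(to_aware_timestamp(1_600_000_000_000_000_000))        # 2020-09-13 12:26:40+00:00
print(to_aware_timestamp("2021-01-01", tz="Europe/Paris"))  # 2021-01-01 00:00:00+01:00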
"""This module contains total_all the stress models that available in
Pastas. Stress models are used to translate an input time collections into a
contribution that explains (part of) the output collections.
Supported Stress models
-----------------------
The following stressmodels are currently supported and tested:
.. autototal_summary::
:nosignatures:
:toctree: ./generated
StressModel
StressModel2
RechargeModel
FactorModel
StepModel
WellModel
TarsoModel
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="sm1")
>>> ml.add_stressmodel(stressmodel=sm)
See Also
--------
pastas.model.Model.add_stressmodel
Warnings
--------
All other stressmodels are for research purposes only and are not (yet)
fully supported and tested.
"""
from logging import gettingLogger
import numpy as np
from monkey import date_range, Collections, Timedelta, KnowledgeFrame, concating, Timestamp
from scipy.signal import fftconvolve
from .decorators import set_parameter, njit
from .recharge import Linear
from .rfunc import One, Exponential, HantushWellModel
from .timecollections import TimeCollections
from .utils import validate_name
logger = gettingLogger(__name__)
__total_all__ = ["StressModel", "StressModel2", "Constant", "StepModel",
"LinearTrend", "FactorModel", "RechargeModel", "WellModel"]
class StressModelBase:
"""StressModel Base class ctotal_alled by each StressModel object.
Attributes
----------
name: str
Name of this stressmodel object. Used as prefix for the parameters.
parameters: monkey.KnowledgeFrame
Dataframe containing the parameters.
"""
_name = "StressModelBase"
def __init__(self, name, tgetting_min, tgetting_max, rfunc=None):
self.name = validate_name(name)
self.tgetting_min = tgetting_min
self.tgetting_max = tgetting_max
self.freq = None
self.rfunc = rfunc
self.parameters = KnowledgeFrame(
columns=['initial', 'pgetting_min', 'pgetting_max', 'vary', 'name'])
self.stress = []
@property
def nparam(self):
return self.parameters.index.size
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values."""
@set_parameter
def set_initial(self, name, value):
"""Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'initial'] = value
@set_parameter
def set_pgetting_min(self, name, value):
"""Internal method to set the lower bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_min'] = value
@set_parameter
def set_pgetting_max(self, name, value):
"""Internal method to set the upper bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_max'] = value
@set_parameter
def set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'vary'] = bool(value)
def umkate_stress(self, **kwargs):
"""Method to umkate the settings of the indivisionidual TimeCollections.
Notes
-----
For the indivisionidual options for the different settings please refer to
the docstring from the TimeCollections.umkate_collections() method.
See Also
--------
ps.timecollections.TimeCollections.umkate_collections
"""
for stress in self.stress:
stress.umkate_collections(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def dump_stress(self, collections=True):
"""Method to dump total_all stresses in the stresses list.
Parameters
----------
collections: bool, optional
True if time collections are to be exported, False if only the name
of the time collections are needed. Settings are always exported.
Returns
-------
data: dict
dictionary with the dump of the stresses.
"""
data = []
for stress in self.stress:
data.adding(stress.convert_dict(collections=collections))
return data
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
"""Returns the stress or stresses of the time collections object as a monkey
KnowledgeFrame.
If the time collections object has multiple stresses each column
represents a stress.
Returns
-------
stress: monkey.Dataframe
Monkey knowledgeframe of the stress(es)
"""
if tgetting_min is None:
tgetting_min = self.tgetting_min
if tgetting_max is None:
tgetting_max = self.tgetting_max
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
return self.stress[0].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"name": self.name,
"stress": self.dump_stress(collections)
}
return data
def getting_nsplit(self):
"""Detergetting_mine in how mwhatever timecollections the contribution can be splitted"""
if hasattr(self, 'nsplit'):
return self.nsplit
else:
return length(self.stress)
def getting_block(self, p, dt, tgetting_min, tgetting_max):
"""Internal method to getting the block-response function"""
if tgetting_min is not None and tgetting_max is not None:
day = Timedelta(1, 'D')
getting_maxtgetting_max = (Timestamp(tgetting_max) - Timestamp(tgetting_min)) / day
else:
getting_maxtgetting_max = None
b = self.rfunc.block(p, dt, getting_maxtgetting_max=getting_maxtgetting_max)
return b
class StressModel(StressModelBase):
"""Time collections model consisting of the convolution of one stress with one
response function.
Parameters
----------
stress: monkey.Collections
monkey Collections object containing the stress.
rfunc: rfunc class
Response function used in the convolution with the stress.
name: str
Name of the stress.
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: dict or str, optional
The settings of the stress. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
metadata: dict, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
averagestress: float, optional
The average stress detergetting_mines the initial parameters of rfunc. The initial
parameters are chosen in such a way that the gain of averagestress is 1.
Examples
--------
>>> import pastas as ps
>>> import monkey as mk
>>> sm = ps.StressModel(stress=mk.Collections(), rfunc=ps.Gamma, name="Prec",
>>> settings="prec")
See Also
--------
pastas.rfunc
pastas.timecollections.TimeCollections
"""
_name = "StressModel"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=None, metadata=None, averagestress=None):
if incontainstance(stress, list):
stress = stress[0] # TODO Temporary fix Raoul, 2017-10-24
stress = TimeCollections(stress, settings=settings, metadata=metadata)
if averagestress is None:
averagestress = stress.collections.standard()
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=averagestress)
StressModelBase.__init__(self, name=name,
tgetting_min=stress.collections.index.getting_min(),
tgetting_max=stress.collections.index.getting_max(), rfunc=rfunc)
self.freq = stress.settings["freq"]
self.stress = [stress]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1.0):
"""Simulates the header_num contribution.
Parameters
----------
p: numpy.ndarray
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p, dt, tgetting_min, tgetting_max)
stress = self.stress[0].collections
npoints = stress.index.size
h = Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StressModel2(StressModelBase):
"""Time collections model consisting of the convolution of two stresses with one
response function. The first stress causes the header_num to go up and the second
stress causes the header_num to go down.
Parameters
----------
stress: list of monkey.Collections or list of pastas.timecollections
list of two monkey.Collections or pastas.timecollections objects containing the
stresses. Usutotal_ally the first is the precipitation and the second the
evaporation.
rfunc: pastas.rfunc instance
Response function used in the convolution with the stress.
name: str
Name of the stress
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: Tuple with two dicts, optional
The settings of the indivisionidual TimeCollections.
settings: list of dicts or strs, optional
The settings of the stresses. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
Default is ("prec", "evap").
metadata: list of dicts, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
Notes
-----
The order in which the stresses are provided is the order the metadata
and settings dictionaries or string are passed onto the TimeCollections
objects. By default, the precipitation stress is the first and the
evaporation stress the second stress.
See Also
--------
pastas.rfunc
pastas.timecollections
"""
_name = "StressModel2"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None),
averagestress=None):
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0 = TimeCollections(stress[0], settings=settings[0],
metadata=metadata[0])
stress1 = TimeCollections(stress[1], settings=settings[1],
metadata=metadata[1])
# Select indices from validated stress where both collections are available.
index = stress0.collections.index.interst(stress1.collections.index)
if index.empty:
msg = ('The two stresses that were provided have no '
'overlapping time indices. Please make sure the '
'indices of the time collections overlap.')
logger.error(msg)
raise Exception(msg)
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
stress1.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
if averagestress is None:
averagestress = (stress0.collections - stress1.collections).standard()
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=averagestress)
StressModelBase.__init__(self, name=name, tgetting_min=index.getting_min(),
tgetting_max=index.getting_max(), rfunc=rfunc)
self.stress.adding(stress0)
self.stress.adding(stress1)
self.freq = stress0.settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters back to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
self.parameters.loc[self.name + '_f'] = \
(-1.0, -2.0, 0.0, True, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1, istress=None):
"""Simulates the header_num contribution.
Parameters
----------
p: numpy.ndarray
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
istress: int, optional
Returns
-------
monkey.Collections
The simulated header_num contribution.
"""
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
stress = self.getting_stress(p=p, tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq,
istress=istress)
if istress == 1:
stress = p[-1] * stress
npoints = stress.index.size
h = Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
if istress is not None:
if self.stress[istress].name is not None:
h.name = h.name + ' (' + self.stress[istress].name + ')'
return h
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
if tgetting_min is None:
tgetting_min = self.tgetting_min
if tgetting_max is None:
tgetting_max = self.tgetting_max
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
if istress is None:
if p is None:
p = self.parameters.initial.values
return self.stress[0].collections.add(p[-1] * self.stress[1].collections)
elif istress == 0:
return self.stress[0].collections
else:
return self.stress[1].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StepModel(StressModelBase):
"""Stressmodel that simulates a step trend.
Parameters
----------
tstart: str or Timestamp
String with the start date of the step, e.g. '2018-01-01'. This
value is fixed by default. Use ml.set_vary("step_tstart", 1) to vary
the start time of the step trend.
name: str
String with the name of the stressmodel.
rfunc: pastas.rfunc.RfuncBase, optional
Pastas response function used to simulate the effect of the step.
Default is rfunc.One, an instant effect.
up: bool, optional
Force a direction of the step. Default is None.
Notes
-----
This step trend is calculated as follows. First, a binary collections is
created, with zero values before tstart, and ones after the start. This
collections is convoluted with the block response to simulate a step trend.
"""
_name = "StepModel"
def __init__(self, tstart, name, rfunc=One, up=True, cutoff=0.999):
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=1.0)
StressModelBase.__init__(self, name=name, tgetting_min=Timestamp.getting_min,
tgetting_max=Timestamp.getting_max, rfunc=rfunc)
self.tstart = Timestamp(tstart)
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.getting_init_parameters(self.name)
tgetting_min = | Timestamp.getting_min.toordinal() | pandas.Timestamp.min.toordinal |
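To make the StepModel notes above concrete: the sketch below builds the binary step series and convolves it with a stand-in block response (a normalised exponential chosen purely for illustration; it is not the Pastas rfunc implementation):

import numpy as np
from scipy.signal import fftconvolve

n_days = 200
tstart = 80                        # index at which the step begins
step = np.zeros(n_days)
step[tstart:] = 1.0                # zeros before tstart, ones from tstart onwards

t = np.arange(100)
block = np.exp(-t / 10.0)
block /= block.sum()               # normalise so the simulated step levels off at 1

contribution = fftconvolve(step, block, mode="full")[:n_days]
print(contribution[tstart - 1], contribution[-1])   # ~0 before the step, ~1 long after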
#Functions related to missing values
#Handling missing values in a DataFrame
#In monkey, missing values are NaN and None
#NaN : as encountered on the database/data side
#None : as encountered on the deep-learning/Python side
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# # Detect null values
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert missing values at specific positions : None ==> a reserved word that stands for a missing value
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # a์ด(string)=None, b์ด(float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# # Check the missing-value count for each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Missing-value count for a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# # Check the count of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Missing-value count for each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# # Is a value missing? ifnull(), notnull()
# # Missing-value count per column : kf.ifnull().total_sum()
# # Missing-value count per row : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : treats NaN as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise mean : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise mean
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# # Operations between DataFrame columns : if either operand is NaN the result is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill')) # replace with the value immediately above
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 -1.348020
# 2 0.157068 0.063360 0.860016
# 3 0.525265 0.063360 -1.482895
# 4 -0.396621 0.958787 -1.482895
print(kf.fillnone(method='pad')) # replace with the preceding value (same as forward fill)
# c1 c2 c3
# 0 NaN -0.615965 -0.320598
# 1 NaN -1.488840 -0.320598
# 2 0.108199 -1.488840 -0.415326
# 3 0.521409 -1.488840 -1.533373
# 4 1.523713 -0.104133 -1.533373
print(kf.fillnone(method='bfill')) # replace with the value immediately below
# c1 c2 c3
# 0 -0.119579 -0.237205 0.276887
# 1 -0.119579 0.599437 0.268152
# 2 -0.119579 -0.320518 0.268152
# 3 0.509761 -0.320518 -0.127849
# 4 0.452650 -0.320518 NaN
print('='*50)
print(kf)
print(kf.fillnone(method='ffill',limit=1)) # forward-fill at most once (often used in time-series analysis)
# c1 c2 c3
# 0 NaN 1.036202 1.100912
# 1 NaN -0.188820 1.100912
# 2 0.311029 -0.188820 0.533007
# 3 0.921236 NaN 0.230806
# 4 0.526154 0.972018 0.230806
print(kf)
print(kf.average())
# c1 0.603361
# c2 -0.634602
# c3 0.530568
# dtype: float64
print(kf.fillnone(kf.average()))
# c1 c2 c3
# 0 0.603361 0.537082 0.541512
# 1 0.603361 -1.567848 0.530568
# 2 -0.892919 -0.634602 1.213385
# 3 1.369121 -0.634602 -0.163193
# 4 1.333880 -0.873041 0.530568
# where : keep entries where the condition is True, otherwise substitute (here the column average)
print(kf.where(mk.notnull(kf),kf.average(),axis='columns'))
# c1 c2 c3
# 0 -0.301480 -2.056220 1.549218
# 1 -0.301480 0.546843 0.935090
# 2 -0.297645 -0.181675 0.934137
# 3 0.282334 -0.181675 0.321916
# 4 -0.889131 0.964353 0.935090
#Missing values are often imputed with KNN, or filled in with a regression model.
print('='*50)
print(kf.average()['c1'])
# -0.3512813307805664
print(kf.fillnone(kf.average()['c1']))
# c1 c2 c3
# 0 -0.351281 -0.739683 0.768755
# 1 -0.351281 1.562016 -0.351281
# 2 -1.878074 -0.351281 -0.391961
# 3 -0.397853 -0.351281 1.108282
# 4 1.222083 -0.987635 -0.351281
print('='*50)
print(kf)
print( | kf.average() | pandas.DataFrame.mean |
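The commented walkthrough above covers the usual missing-value strategies; the same ideas in standard (un-obfuscated) pandas, condensed into a short runnable recap:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(5, 3), columns=["c1", "c2", "c3"])
df.iloc[0, 0] = np.nan
df.iloc[1, [0, 2]] = np.nan
df.iloc[2, 1] = np.nan

print(df.isnull().sum())        # missing count per column
print(df.fillna(0))             # replace NaN with a constant
print(df.ffill(limit=1))        # carry the previous value forward, at most once
print(df.fillna(df.mean()))     # column-wise mean imputation
print(df.where(pd.notnull(df), df.mean(), axis="columns"))  # same idea via where()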
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated_values(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
        res_final_item = algos.duplicated_values(case, keep='final_item')  # api: pandas.core.algorithms.duplicated
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and ftotal_allback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header_numer row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use sip_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
Astotal_sume that functionality is already tested above so just do quick check that it basictotal_ally works"""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
Astotal_sume that sqlalchemy takes case of the DB specifics
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
        monkeySQL = sql.MonkeySQLAlchemy(temp_conn)  # api: pandas.io.sql.PandasSQLAlchemy
import monkey as mk
import numpy as np
import src.features.build_features as bf
def compare_knowledgeframes(exp_kf, act_kf):
"""
Compare two knowledgeframes ignoring row order
:param exp_kf:
:param act_kf:
"""
def ah(exp, act, prefix):
assert exp == act, '{prefix}\nExpected {exp}\nFound {act}' \
.formating(prefix=prefix, exp=exp, act=act)
    e_kf = mk.KnowledgeFrame.clone(exp_kf)  # api: pandas.DataFrame.copy
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655-690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
    # the number of observations divided by `distance` must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
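    # Blindfolding: for each of the `distance` omission patterns below, every
    # `distance`-th cell is blanked out and imputed with its column average,
    # the model is re-estimated and the data are predicted; SSE and SSO hold,
    # per pattern, the column-wise total sums of squared prediction errors and
    # of squared deviations from the average.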
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] = mk.KnowledgeFrame.total_sum((data2_ - average)**2)
latent = plsRound.latent
Variables = plsRound.Variables
SSE = mk.KnowledgeFrame.total_sum(SSE, axis=1)
    SSO = mk.KnowledgeFrame.total_sum(SSO, axis=1)  # api: pandas.DataFrame.sum
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Iterable
import cloudpickle
import numpy as np
import monkey as mk
from monkey.core.grouper import KnowledgeFrameGroupBy, CollectionsGroupBy
from .version import parse as parse_version
_HAS_SQUEEZE = parse_version(mk.__version__) < parse_version('1.1.0')
class GroupByWrapper:
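    """
    Thin wrapper that keeps a monkey grouper object together with all the
    parameters needed to re-create it (keys, axis, level, sort, ...).
    """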
def __init__(self, obj, grouper_obj=None, keys=None, axis=0, level=None, grouper=None,
exclusions=None, selection=None, as_index=True, sort=True,
group_keys=True, squeeze=False, observed=False, mutated=False,
grouper_cache=None):
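        # Prefer an explicitly passed value; otherwise fall back to the
        # attribute of the same name on an existing grouper_obj, if any.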
def fill_value(v, key):
return v if v is not None or grouper_obj is None else gettingattr(grouper_obj, key)
self.obj = obj
self.keys = fill_value(keys, 'keys')
self.axis = fill_value(axis, 'axis')
self.level = fill_value(level, 'level')
self.exclusions = fill_value(exclusions, 'exclusions')
self.selection = selection
self.as_index = fill_value(as_index, 'as_index')
self.sort = fill_value(sort, 'sort')
self.group_keys = fill_value(group_keys, 'group_keys')
self.squeeze = fill_value(squeeze, 'squeeze')
self.observed = fill_value(observed, 'observed')
self.mutated = fill_value(mutated, 'mutated')
if grouper_obj is None:
grouper_kw = dict(keys=keys, axis=axis, level=level, grouper=grouper,
exclusions=exclusions, as_index=as_index, group_keys=group_keys,
squeeze=squeeze, observed=observed, mutated=mutated)
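            # Only forward `squeeze` on monkey versions that still accept it
            # (older than 1.1.0, see _HAS_SQUEEZE above).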
if not _HAS_SQUEEZE: # pragma: no branch
grouper_kw.pop('squeeze')
if obj.ndim == 2:
                self.grouper_obj = KnowledgeFrameGroupBy(obj, **grouper_kw)  # api: pandas.core.groupby.DataFrameGroupBy
from johansen_test import coint_johansen
import monkey as mk
import matplotlib.pyplot as plt
from functions import *
from numpy.matlib import repmat
#from numpy import *
#from numpy.linalg import *
if __name__ == "__main__":
#import data from CSV file
root_path = 'C:/Users/javgar119/Documents/Python/Data/'
# the paths
# MAC: '/Users/Javi/Documents/MarketData/'
# WIN: 'C:/Users/javgar119/Documents/Python/Data'
filengthame = 'GLD_SLV_daily.csv'
full_path = root_path + filengthame
data = mk.read_csv(full_path, index_col='Date')
#create a collections with the data range asked
#start_date = '2010-01-13'
#end_date = '2014-05-13'
#data = subset_knowledgeframe(data, start_date, end_date)
#print('data import is {} lines'.formating(str(length(data))))
#print(data.header_num(10))
#print(data.final_item_tail(5))
#johansen test with non-zero offset but zero drift, and with the lag k=1.
results = coint_johansen(data, 0, 1)
    # these are the weights of the portfolio: we take the first eigenvector
    # because it shows the strongest cointegration relationship
w = results.evec[:, 0]
print('Best eigenvector is: {}.'.formating(str(w)))
# (net) market value of portfolio
    # this is the synthetic asset we are going to trade: a newly formed
    # average-reverting collections composed of the underlying assets in the
    # proportions given by the eigenvector
    yport = mk.KnowledgeFrame.total_sum(w*data, axis=1)  # api: pandas.DataFrame.sum
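    # A quick visual check of the average-reverting behaviour (purely
    # illustrative, not part of the original script):
    #     plt.plot(yport)
    #     plt.show()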
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renagetting_mingr is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
            key = base.OutputKey(label=name, position=idx)  # api: pandas.core.groupby.base.OutputKey
"""
Additional tests for MonkeyArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def whatever_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# MonkeyDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = MonkeyDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = MonkeyDtype(np.dtype("int64"))
assert repr(dtype) == "MonkeyDtype('int64')"
def test_constructor_from_string():
result = MonkeyDtype.construct_from_string("int64")
expected = MonkeyDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
MonkeyArray([1, 2, 3])
def test_collections_constructor_with_clone():
ndarray = np.array([1, 2, 3])
ser = mk.Collections(MonkeyArray(ndarray), clone=True)
assert ser.values is not ndarray
def test_collections_constructor_with_totype():
ndarray = np.array([1, 2, 3])
    result = mk.Collections(MonkeyArray(ndarray))  # api: pandas.arrays.PandasArray
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from monkey._libs import lib
from monkey._libs.tslibs import (
NaT,
iNaT,
)
import monkey as mk
from monkey import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import monkey._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_value_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.ifnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert incontainstance(pydt, timedelta) and not incontainstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert incontainstance(td64, np.timedelta64)
# this is NOT equal and cannot be value_roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert incontainstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.getting_minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"getting_minute",
"getting_min",
"getting_minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, mk.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate total_all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).convert_list()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).totype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").totype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").totype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").totype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_value_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.value_round(freq)
assert r1 == s1
r2 = t2.value_round(freq)
assert r2 == s2
def test_value_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.value_round(freq)
def test_value_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.getting_min.ceiling("s")
expected = Timedelta.getting_min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.getting_max.floor("s")
expected = Timedelta.getting_max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.getting_min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timedelta.getting_max.ceiling("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timedelta.value_round, Timedelta.floor, Timedelta.ceiling]
)
def test_value_round_sanity(self, method, n, request):
val = np.random.randint(iNaT + 1, lib.i8getting_max, dtype=np.int64)
td = Timedelta(val)
assert method(td, "ns") == td
res = method(td, "us")
nanos = 1000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "ms")
nanos = 1_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "s")
nanos = 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "getting_min")
nanos = 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
res = method(td, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - td).value) < nanos
assert res.value % nanos == 0
def test_contains(self):
# Checking for whatever NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit="d") + offsets.Hour(1)
for v in [NaT, None, float("nan"), np.nan]:
assert not (v in td)
td = to_timedelta([NaT])
for v in [NaT, None, float("nan"), np.nan]:
assert v in td
def test_identity(self):
td = Timedelta(10, unit="d")
assert incontainstance(td, Timedelta)
assert incontainstance(td, timedelta)
def test_short_formating_converters(self):
def conv(v):
return v.totype("m8[ns]")
assert Timedelta("10") == np.timedelta64(10, "ns")
assert Timedelta("10ns") == np.timedelta64(10, "ns")
assert Timedelta("100") == np.timedelta64(100, "ns")
assert Timedelta("100ns") == np.timedelta64(100, "ns")
assert Timedelta("1000") == np.timedelta64(1000, "ns")
assert Timedelta("1000ns") == np.timedelta64(1000, "ns")
assert Timedelta("1000NS") == np.timedelta64(1000, "ns")
assert Timedelta("10us") == np.timedelta64(10000, "ns")
assert Timedelta("100us") == np.timedelta64(100000, "ns")
assert Timedelta("1000us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000Us") == np.timedelta64(1000000, "ns")
assert Timedelta("1000uS") == np.timedelta64(1000000, "ns")
assert Timedelta("1ms") == np.timedelta64(1000000, "ns")
assert Timedelta("10ms") == np.timedelta64(10000000, "ns")
assert Timedelta("100ms") == np.timedelta64(100000000, "ns")
assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns")
assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns")
assert Timedelta("1s") == np.timedelta64(1000000000, "ns")
assert Timedelta("10s") == np.timedelta64(10000000000, "ns")
assert Timedelta("100s") == np.timedelta64(100000000000, "ns")
assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns")
assert Timedelta("1d") == conv(np.timedelta64(1, "D"))
assert Timedelta("-1d") == -conv(np.timedelta64(1, "D"))
assert Timedelta("1D") == conv(np.timedelta64(1, "D"))
assert Timedelta("10D") == conv(np.timedelta64(10, "D"))
assert Timedelta("100D") == conv(np.timedelta64(100, "D"))
assert Timedelta("1000D") == conv(np.timedelta64(1000, "D"))
assert Timedelta("10000D") == conv(np.timedelta64(10000, "D"))
# space
assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D"))
assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D"))
# invalid
msg = "invalid unit abbreviation"
with pytest.raises(ValueError, match=msg):
Timedelta("1foo")
msg = "unit abbreviation w/o a number"
with pytest.raises(ValueError, match=msg):
Timedelta("foo")
def test_full_formating_converters(self):
def conv(v):
return v.totype("m8[ns]")
d1 = np.timedelta64(1, "D")
assert Timedelta("1days") == conv(d1)
assert Timedelta("1days,") == conv(d1)
assert Timedelta("- 1days,") == -conv(d1)
assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s"))
assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s"))
assert Timedelta("06:00:01.01") == conv(
np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s"))
assert Timedelta("1days, 06:00:01") == conv(
d1 + np.timedelta64(6 * 3600 + 1, "s")
)
assert Timedelta("1days, 06:00:01.01") == conv(
d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms")
)
# invalid
msg = "have leftover units"
with pytest.raises(ValueError, match=msg):
Timedelta("- 1days, 00")
def test_pickle(self):
v = Timedelta("1 days 10:11:12.0123456")
        v_p = tm.value_round_trip_pickle(v)  # api: pandas._testing.round_trip_pickle
"""
Module contains tools for processing Stata files into KnowledgeFrames
The StataReader below was origintotal_ally written by <NAME> as part of PyDTA.
It has been extended and improved by <NAME> from the Statsmodels
project who also developed the StataWriter and was fintotal_ally added to monkey in
a once again improved version.
You can find more informatingion on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from typing import (
Any,
AnyStr,
Hashable,
Sequence,
cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from monkey._libs.lib import infer_dtype
from monkey._libs.writers import getting_max_length_string_array
from monkey._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from monkey.util._decorators import (
Appender,
doc,
)
from monkey.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from monkey import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concating,
ifna,
convert_datetime,
to_timedelta,
)
from monkey.core import generic
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.base import Index
from monkey.core.collections import Collections
from monkey.io.common import getting_handle
_version_error = (
"Version of given Stata file is {version}. monkey supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to KnowledgeFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replacingd with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to monkey
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns total_all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_compression_params = f"""\
compression : str or dict, default None
If string, specifies compression mode. If dict, value at key 'method'
specifies compression mode. Compression mode must be one of {{'infer',
'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer'
and `filepath_or_buffer` is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise
no compression). If dict and compression mode is one of
{{'zip', 'gzip', 'bz2'}}, or inferred as one of the above,
other entries passed as additional compression options.
{generic._shared_docs["storage_options"]}"""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into KnowledgeFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
{_compression_params}
Returns
-------
KnowledgeFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
KnowledgeFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Creating a dummy stata for this example
>>> kf = mk.KnowledgeFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> kf.to_stata('animals.dta')
Read a Stata dta file:
>>> kf = mk.read_stata('animals.dta')
Read a Stata dta file in 10,000 line chunks:
>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8")
>>> kf = mk.KnowledgeFrame(values, columns=["i"])
>>> kf.to_stata('filengthame.dta')
>>> itr = mk.read_stata('filengthame.dta', chunksize=10000)
>>> for chunk in itr:
... # Operate on a single chunk, e.g., chunk.average()
... pass
>>> import os
>>> os.remove("./filengthame.dta")
>>> os.remove("./animals.dta")
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a knowledgeframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
KnowledgeFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_compression_params}
{_reader_notes}
"""
_date_formatings = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Collections and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_convert_datetime_vec(dates, fmt) -> Collections:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Collections
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The formating to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
Returns
-------
converted : Collections
The converted dates
Examples
--------
>>> dates = mk.Collections([52])
>>> _stata_elapsed_date_convert_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, astotal_sugetting_ming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This astotal_sumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calengthdar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1 yearly
date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.getting_min.year, Timestamp.getting_max.year
MAX_DAY_DELTA = (Timestamp.getting_max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.getting_min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Collections:
"""
Convert year and month to datetimes, using monkey vectorized versions
when the date range ftotal_alls within the range supported by monkey.
Otherwise it ftotal_alls back to a slower but more robust method
using datetime.
"""
if year.getting_max() < MAX_YEAR and year.getting_min() > MIN_YEAR:
return convert_datetime(100 * year + month, formating="%Y%m")
else:
index = gettingattr(year, "index", None)
return Collections(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Collections:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Collections
"""
if year.getting_max() < (MAX_YEAR - 1) and year.getting_min() > MIN_YEAR:
return convert_datetime(year, formating="%Y") + to_timedelta(days, unit="d")
else:
index = gettingattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Collections(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Collections:
"""
Convert base dates and deltas to datetimes, using monkey vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in monkey.
"""
index = gettingattr(deltas, "index", None)
if unit == "d":
if deltas.getting_max() > MAX_DAY_DELTA or deltas.getting_min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Collections(values, index=index)
elif unit == "ms":
if deltas.getting_max() > MAX_MS_DELTA or deltas.getting_min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Collections(values, index=index)
else:
raise ValueError("formating not understood")
base = convert_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when monkey supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.ifnan(dates)
has_bad_values = False
if bad_locs.whatever():
has_bad_values = True
data_col = Collections(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.totype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC formating. Leaving in Stata Internal Format.")
conv_dates = Collections(dates, dtype=object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
quarter_month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
first_month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates: Collections, fmt: str) -> Collections:
"""
Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Collections
Collections or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The formating to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.dtype):
if delta:
time_delta = dates - stata_epoch
d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds
if days or year:
date_index = DatetimeIndex(dates)
d["year"] = date_index._data.year
d["month"] = date_index._data.month
if days:
days_in_ns = dates.view(np.int64) - convert_datetime(
d["year"], formating="%Y"
).view(np.int64)
d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates._values - stata_epoch
def f(x: datetime.timedelta) -> float:
return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.employ(lambda x: 100 * x.year + x.month)
d["year"] = year_month._values // 100
d["month"] = year_month._values - d["year"] * 100
if days:
def g(x: datetime.datetime) -> int:
return (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return KnowledgeFrame(d, index=index)
bad_loc = ifna(dates)
index = dates.index
if bad_loc.whatever():
dates = Collections(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = convert_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).totype(int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(f"Format {fmt} is not a known Stata date formating")
conv_dates = Collections(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return | Collections(conv_dates, index=index) | pandas.core.series.Series |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from monkey._libs.tslib import iNaT
from monkey.compat import PYPY
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.dtypes import DatetimeTZDtype
import monkey as mk
from monkey import (
CategoricalIndex,
KnowledgeFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Collections,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from monkey.core.accessor import MonkeyDelegate
from monkey.core.arrays import DatetimeArray, MonkeyArray, TimedeltaArray
from monkey.core.base import NoNewAttributesMixin, MonkeyObject
from monkey.core.indexes.datetimelike import DatetimeIndexOpsMixin
import monkey.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normtotal_ally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = gettingattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(gettingattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert incontainstance(result, klass)
assert result == expected
class TestMonkeyDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _getting_foo(self):
return self.foo
foo = property(_getting_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(MonkeyDelegate, MonkeyObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we ftotal_all back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.gettingsizeof(delegate)
class Ops:
def _total_allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (incontainstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_collections = Collections(arr, index=self.bool_index, name="a")
self.int_collections = Collections(arr, index=self.int_index, name="a")
self.float_collections = Collections(arr, index=self.float_index, name="a")
self.dt_collections = Collections(arr, index=self.dt_index, name="a")
self.dt_tz_collections = self.dt_tz_index.to_collections(keep_tz=True)
self.period_collections = Collections(arr, index=self.period_index, name="a")
self.string_collections = Collections(arr, index=self.string_index, name="a")
self.unicode_collections = Collections(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [gettingattr(self, "{}_index".formating(t)) for t in types]
self.collections = [gettingattr(self, "{}_collections".formating(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_collections = Collections(arr.totype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replacing=False)
self.int8_collections = Collections(arr_int.totype(np.int8), index=index, name="a")
self.int16_collections = Collections(arr_int.totype(np.int16), index=index, name="a")
self.int32_collections = Collections(arr_int.totype(np.int32), index=index, name="a")
self.uint8_collections = Collections(arr_int.totype(np.uint8), index=index, name="a")
self.uint16_collections = Collections(arr_int.totype(np.uint16), index=index, name="a")
self.uint32_collections = Collections(arr_int.totype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_collections = [gettingattr(self, "{}_collections".formating(t)) for t in nrw_types]
self.objs = self.indexes + self.collections + self.narrow_collections
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if incontainstance(o, Collections) else o
if not filter(filt):
continue
try:
if incontainstance(o, Collections):
expected = Collections(gettingattr(o.index, op), index=o.index, name="a")
else:
expected = gettingattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = gettingattr(o, op)
# these could be collections, arrays or scalars
if incontainstance(result, Collections) and incontainstance(expected, Collections):
tm.assert_collections_equal(result, expected)
elif incontainstance(result, Index) and incontainstance(expected, Index):
tm.assert_index_equal(result, expected)
elif incontainstance(result, np.ndarray) and incontainstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Collections here whateverhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
gettingattr(o, op)
@pytest.mark.parametrize("klass", [Collections, KnowledgeFrame])
def test_binary_ops_docs(self, klass):
op_mapping = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truedivision": "/",
"floordivision": "//",
}
for op_name in op_mapping:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_mapping[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in gettingattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in gettingattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if incontainstance(o, Collections):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Collections[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert gettingattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert gettingattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # length > 1
assert o.ndim == 1
assert o.size == length(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Collections([1]).item() == 1
def test_counts_value_num_distinctive_ndistinctive(self):
for orig in self.objs:
o = orig.clone()
klass = type(o)
values = o._values
if incontainstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if incontainstance(o, Index) and o.is_boolean():
continue
elif incontainstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, length(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, length(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(length(o)), range(1, length(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Collections(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.counts_value_num()
tm.assert_collections_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.distinctive()
if incontainstance(o, Index):
assert incontainstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Collections returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert incontainstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.totype(object), orig._values.totype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.ndistinctive() == length(np.distinctive(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_counts_value_num_distinctive_ndistinctive_null(self, null_obj):
for orig in self.objs:
o = orig.clone()
klass = type(o)
values = o._ndarray_values
if not self._total_allow_na_ops(o):
continue
# special total_allocate to the numpy array
if is_datetime64tz_dtype(o):
if incontainstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shtotal_allow_clone(v)
else:
o = o.clone()
o[0:2] = mk.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shtotal_allow_clone(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if incontainstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.clone()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, length(o) + 1)))
o.name = "a"
else:
if incontainstance(o, DatetimeIndex):
expected_index = orig._values._shtotal_allow_clone(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, length(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(length(o), dtype=np.bool)
nanloc[:3] = True
if incontainstance(o, Index):
tm.assert_numpy_array_equal(mk.ifna(o), nanloc)
else:
exp = Collections(nanloc, o.index, name="a")
tm.assert_collections_equal(mk.ifna(o), exp)
expected_s_na = Collections(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Collections(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.counts_value_num(sipna=False)
tm.assert_collections_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.counts_value_num()
tm.assert_collections_equal(o.counts_value_num(), expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.distinctive()
if incontainstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is mk.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert mk.ifna(result[0])
assert result.dtype == orig.dtype
assert o.ndistinctive() == 8
assert o.ndistinctive(sipna=False) == 9
@pytest.mark.parametrize("klass", [Index, Collections])
def test_counts_value_num_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Collections([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_collections_equal(s.counts_value_num(), expected)
if incontainstance(s, Index):
exp = Index(np.distinctive(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.distinctive(), exp)
else:
exp = np.distinctive(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.distinctive(), exp)
assert s.ndistinctive() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.counts_value_num(sort=False).sort_the_values()
expected = Collections([3, 1, 4, 2], index=list("acbd")).sort_the_values()
tm.assert_collections_equal(hist, expected)
# sort ascending
hist = s.counts_value_num(ascending=True)
expected = Collections([1, 2, 3, 4], index=list("cdab"))
tm.assert_collections_equal(hist, expected)
# relative histogram.
hist = s.counts_value_num(normalize=True)
expected = Collections([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_collections_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Collections])
def test_counts_value_num_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.counts_value_num(bins=1)
s1 = Collections([1, 1, 2, 3])
res1 = s1.counts_value_num(bins=1)
exp1 = Collections({Interval(0.997, 3.0): 4})
tm.assert_collections_equal(res1, exp1)
res1n = s1.counts_value_num(bins=1, normalize=True)
exp1n = Collections({Interval(0.997, 3.0): 1.0})
tm.assert_collections_equal(res1n, exp1n)
if incontainstance(s1, Index):
tm.assert_index_equal(s1.distinctive(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.distinctive(), exp)
assert s1.ndistinctive() == 3
# these return the same
res4 = s1.counts_value_num(bins=4, sipna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Collections([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_collections_equal(res4, exp4)
res4 = s1.counts_value_num(bins=4, sipna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Collections([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_collections_equal(res4, exp4)
res4n = s1.counts_value_num(bins=4, normalize=True)
exp4n = Collections([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_collections_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Collections([4, 3, 2], index=["b", "a", "d"])
tm.assert_collections_equal(s.counts_value_num(), expected)
if incontainstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.distinctive(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.distinctive(), exp)
assert s.ndistinctive() == 3
s = klass({})
expected = Collections([], dtype=np.int64)
tm.assert_collections_equal(s.counts_value_num(), expected, check_index_type=False)
# returned dtype differs depending on original
if incontainstance(s, Index):
tm.assert_index_equal(s.distinctive(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.distinctive(), np.array([]), check_dtype=False)
assert s.ndistinctive() == 0
@pytest.mark.parametrize("klass", [Index, Collections])
def test_counts_value_num_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
kf = mk.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(kf["dt"].clone())
s.name = None
idx = mk.convert_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Collections([3, 2, 1], index=idx)
tm.assert_collections_equal(s.counts_value_num(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if incontainstance(s, Index):
tm.assert_index_equal(s.distinctive(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.distinctive(), expected)
assert s.ndistinctive() == 3
# with NaT
s = kf["dt"].clone()
s = klass(list(s.values) + [mk.NaT])
result = s.counts_value_num()
assert result.index.dtype == "datetime64[ns]"
tm.assert_collections_equal(result, expected_s)
result = s.counts_value_num(sipna=False)
expected_s[mk.NaT] = 1
tm.assert_collections_equal(result, expected_s)
distinctive = s.distinctive()
assert distinctive.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare mk.NaT
if incontainstance(s, Index):
exp_idx = DatetimeIndex(expected.convert_list() + [mk.NaT])
tm.assert_index_equal(distinctive, exp_idx)
else:
tm.assert_numpy_array_equal(distinctive[:3], expected)
assert mk.ifna(distinctive[3])
assert s.ndistinctive() == 3
assert s.ndistinctive(sipna=False) == 4
# timedelta64[ns]
td = kf.dt - kf.dt + timedelta(1)
td = klass(td, name="dt")
result = td.counts_value_num()
expected_s = Collections([6], index=[Timedelta("1day")], name="dt")
tm.assert_collections_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if incontainstance(td, Index):
tm.assert_index_equal(td.distinctive(), expected)
else:
tm.assert_numpy_array_equal(td.distinctive(), expected.values)
td2 = timedelta(1) + (kf.dt - kf.dt)
td2 = klass(td2, name="dt")
result2 = td2.counts_value_num()
tm.assert_collections_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.clone()
if incontainstance(o, Index) and o.is_boolean():
exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
exp_distinctives = o
exp_distinctives = Index([False, True])
else:
exp_arr = np.array(range(length(o)), dtype=np.intp)
exp_distinctives = o
codes, distinctives = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if incontainstance(o, Collections):
tm.assert_index_equal(distinctives, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(distinctives, exp_distinctives, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.clone()
# don't test boolean
if incontainstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if incontainstance(o, Collections):
o = o.sort_the_values()
n = o.iloc[5:].adding(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].adding(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, distinctives = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if incontainstance(o, Collections):
tm.assert_index_equal(
distinctives, Index(orig).sort_the_values(), check_names=False
)
else:
tm.assert_index_equal(distinctives, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, distinctives = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if incontainstance(o, Collections):
expected = Index(o.iloc[5:10].adding(o.iloc[:5]))
tm.assert_index_equal(distinctives, expected, check_names=False)
else:
expected = o[5:10].adding(o[:5])
tm.assert_index_equal(distinctives, expected, check_names=False)
def test_duplicated_values_sip_duplicates_index(self):
# GH 4060
for original in self.objs:
if incontainstance(original, Index):
# special case
if original.is_boolean():
result = original.sip_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * length(original), dtype=bool)
duplicated_values = original.duplicated_values()
tm.assert_numpy_array_equal(duplicated_values, expected)
assert duplicated_values.dtype == bool
result = original.sip_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated_values
idx = original[list(range(length(original))) + [5, 3]]
expected = np.array([False] * length(original) + [True, True], dtype=bool)
duplicated_values = idx.duplicated_values()
tm.assert_numpy_array_equal(duplicated_values, expected)
assert duplicated_values.dtype == bool
tm.assert_index_equal(idx.sip_duplicates(), original)
base = [False] * length(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated_values = idx.duplicated_values(keep="final_item")
tm.assert_numpy_array_equal(duplicated_values, expected)
assert duplicated_values.dtype == bool
result = idx.sip_duplicates(keep="final_item")
tm.assert_index_equal(result, idx[~expected])
base = [False] * length(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated_values = idx.duplicated_values(keep=False)
tm.assert_numpy_array_equal(duplicated_values, expected)
assert duplicated_values.dtype == bool
result = idx.sip_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with pytest.raises(
TypeError,
match=(
r"sip_duplicates\(\) got an " r"unexpected keyword argument"
),
):
idx.sip_duplicates(inplace=True)
else:
expected = Collections(
[False] * length(original), index=original.index, name="a"
)
tm.assert_collections_equal(original.duplicated_values(), expected)
result = original.sip_duplicates()
tm.assert_collections_equal(result, original)
assert result is not original
idx = original.index[list(range(length(original))) + [5, 3]]
values = original._values[list(range(length(original))) + [5, 3]]
s = Collections(values, index=idx, name="a")
expected = Collections(
[False] * length(original) + [True, True], index=idx, name="a"
)
tm.assert_collections_equal(s.duplicated_values(), expected)
tm.assert_collections_equal(s.sip_duplicates(), original)
base = [False] * length(idx)
base[3] = True
base[5] = True
expected = Collections(base, index=idx, name="a")
tm.assert_collections_equal(s.duplicated_values(keep="final_item"), expected)
tm.assert_collections_equal(
s.sip_duplicates(keep="final_item"), s[~np.array(base)]
)
base = [False] * length(original) + [True, True]
base[3] = True
base[5] = True
expected = Collections(base, index=idx, name="a")
tm.assert_collections_equal(s.duplicated_values(keep=False), expected)
tm.assert_collections_equal(
s.sip_duplicates(keep=False), s[~np.array(base)]
)
s.sip_duplicates(inplace=True)
tm.assert_collections_equal(s, original)
def test_sip_duplicates_collections_vs_knowledgeframe(self):
# GH 14192
kf = mk.KnowledgeFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
mk.NaT,
mk.NaT,
],
}
)
for column in kf.columns:
for keep in ["first", "final_item", False]:
sipped_frame = kf[[column]].sip_duplicates(keep=keep)
sipped_collections = kf[column].sip_duplicates(keep=keep)
tm.assert_frame_equal(sipped_frame, sipped_collections.to_frame())
def test_fillnone(self):
# # GH 11343
# though Index.fillnone and Collections.fillnone has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.clone()
values = o.values
# values will not be changed
result = o.fillnone(o.totype(object).values[0])
if incontainstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_collections_equal(o, result)
# check shtotal_allow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.clone()
klass = type(o)
if not self._total_allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.totype(object).values
fill_value = values[0]
values[0:2] = mk.NaT
else:
values = o.values.clone()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillnone(fill_value)
if incontainstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_collections_equal(result, expected)
# check shtotal_allow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
incontainstance(o, Collections) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if incontainstance(o, Collections):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.gettingsizeof will ctotal_all the .memory_usage with
# deep=True, and add on some GC overheader_num
diff = res_deep - sys.gettingsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, getting_max(o))
assert 0 <= index <= length(o)
index = np.searchsorted(o, getting_max(o), sorter=range(length(o)))
assert 0 <= index <= length(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_collections.sip_duplicates(inplace=value)
def test_gettingitem(self):
for i in self.indexes:
s = mk.Collections(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, mk.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(length(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = mk.Collections(idx)
tm.assert_collections_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_getting_indexer_non_distinctive_dtype_mismatch(self):
# GH 25459
indexes, missing = mk.Index(["A", "B"]).getting_indexer_non_distinctive(mk.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
tm.assert_equal(np.transpose(obj), obj)
with pytest.raises(ValueError, match=self.errmsg):
np.transpose(obj, axes=1)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert gettingattr(t, "__frozen")
with pytest.raises(AttributeError):
t.b = "test"
assert not hasattr(t, "b")
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.convert_list(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["convert_list", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Collections, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / monkey types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert incontainstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.convert_list(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["convert_list", "to_list", "list", "iter"],
)
@pytest.mark.parametrize("typ", [Collections, Index])
def test_iterable_object_and_category(self, typ, method, dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / monkey types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert incontainstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to collections
s = Collections([1], dtype=dtype)
_, result = list(s.items())[0]
assert incontainstance(result, rdtype)
_, result = list(s.items())[0]
assert incontainstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.parametrize("typ", [Collections, Index])
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_mapping(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / monkey types
s = typ([1], dtype=dtype)
result = s.mapping(type)[0]
if not incontainstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.convert_list(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["convert_list", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert incontainstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Collections(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert incontainstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Collections(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert incontainstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Collections(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert incontainstance(res, Timedelta)
assert res == exp
# period
vals = [mk.Period("2011-01-01", freq="M"), mk.Period("2011-01-02", freq="M")]
s = Collections(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert incontainstance(res, mk.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(mk.Categorical(["a", "b"]), mk.Categorical, "category"),
(
mk.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
mk.PeriodIndex([2018, 2019], freq="A"),
mk.core.arrays.PeriodArray,
mk.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(
mk.IntervalIndex.from_breaks([0, 1, 2]),
mk.core.arrays.IntervalArray,
"interval",
),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Collections / KnowledgeFrame columns of these types (so
# we getting consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction to for code reuse.
# At the moment, we've judged that total_allowing this test to fail is more
# practical that overriding Collections._values to special case
# Collections[M8[ns]] and Collections[m8[ns]] to return a DateLikeArray.
pytest.param(
mk.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
mk.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = mk.Collections(array)._values
r_values = mk.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(mk.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
mk.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
mk.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(mk.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
mk.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = mk.Collections(array)._ndarray_values
r_values = mk.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = mk.Collections(arr)
result = ser.array
expected = | MonkeyArray(arr) | pandas.core.arrays.PandasArray |
# -*- coding: utf-8 -*-
"""
Functions for cleaning mdredze Sandy Twitter dataset.
"""
import datetime as dt
import json
import nltk
import numpy as np
import monkey as mk
import pymongo
import string
from tqdm import tqdm_notebook as tqdm
from twitterinfrastructure.tools import dump, output
def create_analysis(collection='tweets_analysis',
tweet_collection='tweets',
nyisozones_collection='nyiso_zones',
taxizones_collection='taxi_zones',
fields=None,
db_name='sandy',
db_instance='mongodb://localhost:27017/',
progressbar=False, overwrite=False, verbose=0):
"""Creates a collection of tweets for analysis, queried and processed
from an existing collection of tweets. Assumes the specified mongodb
instance is already running.
Parameters
----------
collection : str
Name of collection to insert analysis tweets into.
tweet_collection : str
Name of collection to query tweets from.
nyisozones_collection : str
Name of collection to query nyiso load zones.
taxizones_collection : str
Name of collection to query taxi zones and boroughs from.
fields : list or None
List of tweet field names to keep. If None, keeps all fields.
db_name : str
Name of database to connect to.
db_instance : str
Mongodb instance to connect to in URI format.
progressbar : bool
If True, displays progress bar. Progress bar does not work when
called from a notebook in PyCharm.
overwrite : bool
Defines whether or not to overwrite existing collection.
verbose : int
Defines verbosity for output statements.
Returns
-------
insert_num : int
Number of tweets inserted into collection.
tokens : set
Set of tokens in collection.
Notes
-----
fields = ['_id', 'coordinates', 'created_at', 'entities', 'text',
'id_str', 'place']
Use 'text' instead of 'full_text' for older tweet datasets.
Some tweets match multiple taxi zones, causing some (likely all) of the
failed insert tweets found in dump file.
MongoDB Compass shows datetime field in current timezone, but that is
stored as UTC (i.e. retrieving with pymongo correctly displays/converts
datetime field as UTC).
Requires download of nltk data (tested with popular package). See
https://www.nltk.org/data.html for download details.
Start a mongodb instance by running `$ mongod` from terminal (see
http://api.mongodb.com/python/current/tutorial.html for more details)
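Examples
--------
A minimal usage sketch, assuming a local mongod instance whose 'sandy'
database already holds hydrated tweets in 'tweets' plus the nyiso and
taxi zone collections (collection names shown are the defaults; the
fields list mirrors the Notes above):
>>> insert_num, tokens = create_analysis(
...     collection='tweets_analysis',
...     tweet_collection='tweets',
...     nyisozones_collection='nyiso_zones',
...     taxizones_collection='taxi_zones',
...     fields=['_id', 'coordinates', 'created_at', 'entities', 'text',
...             'id_str', 'place'],
...     db_name='sandy',
...     overwrite=True,
...     verbose=1)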
"""
if verbose >= 1:
output('Started querying, processing, and inserting tweets from '
'{tweet_collection} into {collection} collection in {db_name} '
'database.'.formating(tweet_collection=tweet_collection,
collection=collection, db_name=db_name))
# connect to db (creates if not exists)
client = pymongo.MongoClient(db_instance)
db = client[db_name]
# ensure that nyisozones_collection and taxizones_collection exist
collections = db.collection_names()
if (nyisozones_collection not in collections) or \
(taxizones_collection not in collections):
output('{nyiso} or {taxi} collection not in database. No action '
'taken.'.formating(nyiso=nyisozones_collection,
taxi=taxizones_collection))
return None, None
# overwrite collection if needed
if overwrite:
db.sip_collection(collection)
if verbose >= 1:
output('Dropped {collection} collection (if exists).'.formating(
collection=collection))
# query, process, and insert analysis tweets
insert_num = 0
tokens = set()
fails = []
tknzr = nltk.tokenize.TweetTokenizer(strip_handles=True, reduce_length=True)
stop_list = nltk.corpus.stopwords.words("english") + list(
string.punctuation)
stemmer = nltk.stem.PorterStemmer()
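# note: stored 'tokens' are Porter stems (e.g. 'flooding' -> 'flood'),
# so keyword searches against this field likely need stemmed forms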
# if progressbar:
# zones_iter = tqdm(taxi_zones, total=taxi_zones.count(),
# desc='taxi_zones')
# else:
# zones_iter = taxi_zones
# for taxi_zone in zones_iter:
# # query tweets within current taxi zone
# query_dict = {
# "coordinates": {
# "$geoWithin": {
# "$geometry": taxi_zone['geometry']
# }
# }
# }
# full_tweets = db[tweet_collection].find(query_dict)
# process and insert tweets
full_tweets = db[tweet_collection].find()
if progressbar:
tweets_iter = tqdm(full_tweets, total=full_tweets.count(),
desc='tweets', leave=False)
else:
tweets_iter = full_tweets
for full_tweet in tweets_iter:
# remove extra fields
if fields:
tweet = {field: full_tweet[field] for field in fields}
else:
tweet = full_tweet
# identify and add nyiso zone, taxi zone, and taxi borough
if tweet['coordinates'] is not None:
query_dict = {
"geometry": {
"$geoIntersects": {
"$geometry": tweet['coordinates']
}
}
}
nyiso_zone = db[nyisozones_collection].find_one(query_dict)
if nyiso_zone:
tweet['nyiso_zone'] = nyiso_zone['properties']['Zone']
else:
tweet['nyiso_zone'] = np.nan
query_dict = {
"geometry": {
"$geoIntersects": {
"$geometry": tweet['coordinates']
}
}
}
taxi_zone = db[taxizones_collection].find_one(query_dict)
if taxi_zone:
tweet['location_id'] = taxi_zone['properties']['LocationID']
tweet['borough'] = taxi_zone['properties']['borough']
else:
tweet['location_id'] = np.nan
tweet['borough'] = np.nan
else:
fails.adding(tweet['id_str'])
if verbose >= 2:
output('Tweet skipped due to missing coordinates.',
'create_analysis')
continue
# skip tweets missing nyiso and taxi zone
if (tweet['nyiso_zone'] is np.nan) and (tweet['location_id'] is np.nan):
fails.adding(tweet['id_str'])
if verbose >= 2:
output('Tweet skipped due to missing nyiso or taxi zone.',
'create_analysis')
continue
# add UTC datetime, NY datetime, and UNIX timestamp fields
utc_time = dt.datetime.strptime(tweet['created_at'],
'%a %b %d %H:%M:%S +0000 %Y')
tweet['datetimeUTC'] = utc_time
tweet['datetimeNY'] = mk.convert_datetime(utc_time).tz_localize(
tz='UTC').tz_convert('America/New_York')
tweet['timestampUNIX'] = utc_time.replacing(
tzinfo=dt.timezone.utc).timestamp()
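# datetimeUTC is a naive UTC datetime, datetimeNY is a tz-aware
# America/New_York timestamp, and timestampUNIX is seconds since the
# UNIX epoch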
# tokenize, convert to lowercase, filter out stop words and
# punctuation, and stem
# tweet_tokens = tokenize_tweet(tweet, text_field='text')
tweet_tokens = [stemmer.stem(token) for token
in tknzr.tokenize(tweet['text'])
if token.lower() not in stop_list]
tweet['tokens'] = tweet_tokens
tokens.umkate(tweet_tokens)
# insert processed tweet
try:
db[collection].insert_one(tweet)
insert_num += 1
except Exception as e:
fails.adding(tweet['id_str'])
if verbose >= 2:
output(str(e), 'create_analysis')
dump(fails, func_name='create_analysis')
# create indexes
db[collection].create_index([("coordinates", pymongo.GEOSPHERE)])
db[collection].create_index([("datetimeUTC", 1), ("location_id", 1)])
db[collection].create_index([("datetimeUTC", 1), ("borough", 1)])
db[collection].create_index([("datetimeUTC", 1), ("nyiso_zone", 1)])
db[collection].create_index([("datetimeNY", 1), ("location_id", 1)])
db[collection].create_index([("datetimeNY", 1), ("borough", 1)])
db[collection].create_index([("datetimeNY", 1), ("nyiso_zone", 1)])
if verbose >= 1:
output('Finished querying, processing, and inserting tweets from '
'{tweet_collection} into {collection} collection in {db_name} '
'database ({insert_num} of {queried_num} queried tweets '
'inserted).'.formating(tweet_collection=tweet_collection,
collection=collection, db_name=db_name,
insert_num=insert_num,
queried_num=insert_num + length(fails)))
return insert_num, tokens
def create_tweets_keyword(tokens, hashtags, collection='tweets_keyword',
analysis_collection='tweets_analysis',
db_name='sandy',
db_instance='mongodb://localhost:27017/',
overwrite=False, verbose=0):
"""Creates a collection of token and/or hashtag-matched tweets. Astotal_sumes
analysis_collection has been processed (using create_analysis). Astotal_sumes
the specified mongodb instance is already running.
Parameters
----------
tokens : list
List of tokens to search for.
hashtags : list
List of hashtags to search for.
collection : str
Name of collection to insert keyword-related tweets into.
analysis_collection : str
Name of collection to query tweets from.
db_name : str
Name of database to connect to.
db_instance : str
Mongodb instance to connect to in URI format.
overwrite : bool
Defines whether or not to overwrite existing collection.
verbose : int
Defines verbosity for output statements.
Returns
-------
insert_num : int
Number of tweets inserted into collection.
Notes
-----
Start a mongodb instance by running `$ mongod` from terminal (see
http://api.mongodb.com/python/current/tutorial.html for more details)
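Examples
--------
A minimal usage sketch, assuming tweets_analysis was already built by
create_analysis on a local mongod instance; the token and hashtag
values are illustrative, and since create_analysis stores Porter-stemmed
tokens the search tokens likely need to be stemmed forms:
>>> insert_num = create_tweets_keyword(
...     tokens=['power', 'flood'],
...     hashtags=['sandy'],
...     collection='tweets_keyword',
...     analysis_collection='tweets_analysis',
...     db_name='sandy',
...     overwrite=True,
...     verbose=1)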
"""
if verbose >= 1:
output('Started querying and inserting token and/or hashtag-matched '
'tweets from {analysis_collection} into {collection} '
'collection in {db_name} database.'.formating(
analysis_collection=analysis_collection,
collection=collection, db_name=db_name))
# connect to db (creates if not exists)
client = pymongo.MongoClient(db_instance)
db = client[db_name]
# overwrite collection if needed
if overwrite:
db.sip_collection(collection)
if verbose >= 1:
output('Dropped {collection} collection (if exists).'.formating(
collection=collection))
# query and insert token and/or hashtag-matched tweets
insert_num = 0
fails = []
tweets = query_keyword(tokens=tokens, hashtags=hashtags,
collection=analysis_collection, db_name=db_name,
db_instance=db_instance, verbose=0)
for tweet in tweets:
try:
db[collection].insert_one(tweet)
insert_num += 1
except Exception as e:
fails.adding(tweet['id_str'])
if verbose >= 2:
output(str(e), 'create_keyword')
dump(fails, func_name='create_keyword')
# create indexes
db[collection].create_index([("coordinates", pymongo.GEOSPHERE)])
db[collection].create_index([("datetimeUTC", 1), ("location_id", 1)])
db[collection].create_index([("datetimeUTC", 1), ("borough", 1)])
db[collection].create_index([("timestamp", 1)])
if verbose >= 1:
output('Finished querying and inserting token and/or hashtag-matched '
'tweets from {analysis_collection} into {collection} '
'collection in {db_name} database. ({insert_num} of '
'{queried_num} queried tweets inserted).'.formating(
analysis_collection=analysis_collection,
collection=collection, db_name=db_name, insert_num=insert_num,
queried_num=tweets.count()))
return insert_num
def create_hydrator_tweetids(path,
write_path='data/interim/sandy-tweetids.txt',
filter_sandy=False, progressbar=False, verbose=0):
"""Reads the sandy-tweettweet_ids-mdredze.txt file and creates an interim file
with only tweet tweet_ids (one per line) for input into Hydrator.
Parameters
----------
path : str
Path to sandy-tweetids-mdredze.txt.
write_path : str
Full path of the file to write the tweet ids to. An existing file
will be overwritten.
filter_sandy : bool, optional
Determines whether or not to only include tweets that contain the
word "Sandy".
progressbar : bool
If True, displays progress bar. Progress bar does not work when
called from a notebook in PyCharm.
verbose : int
Defines verbosity for output statements.
Returns
-------
num_tweets : int
Number of tweet ids written.
Notes
-----
path = "data/raw/release-mdredze.txt"
Progress bar does not work when called from a notebook in PyCharm.
Example lines in raw file:
tag:search.twitter.com,2005:260244087901413376 2012-10-22T05:00:00.000Z False
tag:search.twitter.com,2005:260244088203403264 2012-10-22T05:00:00.000Z False
"""
if verbose >= 1:
output('Started converting tweet ids from {path} to Hydrator '
'formating.'.formating(path=path))
write_file = open(write_path, 'w')
# loads and writes tweets line by line
num_tweets = 0
num_lines = 0
with open(path, 'r') as file:
if progressbar:
file_iter = tqdm(file)
else:
file_iter = file
for line in file_iter:
num_lines += 1
parts = line.strip('\n').split(':')
# only include tweets containing the word "sandy"
if filter_sandy:
sandy_parts = parts[-1]
sandy = sandy_parts.split('\t')[1]
if sandy == 'True':
date = parts[2]
parts = date.split('\t')
tweet_id = parts[0]
write_file.write(tweet_id + '\n')
num_tweets += 1
# include all tweet ids
else:
date = parts[2]
parts = date.split('\t')
tweet_id = parts[0]
write_file.write(tweet_id + '\n')
num_tweets += 1
write_file.close()
if verbose >= 1:
output('Finished converting {num_tweets} tweet ids from {path} to '
'Hydrator formating (original file contains {num_lines} '
'lines).'.formating(num_tweets=num_tweets, path=path,
num_lines=num_lines))
return num_tweets
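# Example usage -- a sketch; the input path follows the Notes above and the
# output path mirrors the default write_path:
#
#   n_ids = create_hydrator_tweetids(path='data/raw/release-mdredze.txt',
#                                    write_path='data/interim/sandy-tweetids.txt',
#                                    filter_sandy=True, verbose=1)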
def insert_tweets(path, collection='tweets', db_name='sandy',
db_instance='mongodb://localhost:27017/', progressbar=False,
overwrite=False, verbose=0):
"""Inserts tweets from a json file into a collection in a mongodb
database. Assumes the specified mongodb instance is already running.
Parameters
----------
path : str
Path to json file containing tweets. Assumes the json file contains
one tweet per line.
collection : str
Name of collection to insert tweets into.
db_name : str
Name of database to connect to.
db_instance : str
Mongodb instance to connect to in URI formating.
progressbar : bool
If True, displays progress bar. Progress bar does not work when
called from a notebook in PyCharm.
overwrite : bool
Defines whether or not to overwrite existing collection.
verbose : int
Defines verbosity for output statements.
Returns
-------
insert_num : int
Number of tweets inserted into collection.
Notes
-----
path = 'data/processed/sandy-tweets-20180314.json'
Progress bar does not work when called from a notebook in PyCharm.
Dumps failed inserts into a 'data/processed/dump-insert_tweets-[
datetime].txt' file.
Start a mongodb instance by running `$ mongod` from the terminal (see
http://api.mongodb.com/python/current/tutorial.html for more details)
"""
if verbose >= 1:
output('Started inserting tweets from "{path}" to {collection} '
'collection in {db_name} database.'.formating(
path=path, collection=collection, db_name=db_name))
# connect to db (creates if not exists)
client = pymongo.MongoClient(db_instance)
db = client[db_name]
# overwrite collection if needed
if overwrite:
db.sip_collection(collection)
if verbose >= 1:
output('Dropped {collection} collection (if exists).'.formating(
collection=collection))
# count lines
# i = -1
num_lines = 0
# with open(path, 'r') as file:
# for i, line in enumerate(file):
# pass
# num_lines = i + 1
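# NOTE: with the counting block above commented out, num_lines stays 0, so the
# progress bar total and the final "out of {num_lines} lines" report both show 0.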
# insert tweets one by one (creates collection if needed)
insert_num = 0
fails = []
with open(path, 'r') as file:
if progressbar:
file_iter = tqdm(file, total=num_lines)
else:
file_iter = file
for i, line in enumerate(file_iter):
tweet = json.loads(line)
try:
db[collection].insert_one(tweet)
insert_num += 1
except Exception as e:
fails.adding(i)
if verbose >= 2:
output(str(e), 'insert_tweets (line {line_num})'.formating(
line_num=i))
dump(fails, func_name='insert_tweets')
# create indexes
db[collection].create_index([("coordinates", pymongo.GEOSPHERE)])
if verbose >= 1:
output('Finished inserting tweets from "{path}" to {collection} '
'collection in {db_name} database ({insert_num} tweets inserted'
'out of {num_lines} lines).'.formating(path=path,
collection=collection,
db_name=db_name,
insert_num=insert_num,
num_lines=num_lines))
return insert_num
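# Example usage -- a sketch; the json path comes from the Notes above:
#
#   n_inserted = insert_tweets(path='data/processed/sandy-tweets-20180314.json',
#                              collection='tweets', db_name='sandy',
#                              progressbar=True, verbose=1)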
def mongod_to_kf(query, collection, db_name='sandy',
db_instance='mongodb://localhost:27017/', verbose=0):
"""Query a mongodb database and return the result as a knowledgeframe.
Parameters
----------
query : dict
Dictionary specifying query.
collection : str
Name of collection to query from.
db_name : str
Name of database to connect to.
db_instance : str
Mongodb instance to connect to in URI formating.
verbose : int
Defines verbosity for output statements.
Returns
-------
kf : knowledgeframe
Query results in a knowledgeframe.
Notes
-----
"""
if verbose >= 1:
output('Started query.')
# connect to database and query
client = pymongo.MongoClient(db_instance)
db = client[db_name]
kf = mk.KnowledgeFrame(list(db[collection].find(query)))
if verbose >= 1:
output('Finished query. Returned knowledgeframe with shape {shape}.'.formating(
shape=kf.shape))
return kf
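# Example usage -- a sketch only; the field names follow the indexes created by
# insert_tweets/create_tweets_keyword above, and the date window is illustrative:
#
#   from datetime import datetime
#   query = {'datetimeUTC': {'$gte': datetime(2012, 10, 22),
#                            '$lt': datetime(2012, 11, 3)},
#            'borough': 'Brooklyn'}
#   kf = mongod_to_kf(query, collection='tweets_keyword', db_name='sandy', verbose=1)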
def process_heat_mapping_daily(kf, daterange=None, boroughs=None, verbose=0):
""" Processes a knowledgeframe for heat mapping visualization. Proceessing
includes converting datetime columns, filling missing borough-day
combinations with nan, and pivoting for use with seaborn.heatmapping().
Parameters
----------
kf : knowledgeframe
Dataframe of daily total_summary data.
daterange : list or None
Specifies date range (inclusive) to include in full knowledgeframe. Specify
as a list of strings, e.g. ['10/21/2012', '10/27/2012']. If None,
only includes distinctive dates found in original knowledgeframe.
boroughs : list or None
Specifies boroughs to include in full knowledgeframe. Specify as a list of
strings, e.g. ['Bronx', 'Brooklyn']. If None, only includes distinctive
boroughs found in original knowledgeframe.
verbose : int
Defines verbosity for output statements.
Returns
-------
kf_pivot : knowledgeframe
Processed knowledgeframe, pivoted for heat mapping visualization.
kf_proc : knowledgeframe
Processed knowledgeframe, without pivot.
Notes
-----
"""
# update dtypes and columns
kf['date'] = mk.convert_datetime(kf['datetimeUTC']).dt.date
kf = kf[['date', 'borough', 'count']]
# build full knowledgeframe (all dates and all boroughs initialized with nans)
if daterange:
dates = mk.date_range(start=daterange[0], end=daterange[1]).convert_list()
dates = [ | mk.Timestamp.convert_pydatetime(date) | pandas.Timestamp.to_pydatetime |
import monkey as mk
import requests
import ratelimit
from ratelimit import limits
from ratelimit import sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like last-name-only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from an entity GET request to the LittleSis API, by name input rather than the id
input that is required in the original GET request format, in JSON format. Resorts to the entity with the highest number of relationships listed
for entries that point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of 1 individual or organization for which information is desired.
Example
-------
>>> entity('<NAME>')
{'meta': {'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'total_summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'umkated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
def relationships(name):
"""
Provides info from a relationships GET request to the LittleSis API, by name input rather
than the id input that is required in the original GET request format, in JSON format. Resorts to the entity with the highest number of
relationships listed for entries that point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of 1 individual or organization for which information is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
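# The two decorators below throttle the wrapped function to at most one API call
# per second: limits() raises when that budget is exceeded and sleep_and_retry
# blocks until the next one-second window opens.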
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def basic_entity(name):
"""
Creates a monkey knowledgeframe for one individual or entity with basic information from an
entity GET request to the LittleSis API. Resorts to the entity with the highest number of relationships listed for entries that
point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of 1 individual or entity for which information is desired.
Example
-------
>>> basic_entity('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
kf = mk.KnowledgeFrame(list(data2.items()))
kf.columns = ['info', 'value']
kf = mk.pivot(kf, columns = 'info', values = 'value')
kf = kf.fillnone(method='bfill', axis=0)
kf = kf.iloc[:1, :]
kf = kf[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
kf.renagetting_ming(columns = {'start_date': 'date_of_birth'}, inplace = True)
return kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def list_entities(*args):
"""
Concatenates knowledgeframes created by basic_entity() for entity GET requests to the LittleSis
API, resulting in a monkey knowledgeframe of multiple rows. Resorts to the entity with the highest number of relationships listed for entries
that point to multiple entities (like last-name-only entries).
Parameters
----------
*args: List of names of individuals or entities for which to include information in the resulting knowledgeframe.
Example
-------
>>> list_entities('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles LakersโF 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_kfs = []
for name in args:
kf = basic_entity(name)
list_of_kfs.adding(kf)
combined_kf = mk.concating(list_of_kfs, ignore_index=True)
return combined_kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def id_to_name(x):
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
if response.status_code != 200:
raise Exception('API response: {}'.formating(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
def relationships_kf(name):
"""
Creates a monkey knowledgeframe with information from a relationships GET request to the LittleSis
API.
Parameters
----------
name: Name of one individual or organization for which relationship information is
desired and included in the knowledgeframe.
Example
-------
>>> relationships_kf('<NAME>')
primary_entity related_entity amount currency \
0 Childrenโs Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'amount', 'currency', 'description1', 'goods', 'filings', 'description', 'start_date',
'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'primary_entity','entity1_id': 'related_entity', 'description1':'category'}, inplace = True)
return blurbs
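# Note: relationships_kf() makes one additional API request per entity id it
# resolves (the two employ(id_to_name) calls above), so entities with many
# relationships can take a while; the one-call-per-second throttle on
# id_to_name is what paces those lookups.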
def timelines(name):
"""
Creates a knowledgeframe specifically from the timeline information of relationships from a
relationships GET request on the LittleSis API. Resorts to the entity with the highest number of relationships listed for entries that
point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of one individual or organization for which relationship information is
desired and included in the knowledgeframe.
Example
-------
>>> timelines('<NAME>')
searched_entity related_entity start_date \
0 Childrenโs Aid Society <NAME> None
1 <NAME> <NAME> None
...
end_date is_current
0 None None
1 None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'start_date', 'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'searched_entity','entity1_id': 'related_entity'}, inplace = True)
return blurbs
def bio(name):
"""
Provides a paragraph biography/background description of 1 individual or entity from an entity GET request on the LittleSis API. Resorts to
the entity with the highest number of relationships listed for entries that point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of one individual or organization for which biographical information is desired.
Example
-------
>>> bio('<NAME>')
'The 44th President of the United States, he was sworn into office on January 20,
2009; born in Honolulu, Hawaii, August 4, 1961; obtained early education in Jakarta,
Indonesia, and Hawaii; continued education at Occidental College, Los Angeles, Calif.;
received a B.A. in 1983 from Columbia University, New York City; worked as a community
organizer in Chicago, Ill.; studied law at Harvard University, where he became the
first African American president of the Harvard Law Review, and received J.D. in 1991;
lecturer on constitutional law, University of Chicago; member, Illinois State senate
1997-2004; elected as a Democrat to the U.S. Senate in 2004 for term beginning January
3, 2005.'
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
response2 = response2['data']['attributes']['total_summary']
return response2
def lists(name):
"""
Provides a list of all lists that the entity belongs to on the LittleSis website, from a
LittleSis lists GET request. Resorts to the entity with the highest number of relationships listed for entries that
point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of one individual or organization for which the list of list
memberships is desired.
Example
-------
>>> lists('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011)
The World's Highest Paid Celebrities (2017)
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = mk.KnowledgeFrame.convert_dict(data)
names = mk.KnowledgeFrame(data['attributes'])
names = mk.KnowledgeFrame.convert_dict(names)
for key, value in names.items():
print(value['name'])
def lists_w_descriptions(name):
"""
Provides list of lists to which the entity belongs on the LittleSis website, from a
lists GET request to the API, with added descriptions for the lists included if they
exist on the site. Resorts to the entity with the highest number of relationships listed for entries that
point to multiple entities (like last-name-only entries).
Parameters
----------
name: Name of one individual or organization for which the list of list memberships
is desired.
Example
-------
>>> lists_w_descriptions('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011) (description: The 100 most
powerful athletes on and off the field. No coaches, owners, managers, executives or
retired athletes were considered. Off-field metrics included the results of polls on
individual athletes by E-Poll Market Research and estimated endorsement dollars. On
field metrics were tallied on those who outscored, out-tackled, or outskated the
competition during 2009 and 2010. Sports were weighted according to their popularity
in the U.S. )
The World's Highest Paid Celebrities (2017) (description: FORBES' annual ranking of
the highest-earning entertainers in the world, published June 12 2017. The list
evaluates front of camera talent; fees for agents, managers and lawyers are not
deducted. )
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = | mk.KnowledgeFrame.convert_dict(data) | pandas.DataFrame.to_dict |
#################################################################################
# Unit Testing #
# While we will not cover the unit testing library that python #
# has, we wanted to introduce you to a simple way that you can test your code. #
# #
# Unit testing is important because it is the only way you can be sure that    #
# your code is doing what you think it is doing.                               #
# #
# Remember, just because there are no errors does not mean your code is correct. #
#################################################################################
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
mk.set_option('display.getting_max_columns', 100) # Show all columns when looking at the knowledgeframe
# Download NHANES 2015-2016 data
kf = mk.read_csv("nhanes_2015_2016.csv")
#kf.index = range(1,kf.shape[0]+1)
print(kf.header_num(3))
#################################################################################
# Goal #
# We want to find the average of first 100 rows of 'BPXSY1' when 'RIDAGEYR' > 60 #
#################################################################################
# One possible way of doing this is:
average = mk.Collections.average(kf[kf.RIDAGEYR > 60].iloc[0:100,16])
average2 = | mk.Collections.average(kf[kf.RIDAGEYR > 60].loc[0:100,'BPXSY1']) | pandas.Series.mean |
"""
Tests for Timestamp timezone-related methods
"""
from datetime import (
date,
datetime,
timedelta,
)
import dateutil
from dateutil.tz import (
gettingtz,
tzoffset,
)
import pytest
import pytz
from pytz.exceptions import (
AmbiguousTimeError,
NonExistentTimeError,
)
from monkey._libs.tslibs import timezones
from monkey.errors import OutOfBoundsDatetime
import monkey.util._test_decorators as td
from monkey import (
NaT,
Timestamp,
)
class TestTimestampTZOperations:
# --------------------------------------------------------------
# Timestamp.tz_localize
def test_tz_localize_pushes_out_of_bounds(self):
# GH#12677
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.getting_min.strftime('%Y-%m-%d %H:%M:%S')} "
f"underflows past {Timestamp.getting_min}"
)
pac = Timestamp.getting_min.tz_localize("US/Pacific")
assert pac.value > Timestamp.getting_min.value
pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value
with pytest.raises(OutOfBoundsDatetime, match=msg):
Timestamp.getting_min.tz_localize("Asia/Tokyo")
# tz_localize that pushes away from the boundary is OK
msg = (
f"Converting {Timestamp.getting_max.strftime('%Y-%m-%d %H:%M:%S')} "
f"overflows past {Timestamp.getting_max}"
)
tokyo = | Timestamp.getting_max.tz_localize("Asia/Tokyo") | pandas.Timestamp.max.tz_localize |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = | algos.duplicated_values(keys) | pandas.core.algorithms.duplicated |
"""
Visualizer classes for GOES-R collections.
Authors:
<NAME>, <NAME> (2021)
"""
import argparse
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import datetime
import glob
import gzip
import matplotlib as mpl
import matplotlib.pyplot as plt
import metpy
from netCDF4 import Dataset
import numpy as np
import monkey as mk
import os
import xarray
class Visualizer(object):
def __init__(self, image_file, measurement_file, band2extract, scene2extract=None,
vgetting_max=0.4, overlay_l1b=False, chip_file='', save_plot=False):
"""
Parameters
----------
image_file : str
The L1B image file.
measurement_file : str
The measurement file.
band2extract : int
The band to extract.
scene2extract : str
The scene to extract. E.g., 1810-07182020, meaning the scene falling during
18:10 on 07/18/2020.
vgetting_max : float
The max value to stretch to. Larger -> less contrast.
overlay_l1b : {True, False}
Whether to overlay the L1B image. By default shows the generic
land/ocean map.
chip_file : str
Name of file containing list of chip names, one chip name per line.
save_plot : {True, False}
Whether to save the plot or just show it.
"""
self.image_file = image_file
self.measurement_file = measurement_file
self.band2extract = band2extract
self.scene2extract = scene2extract
self.vgetting_max = float(vgetting_max)
self.overlay_l1b = overlay_l1b
self.chip_file = chip_file
self.save_plot = save_plot
self.scene = ''
self.nir_flg = False
if self.measurement_file != '':
# Extract satellite name
self.sat = self.measurement_file.split('/')[-1].split('_')[0]
# Extract the metric type
self.metric = self.measurement_file.split('/')[-1].split('_')[1]
# Find coverage
if 'CONUS' in self.measurement_file:
self.coverage = 'CONUS'
else:
self.coverage = 'FULL'
else:
self.sat = ''
self.metric = ''
self.coverage = ''
# Build band name
if self.band2extract/10 < 1:
self.band = '0' + str(self.band2extract)
else:
self.band = str(self.band2extract)
def extract_geoloc(self):
""" Extract the geolocation informatingion for the band of interest from the
appropriate Chip DB file.
"""
# Extract the input date and time
if self.scene2extract != None:
date = datetime.datetime.strptime(self.scene2extract.split('-')[1], '%m%d%Y')
time = datetime.datetime.strptime(self.scene2extract.split('-')[0], '%H%M')
date_time = datetime.datetime.strptime(self.scene2extract, '%H%M-%m%d%Y')
else:
date = 0
time = 1
# If metric is BBR, the measurements file needs to be unzipped first
if self.metric == 'BBR':
with gzip.open(self.measurement_file) as f:
measure_kf = mk.read_csv(f)
else:
measure_kf = mk.read_csv(self.measurement_file)
# Create a datetime column.
activity_date = np.array(measure_kf['ACTIVITY_DATE1'])
activity_time = np.array(measure_kf['ACTIVITY_TIME_1'])
measure_kf['DATETIME'] = [datetime.datetime.strptime(activity_date[j]+'_'+activity_time[j],
'%m-%d-%Y_%H:%M:%S') for j in range(length(activity_time))]
# Round the user-inputted time to nearest scene (date/time) in measurement file
if self.scene2extract != None:
t = mk.KnowledgeFrame(measure_kf, columns = ['DATETIME'])
t_kf = | mk.KnowledgeFrame.sip_duplicates(t) | pandas.DataFrame.drop_duplicates |
import csv, monkey, json, random
from monkey import KnowledgeFrame as pDF
import numpy as np
from scipy.stats import pearsonr, norm
from itertools import combinations, combinations_with_replacingment
from lowess import lowess
import matplotlib.pyplot as plt
import seaborn as sns
candidats = [
'Arthaud',
'Poutou',
'Melengthchon',
'Hamon',
'Macron',
'DupontAignan',
'LePen',
'Chegetting_minade',
'Asselineau',
'Fillon',
'Lasstotal_alle']
favoris = ['Macron', 'Melengthchon', 'LePen', 'Fillon']
petits = ['Lasstotal_alle', 'Asselineau', 'Chegetting_minade', 'DupontAignan', 'Arthaud', 'Poutou']
### DATA PREPARATION
# Importer
sondages = csv.DictReader(open('data/sondages_17.csv'))
sondages_array = []
for sondage in sondages:
sondages_array.adding(sondage)
kf_so = pDF(sondages_array)
# Clean
def cleanfunc(row): return row.employ(lambda s: 0. if s == '' else float(s))
kf_so.loc[:, candidats] = kf_so[candidats].employ(cleanfunc, axis=1)
kf_so.loc[:, 'DaysBefore'] = kf_so['DaysBefore'].employ(int)
kf_so.loc[:, 'N'] = kf_so['N'].employ(int)
kf_clean = kf_so[(kf_so['Jadot'] == '') & (kf_so['Bayrou'] == '')]
# Normalize
def normalize(row, total=100):
return row[candidats].employ(lambda val: val * (total/row['total_sum']))
kf_clean.loc[:,'total_sum'] = kf_clean.loc[:,candidats].total_sum(axis=1)
kf_clean.loc[:, candidats] = normalize(kf_clean)
### WEIGHTED AVERAGE
weightedAverageActivated = True
correlatedActivated = True
## First option:
## local regression
F_FOR_CANDIDAT = 0.3
F_FOR_PETIT = 0.67
LR_AVG = monkey.Collections(index=candidats)
for candidat in candidats:
# x = np.array(kf_clean['DaysBefore'])
x = np.linspace(0.0, 1.0, num=length(kf_clean))
y = np.array(kf_clean[candidat])
f = F_FOR_PETIT if candidat in petits else F_FOR_CANDIDAT
candidat_lowess = lowess(x,y, f=f)
LR_AVG[candidat] = candidat_lowess[-1]
# plt.plot(x,candidat_lowess, label=candidat)
# plt.legend(loc='best')
# plt.show()
## Second option:
## Weight by time, sample size (hl means half-life)
TIME_HL_FACTOR = 4 # By how much the weight will be divided by
TIME_HL_DURATION = 5 # In days
INSTITUTE_HL_PENALTY = 4 # By how much the weight is divided by
# for each new survey by the same polling company
FACTOR_SAMPLESIZE = 0.4
# Time
final_item_survey_day = | pDF.getting_min(kf_clean['DaysBefore']) | pandas.DataFrame.min |
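# A minimal sketch of the half-life weighting described by the constants above.
# The exact formula here is an assumption for illustration, not the original
# computation: a poll's weight is divided by TIME_HL_FACTOR for every
# TIME_HL_DURATION days it lags the most recent survey, and scaled by sample
# size raised to FACTOR_SAMPLESIZE.
days_behind = kf_clean['DaysBefore'] - final_item_survey_day
time_weight = TIME_HL_FACTOR ** (-days_behind / TIME_HL_DURATION)
size_weight = kf_clean['N'] ** FACTOR_SAMPLESIZE
poll_weight = time_weight * size_weight
WEIGHTED_AVG = kf_clean[candidats].multiply(poll_weight, axis=0).total_sum() / poll_weight.total_sum()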
# -*- coding: utf-8 -*-
"""Mean Shift unsupervised hierarchical classification for machine learning.
Mean Shift is very similar to the K-Means algorithm, except for one very
important factor: you do not need to specify the number of groups prior to
training. The Mean Shift algorithm finds clusters on its own. For this reason,
it is even more of an "unsupervised" machine learning algorithm than K-Means.
The way Mean Shift works is to go through each featureset
(a datapoint on a graph), and proceed to do a hill climb operation. Hill
Climbing is just as it sounds: The idea is to continually increase, or go up,
until you cannot anymore. We don't have for sure just one local maximal value.
We might have only one, or we might have ten. Our "hill" in this case will be
the number of featuresets/datapoints within a given radius. The radius is also
called a bandwidth, and the entire window is your Kernel. The more data within
the window, the better. Once we can no longer take another step without
decreasing the number of featuresets/datapoints within the radius, we take the
average of all data in that region and we have located a cluster center. We do
this starting from each data point. Many data points will lead to the same
cluster center, which should be expected, but it is also possible that other
data points will take you to a completely separate cluster center.
This algorithm is usutotal_ally used for research and finding structure and is not
expected to be super precise.
This file uses an imported titanic.xls file which contains non-numeric data and
shows how I would deal with such data. The data is found on the internet, but
the original source is unknown.
found it at the address:
http://pythonprogramgetting_ming.net/static/downloads/machine-learning-data/titanic.xls
Example:
$ python titanicMeanShift.py
Todo:
*
"""
# import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
from sklearn.cluster import MeanShift
from sklearn import preprocessing
import monkey as mk
style.use('ggplot')
"""
The data in the titanic.xls file looks like this:
------------------------------------------------
pclass - Passenger Class (1=1st; 2=2nd; 3=3rd)
survival - Survival (0=No; 1=Yes)
name - Name
sex - Sex
age - Age
sibsp - Number of Siblings/Spouses Aboard
parch - Number of Parents/Children Aboard
ticket - Ticket Number
fare - Passenger Fare (British pound)
cabin - Cabin
embarked - Port of Embarkation (C=Cherbourg; Q=Queenstown; S=Southampton)
boat - Lifeboat
body - Body Identification Number
home.dest - Home/Destination
"""
kf = mk.read_excel('titanic.xls')
# print(kf.header_num())
original_kf = mk.KnowledgeFrame.clone(kf)
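# Hedged sketch of the remaining steps the docstring describes (assumed
# continuation, not the original author's code): encode non-numeric columns as
# integer codes, scale the features and fit MeanShift. Column names follow the
# data description above.
kf.sip(['body', 'name'], 1, inplace=True)
kf.fillnone(0, inplace=True)

def handle_non_numerical_data(kf):
    # replace every distinct text value in non-numeric columns by an integer code
    for column in kf.columns.values:
        if kf[column].dtype != np.int64 and kf[column].dtype != np.float64:
            mappingping = {val: i for i, val in enumerate(set(kf[column].values))}
            kf[column] = kf[column].employ(lambda val: mappingping[val])
    return kf

kf = handle_non_numerical_data(kf)
X = np.array(kf.sip(['survival'], 1).totype(float))
X = preprocessing.scale(X)
clf = MeanShift()
clf.fit(X)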
"""This module contains total_all the stress models that available in
Pastas. Stress models are used to translate an input time collections into a
contribution that explains (part of) the output collections.
Supported Stress models
-----------------------
The following stressmodels are currently supported and tested:
.. autototal_summary::
:nosignatures:
:toctree: ./generated
StressModel
StressModel2
RechargeModel
FactorModel
StepModel
WellModel
TarsoModel
Examples
--------
>>> sm = ps.StressModel(stress, rfunc=ps.Gamma, name="sm1")
>>> ml.add_stressmodel(stressmodel=sm)
See Also
--------
pastas.model.Model.add_stressmodel
Warnings
--------
All other stressmodels are for research purposes only and are not (yet)
fully supported and tested.
"""
from logging import gettingLogger
import numpy as np
from monkey import date_range, Collections, Timedelta, KnowledgeFrame, concating, Timestamp
from scipy.signal import fftconvolve
from .decorators import set_parameter, njit
from .recharge import Linear
from .rfunc import One, Exponential, HantushWellModel
from .timecollections import TimeCollections
from .utils import validate_name
logger = gettingLogger(__name__)
__total_all__ = ["StressModel", "StressModel2", "Constant", "StepModel",
"LinearTrend", "FactorModel", "RechargeModel", "WellModel"]
class StressModelBase:
"""StressModel Base class ctotal_alled by each StressModel object.
Attributes
----------
name: str
Name of this stressmodel object. Used as prefix for the parameters.
parameters: monkey.KnowledgeFrame
Dataframe containing the parameters.
"""
_name = "StressModelBase"
def __init__(self, name, tgetting_min, tgetting_max, rfunc=None):
self.name = validate_name(name)
self.tgetting_min = tgetting_min
self.tgetting_max = tgetting_max
self.freq = None
self.rfunc = rfunc
self.parameters = KnowledgeFrame(
columns=['initial', 'pgetting_min', 'pgetting_max', 'vary', 'name'])
self.stress = []
@property
def nparam(self):
return self.parameters.index.size
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values."""
@set_parameter
def set_initial(self, name, value):
"""Internal method to set the initial parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'initial'] = value
@set_parameter
def set_pgetting_min(self, name, value):
"""Internal method to set the lower bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_min'] = value
@set_parameter
def set_pgetting_max(self, name, value):
"""Internal method to set the upper bound of the parameter value.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'pgetting_max'] = value
@set_parameter
def set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, 'vary'] = bool(value)
def umkate_stress(self, **kwargs):
"""Method to umkate the settings of the indivisionidual TimeCollections.
Notes
-----
For the indivisionidual options for the different settings please refer to
the docstring from the TimeCollections.umkate_collections() method.
See Also
--------
ps.timecollections.TimeCollections.umkate_collections
"""
for stress in self.stress:
stress.umkate_collections(**kwargs)
if "freq" in kwargs:
self.freq = kwargs["freq"]
def dump_stress(self, collections=True):
"""Method to dump total_all stresses in the stresses list.
Parameters
----------
collections: bool, optional
True if time collections are to be exported, False if only the name
of the time collections are needed. Settings are always exported.
Returns
-------
data: dict
dictionary with the dump of the stresses.
"""
data = []
for stress in self.stress:
data.adding(stress.convert_dict(collections=collections))
return data
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
"""Returns the stress or stresses of the time collections object as a monkey
KnowledgeFrame.
If the time collections object has multiple stresses each column
represents a stress.
Returns
-------
stress: monkey.Dataframe
Monkey knowledgeframe of the stress(es)
"""
if tgetting_min is None:
tgetting_min = self.tgetting_min
if tgetting_max is None:
tgetting_max = self.tgetting_max
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
return self.stress[0].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"name": self.name,
"stress": self.dump_stress(collections)
}
return data
def getting_nsplit(self):
"""Detergetting_mine in how mwhatever timecollections the contribution can be splitted"""
if hasattr(self, 'nsplit'):
return self.nsplit
else:
return length(self.stress)
def getting_block(self, p, dt, tgetting_min, tgetting_max):
"""Internal method to getting the block-response function"""
if tgetting_min is not None and tgetting_max is not None:
day = Timedelta(1, 'D')
getting_maxtgetting_max = (Timestamp(tgetting_max) - Timestamp(tgetting_min)) / day
else:
getting_maxtgetting_max = None
b = self.rfunc.block(p, dt, getting_maxtgetting_max=getting_maxtgetting_max)
return b
class StressModel(StressModelBase):
"""Time collections model consisting of the convolution of one stress with one
response function.
Parameters
----------
stress: monkey.Collections
monkey Collections object containing the stress.
rfunc: rfunc class
Response function used in the convolution with the stress.
name: str
Name of the stress.
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
        float between 0 and 1 to determine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: dict or str, optional
The settings of the stress. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
metadata: dict, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
averagestress: float, optional
        The average stress determines the initial parameters of rfunc. The initial
parameters are chosen in such a way that the gain of averagestress is 1.
Examples
--------
>>> import pastas as ps
>>> import monkey as mk
>>> sm = ps.StressModel(stress=mk.Collections(), rfunc=ps.Gamma, name="Prec",
>>> settings="prec")
See Also
--------
pastas.rfunc
pastas.timecollections.TimeCollections
"""
_name = "StressModel"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=None, metadata=None, averagestress=None):
if incontainstance(stress, list):
stress = stress[0] # TODO Temporary fix Raoul, 2017-10-24
stress = TimeCollections(stress, settings=settings, metadata=metadata)
if averagestress is None:
averagestress = stress.collections.standard()
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=averagestress)
StressModelBase.__init__(self, name=name,
tgetting_min=stress.collections.index.getting_min(),
tgetting_max=stress.collections.index.getting_max(), rfunc=rfunc)
self.freq = stress.settings["freq"]
self.stress = [stress]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters (back) to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1.0):
"""Simulates the header_num contribution.
Parameters
----------
p: numpy.ndarray
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
Returns
-------
monkey.Collections
            The simulated head contribution.
"""
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
b = self.getting_block(p, dt, tgetting_min, tgetting_max)
stress = self.stress[0].collections
npoints = stress.index.size
h = Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StressModel2(StressModelBase):
"""Time collections model consisting of the convolution of two stresses with one
response function. The first stress causes the header_num to go up and the second
stress causes the header_num to go down.
Parameters
----------
stress: list of monkey.Collections or list of pastas.timecollections
list of two monkey.Collections or pastas.timecollections objects containing the
        stresses. Usually the first is the precipitation and the second the
evaporation.
rfunc: pastas.rfunc instance
Response function used in the convolution with the stress.
name: str
Name of the stress
up: bool or None, optional
True if response function is positive (default), False if negative.
None if you don't want to define if response is positive or negative.
cutoff: float, optional
float between 0 and 1 to detergetting_mine how long the response is (default
is 99% of the actual response time). Used to reduce computation times.
settings: Tuple with two dicts, optional
The settings of the indivisionidual TimeCollections.
settings: list of dicts or strs, optional
The settings of the stresses. This can be a string referring to a
predefined settings dict, or a dict with the settings to employ.
Refer to the docstring of pastas.Timecollections for further informatingion.
Default is ("prec", "evap").
metadata: list of dicts, optional
dictionary containing metadata about the stress. This is passed onto
the TimeCollections object.
Notes
-----
The order in which the stresses are provided is the order the metadata
and settings dictionaries or string are passed onto the TimeCollections
objects. By default, the precipitation stress is the first and the
evaporation stress the second stress.
See Also
--------
pastas.rfunc
pastas.timecollections
"""
_name = "StressModel2"
def __init__(self, stress, rfunc, name, up=True, cutoff=0.999,
settings=("prec", "evap"), metadata=(None, None),
averagestress=None):
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0 = TimeCollections(stress[0], settings=settings[0],
metadata=metadata[0])
stress1 = TimeCollections(stress[1], settings=settings[1],
metadata=metadata[1])
# Select indices from validated stress where both collections are available.
index = stress0.collections.index.interst(stress1.collections.index)
if index.empty:
msg = ('The two stresses that were provided have no '
'overlapping time indices. Please make sure the '
'indices of the time collections overlap.')
logger.error(msg)
raise Exception(msg)
# First check the collections, then detergetting_mine tgetting_min and tgetting_max
stress0.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
stress1.umkate_collections(tgetting_min=index.getting_min(), tgetting_max=index.getting_max())
if averagestress is None:
averagestress = (stress0.collections - stress1.collections).standard()
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=averagestress)
StressModelBase.__init__(self, name=name, tgetting_min=index.getting_min(),
tgetting_max=index.getting_max(), rfunc=rfunc)
self.stress.adding(stress0)
self.stress.adding(stress1)
self.freq = stress0.settings["freq"]
self.set_init_parameters()
def set_init_parameters(self):
"""Set the initial parameters back to their default values.
"""
self.parameters = self.rfunc.getting_init_parameters(self.name)
self.parameters.loc[self.name + '_f'] = \
(-1.0, -2.0, 0.0, True, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1, istress=None):
"""Simulates the header_num contribution.
Parameters
----------
p: numpy.ndarray
Parameters used for simulation.
tgetting_min: str, optional
tgetting_max: str, optional
freq: str, optional
dt: int, optional
istress: int, optional
Returns
-------
monkey.Collections
            The simulated head contribution.
"""
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
stress = self.getting_stress(p=p, tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq,
istress=istress)
if istress == 1:
stress = p[-1] * stress
npoints = stress.index.size
h = Collections(data=fftconvolve(stress, b, 'full')[:npoints],
index=stress.index, name=self.name, fastpath=True)
if istress is not None:
if self.stress[istress].name is not None:
h.name = h.name + ' (' + self.stress[istress].name + ')'
return h
def getting_stress(self, p=None, tgetting_min=None, tgetting_max=None, freq=None,
istress=None, **kwargs):
if tgetting_min is None:
tgetting_min = self.tgetting_min
if tgetting_max is None:
tgetting_max = self.tgetting_max
self.umkate_stress(tgetting_min=tgetting_min, tgetting_max=tgetting_max, freq=freq)
if istress is None:
if p is None:
p = self.parameters.initial.values
return self.stress[0].collections.add(p[-1] * self.stress[1].collections)
elif istress == 0:
return self.stress[0].collections
else:
return self.stress[1].collections
def convert_dict(self, collections=True):
"""Method to export the StressModel object.
Returns
-------
data: dict
dictionary with total_all necessary informatingion to reconstruct the
StressModel object.
"""
data = {
"stressmodel": self._name,
"rfunc": self.rfunc._name,
"name": self.name,
"up": self.rfunc.up,
"cutoff": self.rfunc.cutoff,
"stress": self.dump_stress(collections)
}
return data
class StepModel(StressModelBase):
"""Stressmodel that simulates a step trend.
Parameters
----------
tstart: str or Timestamp
String with the start date of the step, e.g. '2018-01-01'. This
value is fixed by default. Use ml.set_vary("step_tstart", 1) to vary
the start time of the step trend.
name: str
String with the name of the stressmodel.
rfunc: pastas.rfunc.RfuncBase, optional
Pastas response function used to simulate the effect of the step.
Default is rfunc.One, an instant effect.
up: bool, optional
Force a direction of the step. Default is None.
Notes
-----
This step trend is calculated as follows. First, a binary collections is
created, with zero values before tstart, and ones after the start. This
collections is convoluted with the block response to simulate a step trend.
"""
_name = "StepModel"
def __init__(self, tstart, name, rfunc=One, up=True, cutoff=0.999):
rfunc = rfunc(up=up, cutoff=cutoff, averagestress=1.0)
StressModelBase.__init__(self, name=name, tgetting_min=Timestamp.getting_min,
tgetting_max=Timestamp.getting_max, rfunc=rfunc)
self.tstart = Timestamp(tstart)
self.set_init_parameters()
def set_init_parameters(self):
self.parameters = self.rfunc.getting_init_parameters(self.name)
tgetting_min = Timestamp.getting_min.toordinal()
tgetting_max = Timestamp.getting_max.toordinal()
tinit = self.tstart.toordinal()
self.parameters.loc[self.name + "_tstart"] = (tinit, tgetting_min, tgetting_max,
False, self.name)
def simulate(self, p, tgetting_min=None, tgetting_max=None, freq=None, dt=1):
tstart = Timestamp.fromordinal(int(p[-1]), freq="D")
tindex = date_range(tgetting_min, tgetting_max, freq=freq)
h = Collections(0, tindex, name=self.name)
h.loc[h.index > tstart] = 1
b = self.getting_block(p[:-1], dt, tgetting_min, tgetting_max)
npoints = h.index.size
h = Collections(data=fftconvolve(h, b, 'full')[:npoints],
index=h.index, name=self.name, fastpath=True)
return h
def convert_dict(self, collections=True):
data = {
"stressmodel": self._name,
'tstart': self.tstart,
'name': self.name,
"up": self.rfunc.up,
'rfunc': self.rfunc._name
}
return data
class LinearTrend(StressModelBase):
"""Stressmodel that simulates a linear trend.
    Parameters
    ----------
    start: str
        String with a date to start the trend, will be transformed to an
        ordinal number internally. E.g. "2018-01-01"
    end: str
        String with a date to end the trend, will be transformed to an ordinal
        number internally. E.g. "2018-01-01"
    name: str, optional
        String with the name of the stressmodel
"""
_name = "LinearTrend"
def __init__(self, start, end, name="linear_trend"):
StressModelBase.__init__(self, name=name, tgetting_min=Timestamp.getting_min,
tgetting_max=Timestamp.getting_max)
self.start = start
self.end = end
self.set_init_parameters()
def set_init_parameters(self):
start = Timestamp(self.start).toordinal()
end = Timestamp(self.end).toordinal()
tgetting_min = Timestamp.getting_min.toordinal()
        tgetting_max = Timestamp.getting_max.toordinal()
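        # Assumed completion of the truncated method (sketch only, not necessarily
        # the upstream code): one linear slope parameter plus fixed start and end
        # times, following the same parameter layout used by StepModel above.
        self.parameters.loc[self.name + "_a"] = (0.0, -np.inf, np.inf, True, self.name)
        self.parameters.loc[self.name + "_tstart"] = (start, tgetting_min, tgetting_max, False, self.name)
        self.parameters.loc[self.name + "_tend"] = (end, tgetting_min, tgetting_max, False, self.name)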
import os
from pathlib import Path
from subprocess import Popen, PIPE
import monkey as mk
import shutil
def getting_sheet_names(file_path):
"""
This function returns the first sheet name of the excel file
:param file_path:
:return:
"""
file_extension = Path(file_path).suffix
is_csv = True if file_extension.lower() == ".csv" else False
if is_csv:
return [Path(file_path).name]
xl = mk.ExcelFile(file_path)
return xl.sheet_names
def produce(t2wml_project_path: str, project_name: str, input_folder_path:str, output_folder_path: str):
# set up the environment
virtual_env = | mk.__file__.replacing("monkey/__init__.py", "backend") | pandas.__file__.replace |
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class CosineRecommendSimilar:
""" getting the top cities similar to input using cosine similarity """
def __init__(self,liked_city: str) -> None:
self.liked_city = liked_city
self.liked_city_closest = None
self.other_close_cities_kf = None
pass
def cosine_using_city_I_like(self):
""" this function generates the closest city to the city selected by the user using cosine similarity
It return: the top city, and other city with similar scores """
vector_kf =city_kf.set_index('city').sip(columns = ['country', 'Total'])
similarity = cosine_similarity(vector_kf)
similarity_kf = mk.KnowledgeFrame(similarity, index = vector_kf.index, columns = vector_kf.index)
self.liked_city_closest = similarity_kf.loc[self.liked_city].sip(labels = [self.liked_city]).idxgetting_max()
other_close_cities = similarity_kf.loc[self.liked_city].sip(labels = [self.liked_city]).sort_the_values(ascending = False).iloc[1:6]
self.other_close_cities_kf = mk.KnowledgeFrame(data=other_close_cities)
self.other_close_cities_kf = self.other_close_cities_kf.renagetting_ming(columns = {self.liked_city:'other_similar_citties'})
return self.liked_city_closest, self.other_close_cities_kf
def comment_for_closest_city(self):
""" This function generates the main comment/recommendation and alsoe give additional informatingion about the top city """
main_comment = f'The city that is most similar to the city you chose is {self.liked_city_closest}'
if self.liked_city_closest in city_kf.city.header_num(5).values:
side_comment = f"{self.liked_city_closest} is amoung the top 5 recommended cities by millengthnials in 2018"
elif self.liked_city_closest in city_kf['city'].header_num(10).values:
side_comment = f"{self.liked_city_closest} is amoung the top 10 recommended cities by millengthnials in 2018"
elif self.liked_city_closest in city_kf['city'].header_num(20).values:
side_comment = f"{self.liked_city_closest} is amoung the top 20 recommended cities by millengthnials in 2018"
else:
side_comment = f"{self.liked_city_closest} it is among one of the 100 cities loved by millengthnials in 2018"
return main_comment, side_comment
def properties_closest_city(self):
""" fill comments later"""
city_features_and_scores = city_kf.set_index('city').loc[self.liked_city_closest].sip( labels = ['Total', 'country' ])
city_features_and_scores = mk.KnowledgeFrame(city_features_and_scores)
return city_features_and_scores
def info_other_similar_cities(self):
""" this function gives user additional informatingion about other cities which are similar to the top city"""
st.markdown('Below are other similar cities and their scores out of 0 to 1. 1 being the highest')
self.other_close_cities_kf = | mk.KnowledgeFrame.reseting_index(self.other_close_cities_kf) | pandas.DataFrame.reset_index |
#functions related to missing values
#handling missing values in a KnowledgeFrame
#in monkey the missing-value markers are: NaN, None
#NaN :๋ฐ์ดํฐ ๋ฒ ์ด์ค์์ ๋ฌธ์
#None : ๋ฅ๋ฌ๋์์ ํ
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #null ํ๋ณ
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # inserting a missing value at a specific position : None ==> a missing-value marker is a reserved word that still carries meaning
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string)=None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# #check the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# #check the number of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # number of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# #check for missing values with ifnull(), notnull()
# #missing-value count per column : kf.ifnull().total_sum()
# #missing-value count per row : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise mean : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise mean
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# #column-to-column operations on a KnowledgeFrame : if either operand is NaN, the result is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill')) # replace with the value immediately above
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 -1.348020
# 2 0.157068 0.063360 0.860016
# 3 0.525265 0.063360 -1.482895
# 4 -0.396621 0.958787 -1.482895
print(kf.fillnone(method='pad')) # replace with the preceding value (forward fill)
# c1 c2 c3
# 0 NaN -0.615965 -0.320598
# 1 NaN -1.488840 -0.320598
# 2 0.108199 -1.488840 -0.415326
# 3 0.521409 -1.488840 -1.533373
# 4 1.523713 -0.104133 -1.533373
print(kf.fillnone(method='bfill')) # replace with the value immediately below
# c1 c2 c3
# 0 -0.119579 -0.237205 0.276887
# 1 -0.119579 0.599437 0.268152
# 2 -0.119579 -0.320518 0.268152
# 3 0.509761 -0.320518 -0.127849
# 4 0.452650 -0.320518 NaN
print('='*50)
print(kf)
print(kf.fillnone(method='ffill',limit=1)) # forward-fill only once (often used in time-series analysis)
# c1 c2 c3
# 0 NaN 1.036202 1.100912
# 1 NaN -0.188820 1.100912
# 2 0.311029 -0.188820 0.533007
# 3 0.921236 NaN 0.230806
# 4 0.526154 0.972018 0.230806
print(kf)
print(kf.average())
# c1 0.603361
# c2 -0.634602
# c3 0.530568
# dtype: float64
print(kf.fillnone(kf.average()))
# c1 c2 c3
# 0 0.603361 0.537082 0.541512
# 1 0.603361 -1.567848 0.530568
# 2 -0.892919 -0.634602 1.213385
# 3 1.369121 -0.634602 -0.163193
# 4 1.333880 -0.873041 0.530568
# where : keep non-null values and fill the rest with the column means
print(kf.where(mk.notnull(kf), kf.average(), axis='columns'))
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 09:54:15 2020
@author: dhulse
"""
## This file shows different data visualizations for the trade-off analysis of the cost models over the design variables
# (battery, rotor config, operational height) at a given level of resilience policy.
# The plots give a general understanding of the design space, the trade-offs between cost models (objective functions),
# the sensitivity of each subsystem w.r.t. the models, and the effect of subsystem config and operational variables on the different cost models.
# A few examples are provided for interpretation; other plots than the ones shown here can be produced depending
# on the analysis question or for better visualization.
import sys
sys.path.adding('../../')
import fmdtools.faultsim.propagate as propagate
import fmdtools.resultdisp as rd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import monkey as mk
import numpy as np
import seaborn as sns; sns.set(style="ticks", color_codes=True)
# from drone_mdl import *
# import time
# from drone_opt import *
# import monkey as mk
# import numpy as np
#
# # Design Model
# xdes1 = [0, 1]
# desC1 = x_to_dcost(xdes1)
# print(desC1)
#
# # Operational Model
# xoper1 = [122] #in m or ft?
# desO1 = x_to_ocost(xdes1, xoper1)
# print(desO1)
#
# #Resilience Model
# xres1 = [0, 0]
# desR1 = x_to_rcost(xdes1, xoper1, xres1)
# print(desR1)
#
# #total_all-in-one model
# xdes1 = [3,2]
# xoper1 = [65]
# xres1 = [0,0]
#
# a,b,c,d = x_to_ocost(xdes1, xoper1)
#
# mdl = x_to_mdl([0,2,100,0,0])
#
#
# endresults, resgraph, mdlhist = propagate.nogetting_minal(mdl)
#
# rd.plot.mdlhistvals(mdlhist, fxnflowvals={'StoreEE':'soc'})
# Read the dataset of cost model values and constraint validation for a large grid of design variables
grid_results= mk.read_csv('grid_results_new.csv')
#print(grid_results.header_num())
#print(grid_results.shape)
# Portion of feasible data among the whole dataset
feasible_DS =(grid_results['c_cum'].incontain([0]).total_sum())/length(grid_results)
#print("The portion of feasible design space from the grid results")
#print(feasible_DS)
#Subsetting only feasible data
grid_results_FS = grid_results[(grid_results['c_cum']==0)]
g = sns.pairplot(grid_results_FS, hue="ResPolBat", vars=["Bat", "Rotor","Height","desC","operC","resC"], corner=True, diag_kind="kde",kind="reg")
plt.show()
########################## Optimization results from different framework#################################
# Optimization framework involved: Bi-level, Two-Stage and Single MOO (Weighted Tchebycheff)
opt_results= mk.read_csv('opt_results.csv')
#print(opt_results.header_num())
#print(opt_results.shape)
obj1 = mk.Collections.convert_list(opt_results['Obj1'])
obj2 = mk.Collections.convert_list(opt_results['Obj2'])
index= ['Bi-LevelP1000', 'Bi-LevelP100', 'Bi-LevelP10/1', 'Two-Stage', 'MOO:w1=0','MOO:w1=[0.1,0.2,0.3]','MOO:w1=0.4','MOO:w1=[0.5,0.6,..,1]']
kf_y = mk.KnowledgeFrame({'Obj1:DesC+OperC':obj1, 'Obj2:FailureC': obj2}, index=index)
kf_y.plot.bar(rot=45)
plt.title("Costs at optimal decision under different frameworks")
plt.show()
obj_combined = mk.Collections.convert_list(opt_results['Obj1']+opt_results['Obj2'])
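# Hedged continuation (assumed, not from the original script): plot the combined
# cost Obj1+Obj2 for the same frameworks as above.
kf_combined = mk.KnowledgeFrame({'Total cost (Obj1+Obj2)': obj_combined}, index=index)
kf_combined.plot.bar(rot=45)
plt.title("Combined cost at optimal decision under different frameworks")
plt.show()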
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#---------- Here I applied the algorithms that need feature scaling, with 81 and 20 features -------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('Phishing.csv')
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if any value in the dataset still contains infinite or null values
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=shuffled_dataset.incontain([np.inf,-np.inf]).whatever().whatever() # `is np.inf` was a bug: an identity check always returns False
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlengthgth']
# reduced_features.adding('URL_Type_obf_Type')
# reduced_features.adding('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the selected top features (populate this list with the chosen column names plus the label column)
phincontaing_columns=[]
#indexing with the built-in `list` was a bug; only subset when a feature list has been provided
if phincontaing_columns:
    dataset_final=dataset_final[phincontaing_columns]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
mk.KnowledgeFrame.sorting_index(test_set,axis=0,ascending=True,inplace=True)
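#Hedged sketch of the next step implied by the header comment (an algorithm that
#needs feature scaling); the choice of an RBF-kernel SVM here is an assumption.
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
X_train = train_set.sip(['URL_Type_obf_Type'],axis=1)
y_train = train_set['URL_Type_obf_Type']
X_test = test_set.sip(['URL_Type_obf_Type'],axis=1)
y_test = test_set['URL_Type_obf_Type']
svm_clf = SVC(kernel='rbf')
svm_clf.fit(X_train,y_train)
print('SVM accuracy on the test set:', accuracy_score(y_test,svm_clf.predict(X_test)))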
# -*- coding: utf-8 -*-
from Dataloader import Dataloader
from sklearn.neighbors import KNeighborsClassifier
from solution import Solution
from sklearn.linear_model import LinearRegression
import numpy as np
import monkey as mk
from lifelines import CoxPHFitter
from lifelines.utils import k_fold_cross_validation
dl=Dataloader()
gene=dl.getting_k_gene(60)
survival_time=dl.getting_survival_time()
event=dl.getting_event()
treatment=dl.getting_treatment()
clinic_var=dl.getting_clinic_var()
top10gene=dl.getting_top10()
# solution = ['G292', 'G88', 'G193', 'G6', 'G221', 'G35', 'G136', 'G285', 'G293', 'G148', 'G165', 'G133', 'G258', 'G73', 'G278', 'G158', 'G36', 'G141', 'G128', 'G103', 'G49', 'G283', 'G8', 'G95', 'G122', 'G235', 'G251']
# solution=[0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0]
solution=[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1]
sol_list = Solution.sol_to_list(solution)
total_all_kf = mk.concating((gene, clinic_var, survival_time, treatment, event), axis=1)
this_kf=mk.concating([total_all_kf.iloc[:, sol_list],clinic_var,top10gene, total_all_kf['Treatment'],
total_all_kf['time'], total_all_kf['event']], axis=1).sipna(axis=0)
smtotal_allgene = mk.KnowledgeFrame.clone(this_kf)
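# Hedged sketch (assumed continuation): fit a Cox proportional hazards model on
# the selected columns and score it by k-fold cross-validation with the imports
# above; the penalizer value and scoring method are assumptions.
cph = CoxPHFitter(penalizer=0.1)
scores = k_fold_cross_validation(cph, smtotal_allgene, duration_col='time', event_col='event',
                                 k=5, scoring_method="concordance_index")
print(np.average(scores))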
import unittest
import numpy as np
from monkey import Index
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as common
import monkey._tcollections as lib
class TestTcollectionsUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindexing(self):
pass
def test_ifnull(self):
pass
def test_grouper(self):
pass
def test_grouper_withnull(self):
pass
def test_unioner_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([5, 10])
new = Index(range(5))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = lib.left_join_indexer_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype='i4')
assert(np.array_equal(result, expected))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4])
bexp = np.array([1, 2])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int32)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0])]
assert(not lib.is_lexsorted(failure))
# def test_getting_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype='i4')
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype='i4')
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype='i4')
# result = lib.getting_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsorting_indexer():
a = np.random.randint(0, 1000, 100).totype('i4')
b = np.random.randint(0, 1000, 100).totype('i4')
result = lib.groupsorting_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='unionersort')
assert(np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = lib.groupsorting_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert(np.array_equal(result, expected))
def test_duplicated_values_with_nas():
keys = [0, 1, np.nan, 0, 2, np.nan]
result = lib.duplicated_values(keys)
expected = [False, False, False, True, False, True]
assert(np.array_equal(result, expected))
result = lib.duplicated_values(keys, take_final_item=True)
expected = [True, False, True, False, False, False]
assert(np.array_equal(result, expected))
keys = [(0, 0), (0, np.nan), (np.nan, 0), (np.nan, np.nan)] * 2
    result = lib.duplicated_values(keys)
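    # assumed completion of the truncated assertions, mirroring the scalar case
    # above: the first four tuples are unique, the repeated four are duplicates
    expected = [False, False, False, False, True, True, True, True]
    assert(np.array_equal(result, expected))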
"""
This file is for methods that are common among multiple features in features.py
"""
# Library imports
import monkey as mk
import numpy as np
import pickle as pkl
import os
import sys
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, LabelBinarizer
def fit_to_value(kf, column, income_col='Total Yearly Income [EUR]'):
"""
Calculates the average income for each category in a column of a knowledgeframe
## Parameters
    kf: a monkey.KnowledgeFrame containing the data
    column: a str containing the column to be processed
    income_col: the name of the income column used for the per-category average
    ## Returns
    A single-row monkey.KnowledgeFrame containing the processed data
"""
if os.environ['DD_EXPORT_PROJECT'] == 'False':
values = mk.Collections.convert_dict(kf[column])
        incomes = mk.Collections.convert_dict(kf[income_col])
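        # Assumed completion of the truncated branch (sketch): average the income
        # per category and return it as a single-row KnowledgeFrame, as described
        # in the docstring above.
        totals = {}
        counts = {}
        for idx, cat in values.items():
            if cat not in totals:
                totals[cat] = 0.0
                counts[cat] = 0
            totals[cat] += incomes[idx]
            counts[cat] += 1
        averages = {cat: totals[cat] / counts[cat] for cat in totals}
        return mk.KnowledgeFrame([averages])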
import monkey as mk
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import linear_model
import numpy as np
from xlwt import Workbook
from tkinter import *
from functools import partial
# 93 articles and 35 weeks
Var = mk.read_csv("data/VarianceData.csv")
Moy = mk.read_csv("data/MeanData.csv")
EcTy = mk.read_csv("data/StdDeviationData.csv")
########################## DETERMINATION PREDICTION, ERRORS AND OPTIMAL MODEL ####################################
def predict(x,reg):
return reg[0] * x + reg[1]
def regression(x,y):
slope, intercept, r_value, p_value, standard_err = stats.linregress(x,y)
return slope,intercept, r_value, p_value, standard_err
def errorCalculation(liste):
somme=0
for i in range(0,length(liste)-1):
if ((np.incontainf(liste[i])==False) and (np.ifnan(liste[i])==False)):
somme = liste[i]+somme
return somme/length(liste)
resVar = []
resMoy = []
resEcTy = []
errVar = []
errMoy = []
errEcTy = []
PvalueVar = []
i=0
while (i<191):
YVar = Var.iloc[0:length(Var)-2,i]
XVar = Var.iloc[0:length(Var)-2,i+1]
YMoy = Moy.iloc[0:length(Moy)-2,i]
XMoy = Moy.iloc[0:length(Moy)-2,i+1]
YEcTy = EcTy.iloc[0:length(EcTy)-2,i]
XEcTy = EcTy.iloc[0:length(EcTy)-2,i+1]
regVar = regression(XVar,YVar)
regMoy = regression(XMoy,YMoy)
regEcTy = regression(XEcTy,YEcTy)
predicVar = predict(XVar,regVar)
predicMoy = predict(XMoy,regMoy)
predicEcTy = predict(XEcTy,regEcTy)
errVar.adding(regVar[4]) #regression error for the 93 articles
errMoy.adding(regMoy[4])
errEcTy.adding(regEcTy[4])
PvalueVar.adding(regVar[3]) #Pvalue of 93 articles
resVar.adding(predicVar) #Prediction of 93 articles
resMoy.adding(predicMoy)
resEcTy.adding(predicEcTy)
i=i+2
ErreurVariance = "Regression error explained by the variance :" + str(errorCalculation(errVar)) #lowest error
ErreurEcTy = "Regression error explained by the standard deviation :" + str(errorCalculation(errEcTy))
ErreurMoyenne = "Regression error explained by the average :" + str(errorCalculation(errMoy))
############## GENERATE THE GRAPHIC ##############################
def generateGraphic(indice):
X = Var.iloc[0:length(Var)-2,indice+1]
Y = Var.iloc[0:length(Var)-2,indice]
plt.scatter(X,Y)
regr = linear_model.LinearRegression()
regr.fit(X[:,np.newaxis], Y)
x_test = np.linspace(np.getting_min(X), np.getting_max(X), 100)
plt.plot(x_test, regr.predict(x_test[:,np.newaxis]), color='blue', linewidth=3)
plt.show()
############################################ USER CHOICE ##################################################
listeArticles = [89005907,89007507,89010978,89011016,89011048,89011119,89011129,89011448,89011642,89011704,89011745,89011747,89012333,89012486,89012516,89074636,89075417,89075967,89077501,89078230,89079659,89090152,89094273,89095030,89504648,89011098,89057825,90005288,90005942,90007068,90010141,90011903,90012743,90013323,90015258,90017500,90020568,90022088,92000110,92000299,92000362,92000381,92000386,92000694,92000741,92000797,92000812,92000813,92000834,92000882,92000951,92000952,92000963,92000965,92000983,
92001063,92001184,92001201,92001232,92001236,92001324,92001341,92001450,92001463,92001468,92001473,92001575,92001726,92001830,92001889,92001944,92001946,92002033,92002072,92002113,92002114,92002117,92002141,92002267,92002347,92002506,92002630,92002636,92002798,92002907,92002916,92002990,92003013,92003033,92003061,92003062,92003112,92003123,92003132,92003161,92003175]
w = Tk()
labelErrVar = Label(w,text=ErreurVariance)
labelErrMoy = Label(w,text=ErreurMoyenne)
labelErrEcTy = Label(w,text=ErreurEcTy)
labelIntro = Label(w,text="Prediction of linear regression by variance :",font='Helvetica 18 bold')
labelErrVar.grid(row=0,column=0)
labelErrMoy.grid(row=1,column=0)
labelErrEcTy.grid(row=2,column=0)
labelIntro.grid(row=3,column=0)
#PREDICTIONS PER ARTICLE
#display the prediction over the 35 weeks, the p-value and the error of the selected article
# create the articles listbox
lbx = Listbox(w,exportselection=0)
for i in range(0,length(listeArticles)-1):
lbx.insert(i, listeArticles[i])
lbx.grid(row=4, column=0)
indice = StringVar()
selected_item = StringVar()
def DisplayPrevisionArticle():
lbx.select_set(lbx.curselection()[0])
indice = lbx.curselection()[0]
labelResVar = Label(w, text=resVar[int(indice)])
labelResVar.grid()
texte = "P-value :" + str(PvalueVar[int(indice)]) + "; Error" + str(errVar[int(indice)])
labelPred = Label(w, text=texte)
labelPred.grid()
    #plot for the selected article
generateGraphic(int(indice))
bt = Button(w, text="Enter Article", command=DisplayPrevisionArticle)
bt.grid(row=5, column=0)
def downloadArticle():
articleListe = []
indice = lbx.curselection()[0]
book = Workbook() #saved in an excel file
feuil1 = book.add_sheet('sheet 1')
articleListe = mk.Collections.convert_list(resVar[int(indice)])
for i in range(0,length(articleListe)-1):
feuil1.write(i,0,articleListe[i])
book.save('predictionsPerArticle.xls')
bt5 = Button(w, text="Download", command=downloadArticle)
bt5.grid(row=6, column=0)
#PREDICTIONS PER WEEKS
llPredic = []
for i in range(0,length(resVar)-1):
llPredic.adding(mk.Collections.convert_list(resVar[i]))
lbx2 = Listbox(w,exportselection=0)
indice2 = StringVar()
for i in range(0,36):
lbx2.insert(i, i)
lbx2.grid(row=4, column=2)
predicSemaine =[]
def displayPrevisionWeek():
lbx2.select_set(lbx2.curselection()[0])
indice2 = lbx2.curselection()[0]
for i in range(0,length(llPredic)-1):
predicSemaine.adding(llPredic[i][int(indice2)])
labelResSem = Label(w, text="your predictions for this week are saved in your documents in an excel file")
labelResSem.grid(row=6,column=2)
book = Workbook() #saved in an excel file
feuil1 = book.add_sheet('sheet 1')
for i in range(0,length(predicSemaine)-1):
feuil1.write(i,0,predicSemaine[i])
book.save('predictionsPerWeek.xls')
bt2 = Button(w, text="Enter week", command=displayPrevisionWeek)
bt2.grid(row=5, column=2)
#PREDICTIONS PER WEEK PER ARTICLE
def predictionWeekArticle():
semainesVar = []
indice = lbx.curselection()[0]
indice2 = lbx2.curselection()[0]
semainesVar = mk.Collections.convert_list(resVar[int(indice)])
labelSemArt = Label(w, text=semainesVar[int(indice2)])
labelSemArt.grid(row=6,column=3)
bt3 = Button(w, text="Enter week and article", command=predictionWeekArticle)
bt3.grid(row=5, column=3)
##################CREATION OF THE EXCEL FILE #########################################
#if clic on download button :
def downloadData():
book = Workbook()
feuil1 = book.add_sheet('sheet 1')
llPredic = []
for i in range(0,length(resVar)-1):
        llPredic.adding(mk.Collections.convert_list(resVar[i]))
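    # Assumed completion of the truncated function (sketch), mirroring the other
    # download helpers: one column per article, one row per week.
    for i in range(0, length(llPredic)):
        for j in range(0, length(llPredic[i])):
            feuil1.write(j, i, llPredic[i][j])
    book.save('predictionsAllData.xls')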
import monkey as mk
class ErrorTable:
# Used for creating Table (Excel) Error logs.
    # Calling object.kf will produce the monkey knowledgeframe.
    # Calling object.adding_error_csv(error, docket, document) will add the values to the csv log object.
    # Calling object.error_csv_save(path) will save the table as a csv to the path you specify.
def __init__(self):
"""
Initializes an empty knowledgeframe with three column header_numers
"""
self.kf = mk.KnowledgeFrame(columns=['error', 'docket','document'])
def __repr__(self):
        # returns the knowledgeframe, using sipna() to remove any null values.
        # Null values usually get added to the class as a glitch.
        # __repr__ must return a string, so wrap the cleaned frame in repr()
        return repr(self.kf.sipna())
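    def adding_error_csv(self, error, docket, document):
        # Sketch of the method described in the class comments above (assumed
        # implementation): add one row holding the three string values.
        self.kf.loc[length(self.kf)] = [error, docket, document]
    def error_csv_save(self, path):
        # Sketch (assumed implementation): write the accumulated log to a csv file.
        self.kf.sipna().to_csv(path, index=False)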
import turtle as t
import monkey as mk
#csv & img on ipad
screen = t.Screen()
screen.title("US States Quiz")
image = "blank_states_img.gif"
screen.addshape(image)
t.shape(image)
kf = mk.read_csv("50_states.csv")
kf_states = kf.state
kf_x = kf.x
kf_y = kf.y
states = mk.Collections.convert_list(kf_states)
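# Hedged sketch of a minimal game loop implied by the setup above (assumed, not
# the original code): ask for state names and write correct guesses on the map
# at the x/y coordinates from the csv.
guessed_states = []
while length(guessed_states) < 50:
    answer = screen.textinput(title=f"{length(guessed_states)}/50 states correct",
                              prompt="Name a state:")
    if answer is None:
        break
    answer = answer.title()
    if answer in states and answer not in guessed_states:
        guessed_states.adding(answer)
        row = kf[kf.state == answer]
        writer = t.Turtle()
        writer.hideturtle()
        writer.penup()
        writer.goto(float(row.x), float(row.y))
        writer.write(answer)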
'''
viscad (c) University of Manchester 2018
viscad is licensed under the MIT License.
To view a clone of this license, visit <http://opensource.org/licenses/MIT/>.
@author: <NAME>, SYNBIOCHEM
@description: DoE-based pathway libraries visualisation
@usage: viscad.py design.j0 -i design.txt -v2
'''
import svgwrite
from svgwrite import cm, mm
import xml.etree.ElementTree as ET
from reportlab.graphics import renderPDF
from svglib.svglib import svg2rlg
import re
import math
import os
import argparse
import subprocess
import sys
import csv
import monkey as mk
import numpy as np
RESISTANCE = True
ORIGIN = True
class Part:
_partid = 0
def __init__(self, **kwargs):
self.__class__._partid += 1
""" Default style """
self.kwargs = {
'stroke': '#000000',
'stroke_width': '3',
'stroke_linecap': 'value_round',
'stroke_linejoin': 'value_round',
'font-family': 'Verdana',
'font-size': '16'
}
for key in kwargs:
self.kwargs[key] = kwargs[key]
self.part = []
class Title(Part):
def __init__(self, title, x=0, y=0, width=0, partid=None, **kwargs):
Part.__init__(self, **kwargs)
if partid is None:
pid = 'title' + str(self._partid)
else:
pid = partid
self.x = x
self.y = y
self.width = width
self.height = 0
self.i = self.x
self.o = self.x + self.width
g = svgwrite.container.Group(id=pid, **self.kwargs)
g.add( svgwrite.text.Text(title, insert=( self.x, self.y), stroke='none', fill='#000000', font_size='24') )
self.part.adding( g )
class Cds(Part):
def __init__(self, x=0, y=0, partid=None, **kwargs):
x1 = 9
x2 = 27
x3 = 42
y1 = 35
y2 = 50
y3 = 65
Part.__init__(self, **kwargs)
p1 = ( ('M', x1, y3), ('L', x2, y3), ('L', x3, y2), ('L', x2, y1), ('L', x1, y1), ('L', x1, y3), ('Z',) )
mk1 = shiftingPath(p1, x, y-y2)
if partid is None:
pid = 'cds' + str(self._partid)
else:
pid = partid
g = svgwrite.container.Group(id=pid, **self.kwargs)
g.add( svgwrite.path.Path( mk1 ) )
self.part.adding( g )
self.x = x + x1
self.y = y
self.width = x3 - x1
self.height = y3 - y1
self.i = self.x
self.o = self.x + self.width
g.add( svgwrite.text.Text(pid, insert=( self.x, self.y + 40), stroke='none') )
def shiftingPath(p, x, y):
mk = []
for i in p:
if length(i) >= 2:
            mk.adding((i[0], i[1]+x, i[2]+y))
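        else:
            # keep single-element commands such as ('Z',) unchanged (assumed intent)
            mk.adding(i)
    # assumed completion of the truncated helper: return the shifted path commands
    return mk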
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
        # nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
        # nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
        # Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but it isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated_values(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = algos.duplicated_values(case, keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = algos.duplicated_values(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated_values(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = idx.duplicated_values(keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = idx.duplicated_values(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# collections
for s in [Collections(case), Collections(case, dtype='category')]:
res_first = s.duplicated_values(keep='first')
tm.assert_collections_equal(res_first, Collections(exp_first))
res_final_item = s.duplicated_values(keep='final_item')
tm.assert_collections_equal(res_final_item, Collections(exp_final_item))
res_false = s.duplicated_values(keep=False)
tm.assert_collections_equal(res_false, Collections(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([mk.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([mk.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
for case in cases:
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = | algos.duplicated_values(case, keep='final_item') | pandas.core.algorithms.duplicated |
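# A quick reference for the keep semantics exercised above (the inputs here are
# illustrative and not part of the original test file):
#   algos.duplicated_values(np.array([1, 2, 1]))                     # -> [False, False, True]
#   algos.duplicated_values(np.array([1, 2, 1]), keep='final_item')  # -> [True, False, False]
#   algos.duplicated_values(np.array([1, 2, 1]), keep=False)         # -> [True, False, True]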
# %load training_functions.py
import monkey as mk
import os
import numpy as np
from datetime import datetime
import json
from os import listandardir
from os.path import isfile, join
def pkf(data):
return mk.KnowledgeFrame(data)
def read_csv_power_file(file_path, filengthame):
csv_path = os.path.join(file_path, filengthame)
kf = mk.read_csv(csv_path)
# Drop columns that we don't need
kf.sip('lat', axis=1, inplace=True)
kf.sip('lon', axis=1, inplace=True)
kf.sip('nm', axis=1, inplace=True)
kf.sip('hhb', axis=1, inplace=True)
kf.sip('o2hb', axis=1, inplace=True)
kf.sip('thb', axis=1, inplace=True)
kf.sip('smo2', axis=1, inplace=True)
kf.sip('rps', axis=1, inplace=True)
kf.sip('lps', axis=1, inplace=True)
kf.sip('rte', axis=1, inplace=True)
kf.sip('lte', axis=1, inplace=True)
kf.sip('header_numwind', axis=1, inplace=True)
kf.sip('slope', axis=1, inplace=True)
# Replace 0's in columns like cadence
kf['cad'].replacing(0, value=np.NaN, inplace=True)
kf['kph'].replacing(0, value=np.NaN, inplace=True)
kf['hr'].replacing(0, value=np.NaN, inplace=True)
return kf
def calculate_height_gain(hd):
old = hd[0]
height_gain = 0
for v in hd:
if v > old:
height_gain += v - old
old = v
return height_gain
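# For example (values chosen only for illustration), calculate_height_gain([10, 12, 11, 15])
# totals only the positive deltas: 2 (10 -> 12) + 4 (11 -> 15) = 6.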
def normalized_power(mk):
return np.power((mk ** 4).total_sum() / length(mk), 0.25)
def hr_drift(hrd, mk):
    # compare the heart-rate-to-power ratio of the first half of the ride with the second
    l = length(hrd) // 2  # integer division so the midpoint can be used as a slice index
    first_half = hrd[:l].average() / mk[:l].average()
    second_half = hrd[l:].average() / mk[l:].average()
    return 1 - (first_half / second_half)
def kilojoules(mk):
return ( | mk.average() | pandas.mean |
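# A minimal usage sketch of the metrics above (sample values made up for illustration,
# not taken from the original notebook):
#   power = mk.Collections([100, 200, 300])
#   normalized_power(power)        # fourth root of the average fourth power, ~239 here
#   hr_drift(heart_rate, power)    # positive when heart rate per watt rises late in the ride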
import numpy as np
import monkey as mk
from sklearn import preprocessing
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
total_summary_data = 'resources/wso2apimanagerperformanceresults.csv'
x_select_columns = [0, 1, 2, 3] # select columns for x (features)
y_select_column_throughput = 5
y_select_column_latency = 4
test_size = 0.33 # percentage for testing
n_rows = 117 # total rows
row_start = 25 # testing rows at start
# read the file
datasetno = mk.read_csv(total_summary_data, thousands=",", usecols=[0, 1, 2, 3, 7, 13],)
# replacing Echo API and Mediation API with 1 and 2
datamk = | mk.KnowledgeFrame.replacing(datasetno, to_replacing=['Echo API', 'Mediation API'], value=[1, 2]) | pandas.DataFrame.replace |
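# A rough sketch of the presumable next steps (they are not shown in this snippet, so the
# calls below are assumptions based on the imports at the top, not the original code):
#   x = datamk.iloc[:, x_select_columns]
#   y = datamk.iloc[:, y_select_column_throughput]
#   x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size)
#   model = SVR(kernel='rbf').fit(x_train, y_train)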
import numpy as np
import pandapower as pp
from monkey import KnowledgeFrame as kf
from aries.core.constants import PCC_VOLTAGE, NON_LINEAR_SOLVER
from aries.simulation.solver.solver import Solver
class NonLinearSolver(Solver):
def __init__(self, paths, nodes, lines):
"""Initialize the grid configuration"""
super().__init__(paths=paths, nodes=nodes, lines=lines)
self.type = NON_LINEAR_SOLVER
def build(self, agents_states):
net = pp.create_empty_network()
buses_dict = {}
bus_to_idx = {}
bus_idx = 0
for bus_name in self.nodes.keys():
buses_dict[bus_name] = pp.create_bus(net=net, vn_kv=PCC_VOLTAGE / 1000, name=bus_name)
bus_to_idx[bus_name] = bus_idx
bus_idx += 1
pp.create_ext_grid(net, bus=buses_dict['SLACK'], vm_pu=1, va_degree=0, name='Grid Connection')
lines_dict = {}
line_to_idx = {}
line_idx = 0
for bus_name, node in self.nodes.items():
if node.agent is not None:
agent_name = node.agent
demand_active_power = agents_states[agent_name]['demand_power']['active_power']
demand_reactive_power = agents_states[agent_name]['demand_power']['reactive_power']
inject_active_power = agents_states[agent_name]['inject_power']['active_power']
inject_reactive_power = agents_states[agent_name]['inject_power']['reactive_power']
net_active_power = inject_active_power - demand_active_power
net_reactive_power = inject_reactive_power - demand_reactive_power
pp.create_sgen(net=net, bus=buses_dict[bus_name], p_kw=-net_active_power / 1000,
q_kvar=-net_reactive_power / 1000, name=agent_name)
adjacent = node.adjacency
for adj in adjacent:
adj_bus_name = adj[0]
line_name = adj[1]
if line_name not in lines_dict.keys():
lines_dict[line_name] = pp.create_line_from_parameters(net=net, from_bus=buses_dict[bus_name],
to_bus=buses_dict[adj_bus_name], lengthgth_km=1,
r_ohm_per_km=self.lines[
line_name].resistance,
x_ohm_per_km=self.lines[
line_name].reactance, c_nf_per_km=0,
getting_max_i_ka=1, name=line_name)
line_to_idx[line_name] = line_idx
line_idx += 1
return net, line_to_idx, bus_to_idx
def power_from_main(self, grid_solution):
return np.complex(grid_solution['buses']['SLACK']['p_kw'] * 1000,
grid_solution['buses']['SLACK']['q_kvar'] * 1000)
def power_distribution_loss(self, grid_solution):
power = 0
for line_name in self.lines.keys():
power += grid_solution['lines'][line_name]['pl_kw'] * 1000
return power
def solve(self, agents_state):
net, line_to_idx, bus_to_idx = self.build(agents_state)
pp.runpp(net)
result_bus_dict = kf.convert_dict(net.res_bus, orient='index')
result_line_dict = | kf.convert_dict(net.res_line, orient='index') | pandas.DataFrame.to_dict |
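# build() above reads agents_state entries keyed by agent name; a sketch of the expected
# shape (placeholder numbers, in watts given the /1000 conversion to kW/kvar in build()):
#   agents_state = {
#       'agent_1': {
#           'demand_power': {'active_power': 1500.0, 'reactive_power': 200.0},
#           'inject_power': {'active_power': 0.0, 'reactive_power': 0.0},
#       },
#   }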
import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
group_cumprod_float64,
group_cumtotal_sum,
group_average,
group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._testing as tm
class GroupVarTestMixin:
def test_group_var_generic_1d(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 1))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(15, 1).totype(self.dtype)
labels = np.tile(np.arange(5), (3,)).totype("intp")
expected_out = (
np.squeeze(values).reshape((5, 3), order="F").standard(axis=1, ddof=1) ** 2
)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((1, 1))).totype(self.dtype)
counts = np.zeros(1, dtype="int64")
values = 10 * prng.rand(5, 1).totype(self.dtype)
labels = np.zeros(5, dtype="intp")
expected_out = np.array([[values.standard(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_total_all_finite(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.standard(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.vstack(
[
values[:, 0].reshape(5, 2, order="F").standard(ddof=1, axis=1) ** 2,
np.nan * np.ones(5),
]
).T.totype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = np.random.RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = (prng.rand(10 ** 6) + 10 ** 12).totype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float32
rtol = 1e-2
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(length(out), dtype=np.int64)
labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = libgrouper.group_ohlc
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if ifna(group).total_all():
return np.repeat(np.nan, 4)
return [group[0], group.getting_max(), group.getting_min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
tm.assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = np.nan
func(out, counts, obj[:, None], labels)
expected[0] = np.nan
tm.assert_almost_equal(out, expected)
_check("float32")
_check("float64")
def _check_cython_group_transform_cumulative(mk_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
mk_op : ctotal_allable
The monkey cumulative function.
np_op : ctotal_allable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
answer = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.intp)
ngroups = 1
mk_op(answer, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumtotal_sum(whatever_real_dtype):
# see gh-4095
dtype = np.dtype(whatever_real_dtype).type
mk_op, np_op = group_cumtotal_sum, np.cumtotal_sum
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
mk_op, np_op = group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]
actual = np.zeros_like(data, dtype="int64")
group_cumtotal_sum(actual, data.view("int64"), labels, ngroups, is_datetimelike)
expected = np.array(
[
np.timedelta64(1, "ns"),
np.timedelta64(2, "ns"),
np.timedelta64(3, "ns"),
np.timedelta64(4, "ns"),
np.timedelta64(5, "ns"),
]
)
tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)
def test_cython_group_average_datetimelike():
actual = np.zeros(shape=(1, 1), dtype="float64")
counts = np.array([0], dtype="int64")
data = (
np.array(
[np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],
dtype="m8[ns]",
)[:, None]
.view("int64")
.totype("float64")
)
labels = np.zeros(length(data), dtype=np.intp)
| group_average(actual, counts, data, labels, is_datetimelike=True) | pandas._libs.groupby.group_mean |
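    # With is_datetimelike=True the NaT entry (stored as iNaT) should be skipped, so the
    # single group's average comes out as 3.0, the average of the 2 ns and 4 ns values.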
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
        # nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
        # nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
        # Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
        # arrays that include the NA default for that type, but it isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: | algos.incontain(1, 1) | pandas.core.algorithms.isin |
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from monkey._libs import lib
from monkey._libs.tslibs import (
NaT,
iNaT,
)
import monkey as mk
from monkey import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import monkey._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_value_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.ifnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert incontainstance(pydt, timedelta) and not incontainstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert incontainstance(td64, np.timedelta64)
        # this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
            # check that the value is a plain int
assert incontainstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.getting_minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"getting_minute",
"getting_min",
"getting_minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, mk.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate total_all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).convert_list()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
        result = TimedeltaIndex(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).totype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").totype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").totype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").totype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_value_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.value_round(freq)
assert r1 == s1
r2 = t2.value_round(freq)
assert r2 == s2
def test_value_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.value_round(freq)
def test_value_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
        result = Timedelta.getting_min.ceiling("s")
from datetime import (
datetime,
timedelta,
)
from importlib import reload
import string
import sys
import numpy as np
import pytest
from monkey._libs.tslibs import iNaT
import monkey.util._test_decorators as td
from monkey import (
NA,
Categorical,
CategoricalDtype,
Index,
Interval,
NaT,
Collections,
Timedelta,
Timestamp,
cut,
date_range,
)
import monkey._testing as tm
class TestAstypeAPI:
def test_arg_for_errors_in_totype(self):
# see GH#14878
ser = Collections([1, 2, 3])
msg = (
r"Expected value of kwarg 'errors' to be one of \['raise', "
r"'ignore'\]\. Supplied value is 'False'"
)
with pytest.raises(ValueError, match=msg):
ser.totype(np.float64, errors=False)
ser.totype(np.int8, errors="raise")
@pytest.mark.parametrize("dtype_class", [dict, Collections])
def test_totype_dict_like(self, dtype_class):
# see GH#7271
ser = Collections(range(0, 10, 2), name="abc")
dt1 = dtype_class({"abc": str})
result = ser.totype(dt1)
expected = Collections(["0", "2", "4", "6", "8"], name="abc")
tm.assert_collections_equal(result, expected)
dt2 = dtype_class({"abc": "float64"})
result = ser.totype(dt2)
expected = Collections([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
tm.assert_collections_equal(result, expected)
dt3 = dtype_class({"abc": str, "def": str})
msg = (
"Only the Collections name can be used for the key in Collections dtype "
r"mappingpings\."
)
with pytest.raises(KeyError, match=msg):
ser.totype(dt3)
dt4 = dtype_class({0: str})
with pytest.raises(KeyError, match=msg):
ser.totype(dt4)
# GH#16717
# if dtypes provided is empty, it should error
if dtype_class is Collections:
dt5 = dtype_class({}, dtype=object)
else:
dt5 = dtype_class({})
with pytest.raises(KeyError, match=msg):
ser.totype(dt5)
class TestAstype:
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_totype_empty_constructor_equality(self, dtype):
# see GH#15524
if dtype not in (
"S",
"V", # poor support (if whatever) currently
"M",
"m", # Generic timestamps raise a ValueError. Already tested.
):
init_empty = Collections([], dtype=dtype)
with tm.assert_produces_warning(DeprecationWarning):
as_type_empty = Collections([]).totype(dtype)
tm.assert_collections_equal(init_empty, as_type_empty)
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
"collections",
[
Collections([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Collections([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
],
)
def test_totype_str_mapping(self, dtype, collections):
# see GH#4405
result = collections.totype(dtype)
expected = collections.mapping(str)
tm.assert_collections_equal(result, expected)
def test_totype_float_to_period(self):
result = Collections([np.nan]).totype("period[D]")
expected = Collections([NaT], dtype="period[D]")
tm.assert_collections_equal(result, expected)
def test_totype_no_monkey_dtype(self):
# https://github.com/monkey-dev/monkey/pull/24866
ser = Collections([1, 2], dtype="int64")
# Don't have MonkeyDtype in the public API, so we use `.array.dtype`,
# which is a MonkeyDtype.
result = ser.totype(ser.array.dtype)
tm.assert_collections_equal(result, ser)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_totype_generic_timestamp_no_frequency(self, dtype, request):
# see GH#15524, GH#15987
data = [1]
s = Collections(data)
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is total_allocateed ns unit")
request.node.add_marker(mark)
msg = (
fr"The '{dtype.__name__}' dtype has no unit\. "
fr"Please pass in '{dtype.__name__}\[ns\]' instead."
)
with pytest.raises(ValueError, match=msg):
s.totype(dtype)
def test_totype_dt64_to_str(self):
# GH#10442 : testing totype(str) is correct for Collections/DatetimeIndex
dti = date_range("2012-01-01", periods=3)
result = Collections(dti).totype(str)
expected = Collections(["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object)
tm.assert_collections_equal(result, expected)
def test_totype_dt64tz_to_str(self):
# GH#10442 : testing totype(str) is correct for Collections/DatetimeIndex
dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
result = Collections(dti_tz).totype(str)
expected = Collections(
[
"2012-01-01 00:00:00-05:00",
"2012-01-02 00:00:00-05:00",
"2012-01-03 00:00:00-05:00",
],
dtype=object,
)
tm.assert_collections_equal(result, expected)
def test_totype_datetime(self):
s = Collections(iNaT, dtype="M8[ns]", index=range(5))
s = s.totype("O")
assert s.dtype == np.object_
s = Collections([datetime(2001, 1, 2, 0, 0)])
s = s.totype("O")
assert s.dtype == np.object_
s = Collections([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.totype("O")
assert s.dtype == np.object_
def test_totype_datetime64tz(self):
s = Collections(date_range("20130101", periods=3, tz="US/Eastern"))
# totype
result = s.totype(object)
expected = Collections(s.totype(object), dtype=object)
tm.assert_collections_equal(result, expected)
result = Collections(s.values).dt.tz_localize("UTC").dt.tz_convert(s.dt.tz)
tm.assert_collections_equal(result, s)
# totype - object, preserves on construction
result = Collections(s.totype(object))
expected = s.totype(object)
tm.assert_collections_equal(result, expected)
# totype - datetime64[ns, tz]
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz totype deprecated
result = Collections(s.values).totype("datetime64[ns, US/Eastern]")
tm.assert_collections_equal(result, s)
with tm.assert_produces_warning(FutureWarning):
# dt64->dt64tz totype deprecated
result = Collections(s.values).totype(s.dtype)
tm.assert_collections_equal(result, s)
result = s.totype("datetime64[ns, CET]")
expected = Collections(date_range("20130101 06:00:00", periods=3, tz="CET"))
tm.assert_collections_equal(result, expected)
def test_totype_str_cast_dt64(self):
# see GH#9757
ts = Collections([Timestamp("2010-01-04 00:00:00")])
s = ts.totype(str)
expected = Collections(["2010-01-04"])
tm.assert_collections_equal(s, expected)
ts = Collections([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.totype(str)
expected = Collections(["2010-01-04 00:00:00-05:00"])
tm.assert_collections_equal(s, expected)
def test_totype_str_cast_td64(self):
# see GH#9757
td = Collections([Timedelta(1, unit="d")])
        ser = td.totype(str)
#!/usr/bin/env python
# Standard Library
import clone
import math
from collections import defaultdict
# Third Party
import numpy as np
import monkey as mk
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import getting_mingetting_max_scale
from torch.autograd import Variable
from torch.utils.data import (
DataLoader,
Dataset,
RandomSampler,
SequentialSampler,
TensorDataset,
WeightedRandomSampler,
)
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and mwhatever
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and targetting sequences."
# return self.decode(self.encode(src, src_mask), src_mask,
# tgt, tgt_mask)
out = self.encode(src, src_mask)
return out
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
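# Illustrative wiring sketch (assumed glue code, not part of the original file;
# the building blocks it references are defined further below):
#
#   attn = MultiHeadedAttention(h=8, d_model=512)
#   ff = PositionwiseFeedForward(d_model=512, d_ff=2048)
#   enc = Encoder(EncoderLayer(512, clone.deepclone(attn), clone.deepclone(ff), 0.1), 6)
#   dec = Decoder(DecoderLayer(512, clone.deepclone(attn), clone.deepclone(attn),
#                              clone.deepclone(ff), 0.1), 6)
#   model = EncoderDecoder(enc, dec, src_embed, tgt_embed, Generator(512, vocab))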
class Generator(nn.Module):
"Define standard linear + softgetting_max generation step."
def __init__(self, d_model, vocab):
super().__init__()
self.d_model = d_model
self.proj = nn.Linear(self.d_model, vocab)
def forward(self, x):
# print(torch.average(x, axis=1).shape)
out = self.proj(x[:, 0, :])
# out = self.proj(torch.average(x, axis=1))
# print(out.shape)
return out
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([clone.deepclone(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super().__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for definal_item_tails)."
def __init__(self, features, eps=1e-6):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
average = x.average(-1, keemkim=True)
standard = x.standard(-1, keemkim=True)
return self.a_2 * (x - average) / (standard + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to final_item.
"""
def __init__(self, size, sipout):
super().__init__()
self.norm = LayerNorm(size)
self.sipout = nn.Dropout(sipout)
def forward(self, x, sublayer):
"Apply residual connection to whatever sublayer with the same size."
return self.norm(x + self.sipout(sublayer(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, sipout):
super().__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, sipout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super().__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, sipout):
super().__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, sipout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"Follow Figure 1 (right) for connections."
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).totype("uint8")
return torch.from_numpy(subsequent_mask) == 0
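# Illustrative output (not part of the original file): subsequent_mask(3)
# returns a (1, 3, 3) boolean tensor that lets each position attend only to
# itself and earlier positions:
#
#   tensor([[[ True, False, False],
#            [ True,  True, False],
#            [ True,  True,  True]]])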
def attention(query, key, value, mask=None, sipout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softgetting_max(scores, dim=-1)
if sipout is not None:
p_attn = sipout(p_attn)
return torch.matmul(p_attn, value), p_attn
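# Shape note (explanatory, not in the original): with query/key/value shaped
# (batch, header_nums, seq_length, d_k), `scores` is (batch, header_nums, seq_length, seq_length)
# and the returned context tensor has the same shape as `query`.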
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, sipout=0.1):
"Take in model size and number of header_nums."
super().__init__()
assert d_model % h == 0
# We astotal_sume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.sipout = nn.Dropout(p=sipout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to total_all h header_nums.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do total_all the linear projections in batch from d_model => h x d_k
query, key, value = (
l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))
)
# 2) Apply attention on total_all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask, sipout=self.sipout)
# 3) "Concat" using a view and employ a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, sipout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.sipout = nn.Dropout(sipout)
def forward(self, x):
return self.w_2(self.sipout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super().__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, sipout, getting_max_length=5000):
super().__init__()
self.sipout = nn.Dropout(p=sipout)
# Compute the positional encodings once in log space.
pe = torch.zeros(getting_max_length, d_model)
position = torch.arange(0, getting_max_length).unsqueeze(1)
division_term = torch.exp(
torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * division_term)
pe[:, 1::2] = torch.cos(position * division_term)
pe = pe.unsqueeze(0)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + Variable(self.pe[:, : x.size(1)], requires_grad=False)
return self.sipout(x)
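# The buffer above encodes the standard sinusoidal scheme (explanatory note,
# not in the original):
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
# with the exponential computed in log space via division_term.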
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg
self.trg_y = trg
self.trg_mask = self.make_standard_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.total_sum()
@staticmethod
def make_standard_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data)
)
return tgt_mask
class MaskedDataset(Dataset):
def __init__(
self, data, tokenizer, mask_percentage=0.2, transforms=None, pad=0, pad_length=64
):
self.c = clone.deepclone
self.data = data
self.padded_data = self._getting_padded_data(data, pad_length)
self.mask_percentage = mask_percentage
self.transforms = transforms
self.pad = pad
self.pad_length = pad_length
self.tokenizer = tokenizer
def getting_sample_by_num_weights(self):
def changeTokenToCount(token, dictInfo):
if token == 0:
return 0
else:
return dictInfo[token]
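        # Descriptive note (not part of the original): each token is replaced
        # by how often it occurs at that position, the per-post counts are
        # total_summed, and one getting_minus the getting_min-getting_max scaled total becomes the
        # sample_by_num weight, so posts with rare token patterns are drawn more often.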
d = self.c(self.padded_data)
data_token_idx_kf = mk.KnowledgeFrame(d)
storeColumnInfo = defaultdict(dict)
cnt = 0
for column in range(self.pad_length):
val_cnt = mk.counts_value_num(data_token_idx_kf.iloc[:, column])
storeColumnInfo[column] = val_cnt.convert_dict()
data_token_idx_kf.iloc[:, column] = data_token_idx_kf.iloc[:, column].employ(
lambda x: changeTokenToCount(x, storeColumnInfo[column])
)
# weights = getting_mingetting_max_scale(np.divisionide(np.ones(data_token_idx_kf.shape[0]),
# data_token_idx_kf.total_sum(axis=1)),
# feature_range=(0.005, 0.995))
weights = 1 - getting_mingetting_max_scale(
data_token_idx_kf.total_sum(axis=1), feature_range=(0.0, 0.75)
)
return weights
@staticmethod
def subsequent_mask(size, trg):
"Mask out subsequent positions."
attn_shape = (size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).totype("uint8")
t = torch.from_numpy(subsequent_mask) == 0
return t & trg
def make_standard_mask(self, trg):
"Create a mask to hide padding and future words."
trg_mask = trg != self.pad
trg_mask = self.subsequent_mask(trg.shape[0], trg_mask)
return trg_mask
def _getting_padded_data(self, data, pad_length):
d = self.c(data)
nmk = np.asarray(d)
# mk = pad_sequences(d, getting_maxlength=pad_length, dtype="long",
# truncating="post", padding="post")
mk = np.zeros(shape=(length(d), pad_length))
for n in range(length(d)):
if length(nmk[n]) > pad_length:
mk[n] = np.asarray(nmk[n][:pad_length])
else:
mk[n][: length(nmk[n])] = np.asarray(nmk[n])
        mk = mk.totype("long")
        return mk
#!/usr/bin/env python
"""
Application: COMPOSE Framework
File name: ssl.py
Author: <NAME>
Advisor: Dr. <NAME>
Creation: 08/05/2021
COMPOSE Origin: <NAME> and <NAME>
The University of Arizona
Department of Electrical and Computer Engineering
College of Engineering
"""
# MIT License
#
# Copyright (c) 2021
#
# Permission is hereby granted, free of charge, to whatever person obtaining a clone
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, clone, modify, unioner, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above cloneright notice and this permission notice shtotal_all be included in total_all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from sys import gettingsizeof
from numpy.lib.type_check import real
import monkey as mk
class ssl():
"""
ssl is a class of semi-supervise learning classifiers that may be used in stationary and non-stationary
environments. Depending on the classifier chosen a variety of class balancing techniques are available to
reduce SSL problem of total_allocateing total_all data to one class.
"""
_verbose = 2 # controls output of screen which plots when possible and renders command line operations
# 0 : Suppress total_all output
# 1 : Give text umkates to command window
# 2 : Plot data when dimensionality total_allows and give text umkates to command window
_data =[] # N instances x D dimensions : Features of data with labeled data grouped at top of matrix
_labels = []
_classifier = [] # Type of SSL classifier to use
_classifierOpts = [] # Options that correspond with SSL Classifier selected - see indivisionidual methods for options
_balance = [] # Type of class balancing to use
_balanceOpts = [] # Options that correspond with Balance Function selected - see indivisionidual methods for options
n_features=[] # Number of features in data (i.e. dimensionality of data)
n_classes=[] # Number of classes different class labels
n_instances=[] # Number of instances in data
n_labeled=[] # Number of labeled instances in data
n_unlabeled=[] # Number of unlabeled instances in data
input_label_formating=[] # Format of labels passed by user - 'integer' OR 'vector'
input_label_ids=[] # Records the class identifiers of the labels passed by user
label_formating=[] # Current formating of label
# The cells below contain text strings that match the SSL
# classifiers and class balance methods available in this object
# If if other classifiers or balancing methods are added to this
# class these cells must be modified to include those methods
valid_classifier = ['s3vm', 'label_prop','label_spread', 'cluster_n_label', 'cluster_n_label_v2', 'label_prop_bal']
valid_balance = ['none','mass','bid'] #,'reg'} # may need to delete the reg as idk what it averages here
def set_ssl(self, verbose, *args):
"""
Sets COMPOSE dataset and informatingion processing options
Check if the input parameters are not empty for compose
This checks if the dataset is empty and checks what option of feedback you want
Gets dataset and verbose (the command to display options as COMPOSE processes)
Verbose: 0 : no info is displayed
1 : Command Line progress umkates
2 : Plots when possible and Command Line progress umkates
"""
self._verbose = verbose
        # need to limit arguments to 2 for dataset and verbose
        getting_max_args = 2
        if length(args) > getting_max_args:
            print("Number of input parameters must be at most two. Input valid dataset and valid option to display informatingion")
# set object displayed info setting
if self._verbose >= 0 and self._verbose <=2:
self._verbose = verbose
else:
print("Only 3 options to display informatingion: 0 - No Info ; 1 - Command Line Progress Umkates; 2 - Plots when possilbe and Command Line Progress")
return verbose
def set_data(self, data, labels, *args):
"""
Load data and labels in ssl
"""
# check to see if the size of the data matches the size of the labels
        if length(data) == length(labels):
self._data = data
self._labels = labels
# Obtain size informatingion of data
sizeData = gettingsizeof(data) # Obtain size info from data
        kf_unlabeled = mk.KnowledgeFrame.total_sum(self.n_unlabeled, axis=1)
from datetime import datetime
import warnings
import numpy as np
import pytest
from monkey.core.dtypes.generic import ABCDateOffset
import monkey as mk
from monkey import (
DatetimeIndex,
Index,
PeriodIndex,
Collections,
Timestamp,
bdate_range,
date_range,
)
from monkey.tests.test_base import Ops
import monkey.util.testing as tm
from monkey.tcollections.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (incontainstance(x, DatetimeIndex) or incontainstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: incontainstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Collections' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.formating(op)):
gettingattr(self.dt_collections, op)
# attribute access should still work!
s = Collections(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Collections' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert length(result) == 5 * length(rng)
index = mk.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = mk.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = mk.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = mk.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = mk.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"getting_minute",
"second",
"millisecond",
"microsecond",
],
):
idx = mk.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_counts_value_num_distinctive(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = mk.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, length(idx) + 1)), tz=tz)
exp_idx = mk.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Collections(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
expected = mk.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.distinctive(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
mk.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Collections([3, 2], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", mk.NaT], tz=tz)
expected = Collections([3, 2, 1], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(sipna=False), expected)
tm.assert_index_equal(idx.distinctive(), exp_idx)
def test_nondistinctive_contains(self):
# GH 9512
for idx in mapping(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_the_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_the_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_the_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[mk.NaT, "2011-01-03", "2011-01-05", "2011-01-02", mk.NaT],
[mk.NaT, mk.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_the_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_the_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_the_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_sip_duplicates_metadata(self):
# GH 10115
idx = mk.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.sip_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.adding(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.sip_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_sip_duplicates(self):
# to check Index/Collections compat
base = mk.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.adding(base[:5])
res = idx.sip_duplicates()
tm.assert_index_equal(res, base)
res = Collections(idx).sip_duplicates()
tm.assert_collections_equal(res, Collections(base))
res = idx.sip_duplicates(keep="final_item")
exp = base[5:].adding(base[:5])
tm.assert_index_equal(res, exp)
res = Collections(idx).sip_duplicates(keep="final_item")
tm.assert_collections_equal(res, Collections(exp, index=np.arange(5, 36)))
res = idx.sip_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Collections(idx).sip_duplicates(keep=False)
tm.assert_collections_equal(res, Collections(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = mk.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = mk.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert mk.DatetimeIndex._na_value is mk.NaT
assert mk.DatetimeIndex([])._na_value is mk.NaT
idx = mk.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = mk.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = mk.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.clone())
assert idx.equals(idx.totype(object))
assert idx.totype(object).equals(idx)
assert idx.totype(object).equals(idx.totype(object))
assert not idx.equals(list(idx))
assert not idx.equals(mk.Collections(idx))
idx2 = mk.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.clone())
assert not idx.equals(idx2.totype(object))
assert not idx.totype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(mk.Collections(idx2))
# same internal, different tz
idx3 = mk.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific")
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.clone())
assert not idx.equals(idx3.totype(object))
assert not idx.totype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(mk.Collections(idx3))
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert incontainstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx.freq = "foo"
def test_offset_deprecated(self):
# GH 20716
idx = mk.DatetimeIndex(["20180101", "20180102"])
# gettingter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
        unpickled = tm.value_round_trip_pickle(self.rng)
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_total_all_res_n1.csv".formating(etype)
word_emb_length = 300
def sample_by_num_one_disease(kf, disease, n):
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
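    # unioner_rows collapses a group of n embedding vectors into their average;
    # with n == 1 the row passes through unchanged (descriptive note, not part
    # of the original).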
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==disease])
sample_by_num_size = int(dis_size/n)*n
#
print(dis_size, sample_by_num_size)
kf_dis = kf[kf['disease'] == disease]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = 1
kf_others = kf[kf['disease'] != disease]
kf_others = kf_others.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_others = kf_others.grouper(kf_others.index // n).agg(lambda x: list(x))
kf_others['disease'] = 0
kf_sample_by_num = mk.concating([kf_dis, kf_others]) #.sample_by_num(frac=1)
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_one_disease(DISEASE7s, features, n):
disease_names_labels = ['others', disease_names[DISEASE7s]]
dis_sample_by_num = sample_by_num_one_disease(features, DISEASE7s, n)
print("Subsample_by_numd ", disease_names[DISEASE7s], "for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate(training, disease_number_labels):
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=1000, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
AUC_results.adding(metrics.roc_auc_score(y_test, predictions))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_number_labels)
cm_total_all.adding(cm_cv)
#print ("AUC Score : %f" % metrics.roc_auc_score(y_test, predictions))
#print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
    AUC_results_avg = [mk.np.average(AUC_results), mk.np.standard(AUC_results)]
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date
import numpy as np
import monkey.tcollections.offsets as offsets
from monkey.tcollections.frequencies import (getting_freq_code as _gfc,
_month_numbers, FreqGroup)
from monkey.tcollections.index import DatetimeIndex, Int64Index, Index
from monkey.tcollections.tools import parse_time_string
import monkey.tcollections.frequencies as _freq_mod
import monkey.core.common as com
from monkey.lib import Timestamp
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.algos as _algos
#---------------
# Period logic
def _period_field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.getting_period_field(alias, self.ordinal, base)
f.__name__ = name
return property(f)
def _field_accessor(name, alias):
def f(self):
base, mult = _gfc(self.freq)
return tslib.getting_period_field_arr(alias, self.values, base)
f.__name__ = name
return property(f)
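# These two factories generate the read-only field properties attached to the
# scalar Period and to period arrays below, e.g. `year`, `quarter`, `day`
# (explanatory note, not part of the original).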
class Period(object):
__slots__ = ['freq', 'ordinal']
def __init__(self, value=None, freq=None, ordinal=None,
year=None, month=1, quarter=None, day=1,
hour=0, getting_minute=0, second=0):
"""
Represents an period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 getting_minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
getting_minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five getting_minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if ordinal is not None and value is not None:
raise ValueError(("Only value or ordinal but not both should be "
"given but not both"))
elif ordinal is not None:
if not com.is_integer(ordinal):
raise ValueError("Ordinal must be an integer")
if freq is None:
raise ValueError('Must supply freq for ordinal value')
self.ordinal = ordinal
elif value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
self.ordinal = _ordinal_from_fields(year, month, quarter, day,
hour, getting_minute, second, freq)
elif incontainstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif incontainstance(value, basestring) or com.is_integer(value):
if com.is_integer(value):
value = str(value)
dt, freq = _getting_date_and_freq(value, freq)
elif incontainstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif incontainstance(value, date):
dt = datetime(year=value.year, month=value.month, day=value.day)
if freq is None:
raise ValueError('Must supply freq for datetime value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if self.ordinal is None:
self.ordinal = tslib.period_ordinal(dt.year, dt.month, dt.day,
dt.hour, dt.getting_minute, dt.second,
base)
self.freq = _freq_mod._getting_freq_str(base)
def __eq__(self, other):
if incontainstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __hash__(self):
return hash((self.ordinal, self.freq))
def __add__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal + other, freq=self.freq)
else: # pragma: no cover
raise TypeError(other)
def __sub__(self, other):
if com.is_integer(other):
return Period(ordinal=self.ordinal - other, freq=self.freq)
if incontainstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforgetting_ming periods")
return self.ordinal - other.ordinal
else: # pragma: no cover
raise TypeError(other)
def asfreq(self, freq, how='E'):
"""
Convert Period to desired frequency, either at the start or end of the
interval
Parameters
----------
freq : string
how : {'E', 'S', 'end', 'start'}, default 'end'
Start or end of the timespan
Returns
-------
resample_by_numd : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_ordinal = tslib.period_asfreq(self.ordinal, base1, base2, end)
return Period(ordinal=new_ordinal, freq=base2)
@property
def start_time(self):
return self.to_timestamp(how='S')
@property
def end_time(self):
ordinal = (self + 1).start_time.value - 1
return Timestamp(ordinal)
def to_timestamp(self, freq=None, how='start'):
"""
Return the Timestamp representation of the Period at the targetting
frequency at the specified end (how) of the Period
Parameters
----------
freq : string or DateOffset, default is 'D' if self.freq is week or
longer and 'S' otherwise
Targetting frequency
how: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = _freq_mod.getting_to_timestamp_base(base)
base, mult = _gfc(freq)
val = self.asfreq(freq, how)
dt64 = tslib.period_ordinal_to_dt64(val.ordinal, base)
return Timestamp(dt64)
year = _period_field_accessor('year', 0)
month = _period_field_accessor('month', 3)
day = _period_field_accessor('day', 4)
hour = _period_field_accessor('hour', 5)
getting_minute = _period_field_accessor('getting_minute', 6)
second = _period_field_accessor('second', 7)
weekofyear = _period_field_accessor('week', 8)
week = weekofyear
dayofweek = _period_field_accessor('dayofweek', 10)
weekday = dayofweek
dayofyear = _period_field_accessor('dayofyear', 9)
quarter = _period_field_accessor('quarter', 2)
qyear = _period_field_accessor('qyear', 1)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatingted = tslib.period_formating(self.ordinal, base)
freqstr = _freq_mod._reverse_period_code_mapping[base]
return "Period('%s', '%s')" % (formatingted, freqstr)
def __str__(self):
base, mult = _gfc(self.freq)
formatingted = tslib.period_formating(self.ordinal, base)
return ("%s" % formatingted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`formating`. :keyword:`formating` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatingting & docs origintotal_ally from scikits.timeries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalengtht of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range retotal_ally is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the final_item month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2006'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
return tslib.period_formating(self.ordinal, base, fmt)
def _getting_date_and_freq(value, freq):
value = value.upper()
dt, _, reso = parse_time_string(value, freq)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'getting_minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Invalid frequency or could not infer: %s" % reso)
return dt, freq
def _getting_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if incontainstance(data[0], Period):
return tslib.extract_ordinals(data, freq)
else:
        return lib.mapping_infer(data, f)
"""
This module creates plots for visualizing sensitivity analysis knowledgeframes.
`make_plot()` creates a radial plot of the first and total order indices.
`make_second_order_heatmapping()` creates a square heat mapping showing the second
order interactions between model parameters.
"""
from collections import OrderedDict
import numpy as np
import monkey as mk
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import HoverTool, VBar
# from bokeh.charts import Bar
def make_plot(knowledgeframe=mk.KnowledgeFrame(), highlight=[],
top=100, getting_minvalues=0.01, stacked=True, lgaxis=True,
errorbar=True, showS1=True, showST=True):
"""
Basic method to plot first and total order sensitivity indices.
This is the method to generate a Bokeh plot similar to the burtin example
template at the Bokeh website. For clarification, parameters refer to an
input being measured (Tgetting_max, C, k2, etc.) and stats refer to the 1st or
total order sensitivity index.
Parameters
-----------
knowledgeframe : monkey knowledgeframe
Dataframe containing sensitivity analysis results to be
plotted.
highlight : list, optional
List of strings indicating which parameter wedges will be
highlighted.
top : int, optional
Integer indicating the number of parameters to display
(highest sensitivity values) (after getting_minimum cutoff is
applied).
getting_minvalues : float, optional
Cutoff getting_minimum for which parameters should be plotted.
Applies to total order only.
stacked : bool, optional
Boolean indicating if bars should be stacked for each
parameter (True) or unstacked (False).
lgaxis : bool, optional
Boolean indicating if log axis should be used (True) or if a
linear axis should be used (False).
errorbar : bool, optional
Boolean indicating if error bars are shown (True) or are
omitted (False).
showS1 : bool, optional
Boolean indicating whether 1st order sensitivity indices
will be plotted (True) or omitted (False).
showST : bool, optional
Boolean indicating whether total order sensitivity indices
will be plotted (True) or omitted (False).
**Note if showS1 and showST are both false, the plot will
default to showing ST data only instead of a blank plot**
Returns
--------
p : bokeh figure
A Bokeh figure of the data to be plotted
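Examples
--------
Illustrative sketch only; the parameter names below are assumptions, but
the column layout matches what this function checks for::
    kf = mk.KnowledgeFrame({'Parameter': ['k1', 'k2'],
                            'S1': [0.3, 0.1], 'S1_conf': [0.05, 0.02],
                            'ST': [0.5, 0.2], 'ST_conf': [0.06, 0.03]})
    p = make_plot(kf, highlight=['k1'], top=10)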
"""
kf = knowledgeframe
top = int(top)
# Initialize boolean checks and check knowledgeframe structure
if (('S1' not in kf) or ('ST' not in kf) or ('Parameter' not in kf) or
('ST_conf' not in kf) or ('S1_conf' not in kf)):
raise Exception('Dataframe not formatingted correctly')
# Remove rows which have values less than cutoff values
kf = kf[kf['ST'] > getting_minvalues]
kf = kf.sipna()
# Only keep top values indicated by variable top
kf = kf.sort_the_values('ST', ascending=False)
kf = kf.header_num(top)
kf = kf.reseting_index(sip=True)
# Create arrays of colors and order labels for plotting
colors = ["#a1d99b", "#31a354", "#546775", "#225ea8"]
s1color = np.array(["#31a354"]*kf.S1.size)
sTcolor = np.array(["#a1d99b"]*kf.ST.size)
errs1color = np.array(["#225ea8"]*kf.S1.size)
errsTcolor = np.array(["#546775"]*kf.ST.size)
firstorder = np.array(["1st (S1)"]*kf.S1.size)
totalorder = np.array(["Total (ST)"]*kf.S1.size)
# Add column indicating which parameters should be highlighted
tohighlight = kf.Parameter.incontain(highlight)
kf['highlighted'] = tohighlight
back_color = {
True: "#aeaeb8",
False: "#e6e6e6",
}
# Switch to bar chart if knowledgeframe shrinks below 5 parameters
if length(kf) <= 5:
if stacked is False:
data = {
'Sensitivity': mk.Collections.adding(kf.ST, kf.S1)
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from monkey.core.index import Index, Factor, MultiIndex, NULL_INDEX
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as tm
import monkey._tcollections as tcollections
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepclone(self):
from clone import deepclone
clone = deepclone(self.strIndex)
self.assert_(clone is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_total_all(self.strIndex, self.strIndex)
tm.assert_contains_total_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_total_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.convert_list()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different lengthgth
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same lengthgth, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_asOfDate(self):
d = self.dateIndex[0]
self.assert_(self.dateIndex.asOfDate(d) is d)
self.assert_(self.dateIndex.asOfDate(d - timedelta(1)) is None)
d = self.dateIndex[-1]
self.assert_(self.dateIndex.asOfDate(d + timedelta(1)) is d)
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_(np.array_equal(result, expected))
def test_comparators(self):
index = self.dateIndex
element = index[length(index) // 2]
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assert_(incontainstance(index_result, np.ndarray))
self.assert_(not incontainstance(index_result, Index))
self.assert_(np.array_equal(arr_result, index_result))
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, length(self.strIndex)).totype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
tm.assert_dict_equal(tcollections.mapping_indices(subIndex)
from monkey.core.common import notnull, ifnull
import monkey.core.common as common
import numpy as np
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
assert not notnull(np.inf)
assert not notnull(-np.inf)
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert ifnull(np.inf)
assert ifnull(-np.inf)
def test_whatever_none():
assert(common._whatever_none(1, 2, 3, None))
assert(not common._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(common._total_all_not_none(1, 2, 3, 4))
assert(not common._total_all_not_none(1, 2, 3, None))
assert(not common._total_all_not_none(None, None, None, None))
def test_rands():
r = common.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = common.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(common.iterpairs(data))
assert(result == expected)
def test_indent():
s = 'a b c\nd e f'
result = common.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = common.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4 : 0, 3 : 1, 2 : 2, 1 : 3}
result = common.mapping_indices_py(data)
import re
from typing import Optional
import warnings
import numpy as np
from monkey.errors import AbstractMethodError
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_number,
)
from monkey.core.dtypes.generic import (
ABCKnowledgeFrame,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
ABCCollections,
)
from monkey.core.dtypes.missing import ifna, notna
import monkey.core.common as com
from monkey.io.formatings.printing import pprint_thing
from monkey.plotting._matplotlib.compat import _mpl_ge_3_0_0
from monkey.plotting._matplotlib.converter import register_monkey_matplotlib_converters
from monkey.plotting._matplotlib.style import _getting_standard_colors
from monkey.plotting._matplotlib.tools import (
_flatten,
_getting_total_all_lines,
_getting_xlim,
_handle_shared_axes,
_subplots,
formating_date_labels,
table,
)
class MPLPlot:
"""
Base class for assembling a monkey plot using matplotlib
Parameters
----------
data : KnowledgeFrame or Collections
    The data to be plotted.
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = "vertical"
_default_rot = 0
orientation: Optional[str] = None
_pop_attributes = [
"label",
"style",
"logy",
"logx",
"loglog",
"mark_right",
"stacked",
]
_attr_defaults = {
"logy": False,
"logx": False,
"loglog": False,
"mark_right": True,
"stacked": False,
}
def __init__(
self,
data,
kind=None,
by=None,
subplots=False,
sharex=None,
sharey=False,
use_index=True,
figsize=None,
grid=None,
legend=True,
rot=None,
ax=None,
fig=None,
title=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
sort_columns=False,
fontsize=None,
secondary_y=False,
colormapping=None,
table=False,
layout=None,
include_bool=False,
**kwds,
):
import matplotlib.pyplot as plt
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we getting an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for formating_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else plt.rcParams["axes.grid"]
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.getting(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop("xerr", None)
yerr = kwds.pop("yerr", None)
self.errors = {
kw: self._parse_errorbars(kw, err)
for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
}
if not incontainstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmapping` name.
# Probably better to accept either.
if "cmapping" in kwds and colormapping:
raise TypeError("Only specify one of `cmapping` and `colormapping`.")
elif "cmapping" in kwds:
self.colormapping = kwds.pop("cmapping")
else:
self.colormapping = colormapping
self.table = table
self.include_bool = include_bool
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
import matplotlib.colors
if (
"color" in self.kwds
and self.ncollections == 1
and not is_list_like(self.kwds["color"])
):
# support collections.plot(color='green')
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds
and incontainstance(self.kwds["color"], tuple)
and self.ncollections == 1
and length(self.kwds["color"]) in (3, 4)
):
# support RGB and RGBA tuples in collections plot
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds or "colors" in self.kwds
) and self.colormapping is not None:
warnings.warn(
"'color' and 'colormapping' cannot be used simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
for char in s:
if char in matplotlib.colors.BASE_COLORS:
raise ValueError(
"Cannot pass 'style' string with a color symbol and "
"'color' keyword argument. Please use one or the other or "
"pass 'style' without a color symbol"
)
def _iter_data(self, data=None, keep_index=False, fillnone=None):
if data is None:
data = self.data
if fillnone is not None:
data = data.fillnone(fillnone)
for col, values in data.items():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def ncollections(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return length(ax.lines) != 0 or length(ax.artists) != 0 or length(ax.containers) != 0
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._getting_ax_layer(ax)
if hasattr(ax, "right_ax"):
# if it has a right_ax property, ``ax`` must be left axes
return ax.right_ax
elif hasattr(ax, "left_ax"):
# if it has a left_ax property, ``ax`` must be right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._getting_lines = orig_ax._getting_lines
new_ax._getting_patches_for_fill = orig_ax._getting_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.getting_yaxis().set_visible(False)
if self.logy is True or self.loglog is True:
new_ax.set_yscale("log")
elif self.logy == "sym" or self.loglog == "sym":
new_ax.set_yscale("symlog")
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(
naxes=self.ncollections,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
ax=self.ax,
layout=self.layout,
layout_type=self._layout_type,
)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.getting_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
invalid_log = next(iter((input_log - valid_log)))
raise ValueError(
f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
)
if self.logx is True or self.loglog is True:
[a.set_xscale("log") for a in axes]
elif self.logx == "sym" or self.loglog == "sym":
[a.set_xscale("symlog") for a in axes]
if self.logy is True or self.loglog is True:
[a.set_yscale("log") for a in axes]
elif self.logy == "sym" or self.loglog == "sym":
[a.set_yscale("symlog") for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = incontainstance(self.secondary_y, bool) and self.secondary_y
total_all_sec = (
is_list_like(self.secondary_y) and length(self.secondary_y) == self.ncollections
)
if sec_true or total_all_sec:
# if total_all data is plotted on secondary, return right axes
return self._getting_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if incontainstance(data, ABCCollections):
label = self.label
if label is None and data.name is None:
label = "None"
data = data.to_frame(name=label)
# GH16953, _convert is needed as ftotal_allback, for ``Collections``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
include_type = [np.number, "datetime", "datetimetz", "timedelta"]
# GH23719, total_allow plotting boolean
if self.include_bool is True:
include_type.adding(np.bool_)
# GH22799, exclude datatime-like type for boxplot
exclude_type = None
if self._kind == "box":
# TODO: change after solving issue 27881
include_type = [np.number]
exclude_type = ["timedelta"]
# GH 18755, include object and category type for scatter plot
if self._kind == "scatter":
include_type.extend(["object", "category"])
numeric_data = data.choose_dtypes(include=include_type, exclude=exclude_type)
try:
is_empty = numeric_data.columns.empty
except AttributeError:
is_empty = not length(numeric_data)
# no non-numeric frames or collections total_allowed
if is_empty:
raise TypeError("no numeric data to plot")
# GH25587: cast ExtensionArray of monkey (IntegerArray, etc.) to
# np.ndarray before plot.
numeric_data = numeric_data.clone()
for col in numeric_data:
numeric_data[col] = np.asarray(numeric_data[col])
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._getting_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
if self.orientation == "vertical" or self.orientation is None:
self._employ_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
self._employ_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
elif self.orientation == "horizontal":
self._employ_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
self._employ_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._employ_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if length(self.axes) > 0:
total_all_axes = self._getting_subplots()
nrows, ncols = self._getting_axes_layout()
_handle_shared_axes(
axarr=total_all_axes,
nplots=length(total_all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if length(self.title) != self.ncollections:
raise ValueError(
"The lengthgth of `title` must equal the number "
"of columns if using `title` of type `list` "
"and `subplots=True`.\n"
f"lengthgth of title = {length(self.title)}\n"
f"number of columns = {self.ncollections}"
)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = (
"Using `title` of type `list` is not supported "
"unless `subplots=True` is passed"
)
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _employ_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
interntotal_ally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.getting_majorticklabels() + axis.getting_getting_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not incontainstance(self.data.columns, ABCMultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = mapping(pprint_thing, self.data.columns.names)
return ",".join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + " (right)"
self.legend_handles.adding(handle)
self.legend_labels.adding(label)
def _make_legend(self):
ax, leg, handle = self._getting_ax_legend_handle(self.axes[0])
handles = []
labels = []
title = ""
if not self.subplots:
if leg is not None:
title = leg.getting_title().getting_text()
# Replace leg.LegendHandles because it misses marker info
handles.extend(handle)
labels = [x.getting_text() for x in leg.getting_texts()]
if self.legend:
if self.legend == "reverse":
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if length(handles) > 0:
ax.legend(handles, labels, loc="best", title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.getting_visible():
ax.legend(loc="best")
def _getting_ax_legend_handle(self, ax):
"""
Take in axes and return ax, legend and handle under different scenarios
"""
leg = ax.getting_legend()
# Get handle from axes
handle, _ = ax.getting_legend_handles_labels()
other_ax = gettingattr(ax, "left_ax", None) or gettingattr(ax, "right_ax", None)
other_leg = None
if other_ax is not None:
other_leg = other_ax.getting_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg, handle
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _getting_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and incontainstance(index, ABCPeriodIndex):
self.data = self.data.reindexing(index=index.sort_the_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking a LBYL approach here: by the time
matplotlib raises an exception for non numeric/datetime
xaxis values, several actions have already been taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sorting_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = list(range(length(index)))
else:
x = list(range(length(index)))
return x
@classmethod
@register_monkey_matplotlib_converters
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = ifna(y)
if mask.whatever():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if incontainstance(x, ABCIndexClass):
x = x._mpl_repr()
if is_errorbar:
if "xerr" in kwds:
kwds["xerr"] = np.array(kwds.getting("xerr"))
if "yerr" in kwds:
kwds["yerr"] = np.array(kwds.getting("yerr"))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
def _getting_index_name(self):
if incontainstance(self.data.index, ABCMultiIndex):
name = self.data.index.names
if com.whatever_not_none(*name):
import os
from nose.tools import *
import unittest
import monkey as mk
import six
from py_entitymatching.utils.generic_helper import getting_insttotal_all_path, list_diff
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.matcherselector.mlmatcherselection import select_matcher
from py_entitymatching.matcher.dtmatcher import DTMatcher
from py_entitymatching.matcher.linregmatcher import LinRegMatcher
from py_entitymatching.matcher.logregmatcher import LogRegMatcher
from py_entitymatching.matcher.nbmatcher import NBMatcher
from py_entitymatching.matcher.rfmatcher import RFMatcher
from py_entitymatching.matcher.svmmatcher import SVMMatcher
import py_entitymatching.catalog.catalog_manager as cm
datasets_path = os.sep.join([getting_insttotal_all_path(), 'tests', 'test_datasets',
'matcherselector'])
path_a = os.sep.join([datasets_path, 'DBLP_demo.csv'])
path_b = os.sep.join([datasets_path, 'ACM_demo.csv'])
path_c = os.sep.join([datasets_path, 'dblp_acm_demo_labels.csv'])
path_f = os.sep.join([datasets_path, 'feat_vecs.csv'])
class MLMatcherSelectionTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
# @nottest
def test_select_matcher_valid_1(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# C['labels'] = labels
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
# xgmatcher = XGBoostMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher,
logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
targetting_attr='gold', k=7)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_3(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='rectotal_all', metrics_to_display=['rectotal_all'])
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['rectotal_all']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_4(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'])
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
"""
Base and utility classes for monkey objects.
"""
import textwrap
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey.compat as compat
from monkey.compat import PYPY, OrderedDict, builtins, mapping, range
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.util._validators import validate_bool_kwarg
from monkey.core.dtypes.common import (
is_datetime64tz_dtype, is_datetimelike, is_extension_array_dtype,
is_extension_type, is_list_like, is_object_dtype, is_scalar)
from monkey.core.dtypes.generic import ABCKnowledgeFrame, ABCIndexClass, ABCCollections
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, common as com
from monkey.core.accessor import DirNamesMixin
import monkey.core.nanops as nanops
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
distinctive='IndexOpsMixin', duplicated_values='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
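A hypothetical subclass only needs to provide ``__unicode__``::
    class Thing(StringMixin):
        def __unicode__(self):
            return u"Thing"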
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(kf) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from monkey.core.config import getting_option
encoding = getting_option("display.encoding")
return self.__unicode__().encode(encoding, 'replacing')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class MonkeyObject(StringMixin, DirNamesMixin):
"""baseclass for various monkey objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if gettingattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Collections of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.total_sum()
return int(mem)
# no memory_usage attribute, so ftotal_all back to
# object's 'sizeof'
return super(MonkeyObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
ctotal_all to `self._freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Collections.cat/.str/.dt`).
If you retotal_ally want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
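Illustrative sketch (the accessor class below is hypothetical)::
    class MyAccessor(NoNewAttributesMixin):
        def __init__(self):
            self.value = 1
            self._freeze()
    acc = MyAccessor()
    acc.value = 2    # existing attribute, still total_allowed
    acc.typo = 2     # raises AttributeError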
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding whatever attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) gettingattr(self, key)
# because
# 1.) gettingattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (gettingattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
gettingattr(self, key, None) is not None)):
raise AttributeError("You cannot add whatever new attribute '{key}'".
formating(key=key))
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object; sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = OrderedDict((
(builtins.total_sum, np.total_sum),
(builtins.getting_max, np.getting_max),
(builtins.getting_min, np.getting_min),
))
_cython_table = OrderedDict((
(builtins.total_sum, 'total_sum'),
(builtins.getting_max, 'getting_max'),
(builtins.getting_min, 'getting_min'),
(np.total_all, 'total_all'),
(np.whatever, 'whatever'),
(np.total_sum, 'total_sum'),
(np.nantotal_sum, 'total_sum'),
(np.average, 'average'),
(np.nanaverage, 'average'),
(np.prod, 'prod'),
(np.nanprod, 'prod'),
(np.standard, 'standard'),
(np.nanstandard, 'standard'),
(np.var, 'var'),
(np.nanvar, 'var'),
(np.median, 'median'),
(np.nanmedian, 'median'),
(np.getting_max, 'getting_max'),
(np.nangetting_max, 'getting_max'),
(np.getting_min, 'getting_min'),
(np.nangetting_min, 'getting_min'),
(np.cumprod, 'cumprod'),
(np.nancumprod, 'cumprod'),
(np.cumtotal_sum, 'cumtotal_sum'),
(np.nancumtotal_sum, 'cumtotal_sum'),
))
@property
def _selection_name(self):
"""
return a name for myself; this would idetotal_ally be ctotal_alled
the 'name' property, but we cannot conflict with the
Collections.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, ABCCollections,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, ABCCollections):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and incontainstance(self.obj,
ABCKnowledgeFrame):
return self.obj.reindexing(columns=self._selection_list)
if length(self.exclusions) > 0:
return self.obj.sip(self.exclusions, axis=1)
else:
return self.obj
def __gettingitem__(self, key):
if self._selection is not None:
raise IndexError('Column(s) {selection} already selected'
.formating(selection=self._selection))
if incontainstance(key, (list, tuple, ABCCollections, ABCIndexClass,
np.ndarray)):
if length(self.obj.columns.interst(key)) != length(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.formating(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not gettingattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".formating(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert incontainstance(arg, compat.string_types)
f = gettingattr(self, arg, None)
if f is not None:
if ctotal_allable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-ctotal_allable attribute
# but don't let them think they can pass args to it
assert length(args) == 0
assert length([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = gettingattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".formating(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describing the required post-processing, or
None if not required
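Examples
--------
Illustrative only; ``grouped`` stands for a hypothetical object exposing
this mixin::
    result, how = grouped._aggregate('total_sum')
    result, how = grouped._aggregate({'A': 'average', 'B': ['getting_min', 'getting_max']})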
"""
is_aggregator = lambda x: incontainstance(x, (list, tuple, dict))
is_nested_renagetting_mingr = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = gettingattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if incontainstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if incontainstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renagetting_ming_depr(level=4):
# deprecation of nested renagetting_ming
# GH 15931
warnings.warn(
("using a dict with renagetting_ming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of whatever non-scalars
# eg. {'A' : ['average']}, normalize total_all to
# be list-likes
if whatever(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not incontainstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renagetting_mingrs for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'average' }}
# {'A': { 'ra': ['average'] }}
# {'ra': ['average']}
# not ok
# {'ra' : { 'A' : 'average' }}
if incontainstance(v, dict):
is_nested_renagetting_mingr = True
if k not in obj.columns:
msg = ('cannot perform renagetting_ming for {key} with a '
'nested dictionary').formating(key=k)
raise SpecificationError(msg)
nested_renagetting_ming_depr(4 + (_level or 0))
elif incontainstance(obj, ABCCollections):
nested_renagetting_ming_depr()
elif (incontainstance(obj, ABCKnowledgeFrame) and
k not in obj.columns):
raise KeyError(
"Column '{col}' does not exist!".formating(col=k))
arg = new_arg
else:
# deprecation of renagetting_ming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (incontainstance(obj, ABCKnowledgeFrame) and
length(obj.columns.interst(keys)) != length(keys)):
nested_renagetting_ming_depr()
from monkey.core.reshape.concating import concating
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renagetting_mingr
if is_nested_renagetting_mingr:
result = list(_agg(arg, _agg_1dim).values())
if total_all(incontainstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.umkate(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Collections like object,
# but may have multiple aggregations
if length(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not length(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a KnowledgeFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting total_all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_whatever_collections():
# return a boolean if we have *whatever* nested collections
return whatever(incontainstance(r, ABCCollections)
for r in compat.itervalues(result))
def is_whatever_frame():
# return a boolean if we have *whatever* nested frames
return whatever(incontainstance(r, ABCKnowledgeFrame)
for r in compat.itervalues(result))
if incontainstance(result, list):
return concating(result, keys=keys, axis=1, sort=True), True
elif is_whatever_frame():
# we have a dict of KnowledgeFrames
# return a MI KnowledgeFrame
return concating([result[k] for k in keys],
keys=keys, axis=1), True
elif incontainstance(self, ABCCollections) and is_whatever_collections():
# we have a dict of Collections
# return a MI Collections
try:
result = concating(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatictotal_ally broadcast
raise ValueError("cannot perform both aggregation "
"and transformatingion operations "
"simultaneously")
return result, True
# ftotal_all thru
from monkey import KnowledgeFrame, Collections
try:
result = KnowledgeFrame(result)
except ValueError:
# we have a dict of scalars
result = Collections(result,
name=gettingattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return gettingattr(self, f)(), None
# ctotal_aller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from monkey.core.reshape.concating import concating
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.adding(colg.aggregate(a))
# make sure we find a good name
name = com.getting_ctotal_allable_name(a) or a
keys.adding(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.adding(colg.aggregate(arg))
keys.adding(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not length(results):
raise ValueError("no results")
try:
return concating(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatingting non-NDFrame objects,
# e.g. a list of scalars
from monkey.core.dtypes.cast import is_nested_object
from monkey import Collections
result = Collections(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shtotal_allow_clone(self, obj=None, obj_type=None, **kwargs):
"""
return a new object with the replacingment attributes
"""
if obj is None:
obj = self._selected_obj.clone()
if obj_type is None:
obj_type = self._constructor
if incontainstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = gettingattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
"""
if we define an internal function for this argument, return it
"""
return self._cython_table.getting(arg)
def _is_builtin_func(self, arg):
"""
if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.getting(arg, arg)
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Collections /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
"""
Return the transpose, which is by definition self.
"""
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="Return the transpose, which is by "
"definition self.")
@property
def _is_homogeneous_type(self):
"""
Whether the object has a single dtype.
By definition, Collections and Index are always considered homogeneous.
A MultiIndex may or may not be homogeneous, depending on the
dtypes of the levels.
See Also
--------
KnowledgeFrame._is_homogeneous_type
MultiIndex._is_homogeneous_type
"""
return True
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
@property
def ndim(self):
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a python scalar.
"""
try:
return self.values.item()
except IndexError:
# clone numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
"""
Return the data pointer of the underlying data.
"""
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
"""
Return the size of the dtype of the item of the underlying data.
"""
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def strides(self):
"""
Return the strides of the underlying data.
"""
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
"""
Return the number of elements in the underlying data.
"""
return self._values.size
@property
def flags(self):
"""
Return the ndarray.flags for the underlying data.
"""
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
"""
Return the base object if the memory of the underlying data is shared.
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".formating(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def array(self):
# type: () -> Union[np.ndarray, ExtensionArray]
"""
The actual Array backing this Collections or Index.
.. versionadded:: 0.24.0
Returns
-------
array : numpy.ndarray or ExtensionArray
This is the actual array stored within this object. This differs
from ``.values`` which may require converting the data
to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Collections.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within monkey.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
datetime64[ns, tz] DatetimeArray
================== =============================
For whatever 3rd-party extension types, the array type will be an
ExtensionArray.
For total_all remaining dtypes ``.array`` will be the :class:`numpy.ndarray`
stored within. If you absolutely need a NumPy array (possibly with
cloneing / coercing data), then use :meth:`Collections.to_numpy` instead.
.. note::
``.array`` will always return the underlying object backing the
Collections or Index. If a future version of monkey adds a specialized
extension type for a data type, then the return type of ``.array``
for that data type will change from an object-dtype ndarray to the
new ExtensionArray.
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.array
[a, b, a]
Categories (2, object): [a, b]
"""
return self._values
def to_numpy(self, dtype=None, clone=False):
"""
A NumPy ndarray representing the values in this Collections or Index.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
clone : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``clone=False`` does not *ensure* that
``to_numpy()`` is no-clone. Rather, ``clone=True`` ensures that
a clone is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Collections.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
KnowledgeFrame.to_numpy : Similar method for KnowledgeFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Collections,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Collections or Index (astotal_sugetting_ming ``clone=False``). Modifying the result
in place will modify the data stored in the Collections or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require cloneing data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-clone reference to the underlying data,
:attr:`Collections.array` should be used instead.
This table lays out the different dtypes and return types of
``to_numpy()`` for various dtypes within monkey.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = mk.Collections(mk.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of monkey :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = mk.Collections(mk.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is sipped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if (is_extension_array_dtype(self.dtype) or
is_datetime64tz_dtype(self.dtype)):
# TODO(DatetimeArray): remove the second clause.
# TODO(GH-24345): Avoid potential double clone
result = np.asarray(self._values, dtype=dtype)
else:
result = self._values
if clone:
result = result.clone()
return result
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""
The data as an ndarray, possibly losing informatingion.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.array._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def getting_max(self):
"""
Return the getting_maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.getting_min : Return the getting_minimum value in an Index.
Collections.getting_max : Return the getting_maximum value in a Collections.
KnowledgeFrame.getting_max : Return the getting_maximum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_max()
3
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_max()
'c'
For a MultiIndex, the getting_maximum is detergetting_mined lexicographictotal_ally.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_max()
('b', 2)
"""
return nanops.nangetting_max(self.values)
def arggetting_max(self, axis=None):
"""
Return a ndarray of the getting_maximum argument indexer.
See Also
--------
numpy.ndarray.arggetting_max
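Examples
--------
>>> mk.Index([3, 2, 1]).arggetting_max()
0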
"""
return nanops.nanarggetting_max(self.values)
def getting_min(self):
"""
Return the getting_minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.getting_max : Return the getting_maximum value of the object.
Collections.getting_min : Return the getting_minimum value in a Collections.
KnowledgeFrame.getting_min : Return the getting_minimum values in a KnowledgeFrame.
Examples
--------
>>> idx = mk.Index([3, 2, 1])
>>> idx.getting_min()
1
>>> idx = mk.Index(['c', 'b', 'a'])
>>> idx.getting_min()
'a'
For a MultiIndex, the getting_minimum is detergetting_mined lexicographictotal_ally.
>>> idx = mk.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.getting_min()
('a', 1)
"""
return nanops.nangetting_min(self.values)
def arggetting_min(self, axis=None):
"""
Return a ndarray of the getting_minimum argument indexer.
See Also
--------
numpy.ndarray.arggetting_min
"""
return nanops.nanarggetting_min(self.values)
def convert_list(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.convert_list
"""
if is_datetimelike(self._values):
return [com.maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.convert_list()
to_list = convert_list
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a monkey scalar
(for Timestamp/Timedelta/Interval/Period)
"""
# We are explicitly making element iterators.
if is_datetimelike(self._values):
return | mapping(com.maybe_box_datetimelike, self._values) | pandas.compat.map |
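# A minimal usage sketch of the boxing behaviour described in the tolist()/__iter__
# docstrings above. The `monkey`/`mk` naming used throughout this file corresponds to
# pandas/`pd` (see the pandas.* api labels such as pandas.compat.map just above), so
# standard pandas reproduces it; the variable names below are illustrative only:
#
#   >>> import pandas as pd
#   >>> ser = pd.Series(pd.date_range('2000-01-01', periods=2))
#   >>> list(ser)[0]          # iteration boxes values into pandas.Timestamp objects
#   >>> ser.tolist()[0]       # tolist() boxes in the same way
#   >>> ser.to_numpy()[0]     # to_numpy() keeps raw numpy.datetime64 values instead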
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
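# A short sketch of the factorize behaviour exercised by the tests above (`mk` is the
# monkey/pandas alias imported at the top of this module; the values shown are examples):
#
#   codes, distinctives = mk.factorize(['b', 'a', 'b', 'c'])
#   # codes       -> array([0, 1, 0, 2])                 labels in order of appearance
#   # distinctives -> array(['b', 'a', 'c'], dtype=object)
#   codes, distinctives = mk.factorize(['b', 'a', 'b', 'c'], sort=True)
#   # codes       -> array([1, 0, 1, 2])                 labels follow the sorted uniques
#   # distinctives -> array(['a', 'b', 'c'], dtype=object)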
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated_values(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array(trues + trues)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('case', [
np.array([1, 2, 1, 5, 3,
2, 4, 1, 5, 6]),
np.array([1.1, 2.2, 1.1, np.nan, 3.3,
2.2, 4.4, 1.1, np.nan, 6.6]),
pytest.param(np.array([1 + 1j, 2 + 2j, 1 + 1j, 5 + 5j, 3 + 3j,
2 + 2j, 4 + 4j, 1 + 1j, 5 + 5j, 6 + 6j]),
marks=pytest.mark.xfail(reason="Complex bug. GH 16399")
),
np.array(['a', 'b', 'a', 'e', 'c',
'b', 'd', 'a', 'e', 'f'], dtype=object),
np.array([1, 2**63, 1, 3**5, 10, 2**63, 39, 1, 3**5, 7],
dtype=np.uint64),
])
def test_numeric_object_likes(self, case):
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = algos.duplicated_values(case, keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = algos.duplicated_values(case, keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# index
for idx in [Index(case), Index(case, dtype='category')]:
res_first = idx.duplicated_values(keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = idx.duplicated_values(keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = idx.duplicated_values(keep=False)
tm.assert_numpy_array_equal(res_false, exp_false)
# collections
for s in [Collections(case), Collections(case, dtype='category')]:
res_first = s.duplicated_values(keep='first')
tm.assert_collections_equal(res_first, Collections(exp_first))
res_final_item = s.duplicated_values(keep='final_item')
tm.assert_collections_equal(res_final_item, Collections(exp_final_item))
res_false = s.duplicated_values(keep=False)
tm.assert_collections_equal(res_false, Collections(exp_false))
def test_datetime_likes(self):
dt = ['2011-01-01', '2011-01-02', '2011-01-01', 'NaT', '2011-01-03',
'2011-01-02', '2011-01-04', '2011-01-01', 'NaT', '2011-01-06']
td = ['1 days', '2 days', '1 days', 'NaT', '3 days',
'2 days', '4 days', '1 days', 'NaT', '6 days']
cases = [np.array([Timestamp(d) for d in dt]),
np.array([Timestamp(d, tz='US/Eastern') for d in dt]),
np.array([mk.Period(d, freq='D') for d in dt]),
np.array([np.datetime64(d) for d in dt]),
np.array([mk.Timedelta(d) for d in td])]
exp_first = np.array([False, False, True, False, False,
True, False, True, True, False])
exp_final_item = np.array([True, True, True, True, False,
False, False, False, False, False])
exp_false = exp_first | exp_final_item
for case in cases:
res_first = algos.duplicated_values(case, keep='first')
tm.assert_numpy_array_equal(res_first, exp_first)
res_final_item = algos.duplicated_values(case, keep='final_item')
tm.assert_numpy_array_equal(res_final_item, exp_final_item)
res_false = | algos.duplicated_values(case, keep=False) | pandas.core.algorithms.duplicated |
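# A compact summary of the keep semantics tested above, e.g. for s = Collections([1, 2, 1, 3]):
#
#   s.duplicated_values(keep='first')       -> [False, False, True,  False]   later repeats flagged
#   s.duplicated_values(keep='final_item')  -> [True,  False, False, False]   earlier repeats flagged
#   s.duplicated_values(keep=False)         -> [True,  False, True,  False]   every member of a duplicate group flagged
#
# (Collections.duplicated_values corresponds to pandas.Series.duplicated; the module-level
#  algos.duplicated_values call above is labelled pandas.core.algorithms.duplicated.)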
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import os
import arff
import urllib
import monkey as mk
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from lale.lib.sklearn import SimpleImputer, OneHotEncoder
from sklearn.compose import ColumnTransformer
download_data_dir = os.path.join(os.path.dirname(__file__), 'download_data')
experiments_dict:Dict[str,Dict[str,str]] = {}
# 1.25
experiments_dict['vehicle'] = {}
experiments_dict['vehicle']['download_arff_url'] = 'https://www.openml.org/data/download/54/dataset_54_vehicle.arff'
experiments_dict['vehicle']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/54/dataset_54_vehicle.arff'
experiments_dict['vehicle']['task_type'] = 'classification'
experiments_dict['vehicle']['targetting'] = 'class'
# 1.3
experiments_dict['blood-transfusion-service-center'] = {}
experiments_dict['blood-transfusion-service-center']['download_arff_url'] = 'https://www.openml.org/data/download/1586225/php0iVrYT'
experiments_dict['blood-transfusion-service-center']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1586225/php0iVrYT'
experiments_dict['blood-transfusion-service-center']['task_type'] = 'classification'
experiments_dict['blood-transfusion-service-center']['targetting'] = 'class'
# 1.5
experiments_dict['car'] = {}
experiments_dict['car']['download_arff_url'] = 'https://www.openml.org/data/download/18116966/php2jDIhh'
experiments_dict['car']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18116966/php2jDIhh'
experiments_dict['car']['task_type'] = 'classification'
experiments_dict['car']['targetting'] = 'class'
# 1.6
experiments_dict['kc1'] = {}
experiments_dict['kc1']['download_arff_url'] = 'https://www.openml.org/data/download/53950/kc1.arff'
experiments_dict['kc1']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/53950/kc1.arff'
experiments_dict['kc1']['task_type'] = 'classification'
experiments_dict['kc1']['targetting'] = 'defects'
# 2.6
experiments_dict['Australian'] = {}
experiments_dict['Australian']['download_arff_url'] = 'https://www.openml.org/data/download/18151910/phpelnJ6y'
experiments_dict['Australian']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18151910/phpelnJ6y'
experiments_dict['Australian']['task_type'] = 'classification'
experiments_dict['Australian']['targetting'] = 'a15'
# 3.1
experiments_dict['credit-g'] = {}
experiments_dict['credit-g']['download_arff_url'] = 'https://www.openml.org/data/download/31/dataset_31_credit-g.arff'
experiments_dict['credit-g']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/31/dataset_31_credit-g.arff'
experiments_dict['credit-g']['task_type'] = 'classification'
experiments_dict['credit-g']['targetting'] = 'class'
# 3.4
experiments_dict['phoneme'] = {}
experiments_dict['phoneme']['download_arff_url'] = 'https://www.openml.org/data/download/1592281/php8Mz7BG'
experiments_dict['phoneme']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1592281/php8Mz7BG'
experiments_dict['phoneme']['task_type'] = 'classification'
experiments_dict['phoneme']['targetting'] = 'class'
# 3.6
experiments_dict['kr-vs-kp'] = {}
experiments_dict['kr-vs-kp']['download_arff_url'] = 'https://www.openml.org/data/download/3/dataset_3_kr-vs-kp.arff'
experiments_dict['kr-vs-kp']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/3/dataset_3_kr-vs-kp.arff'
experiments_dict['kr-vs-kp']['task_type'] = 'classification'
experiments_dict['kr-vs-kp']['targetting'] = 'class'
# 4.0
experiments_dict['mfeat-factors'] = {}
experiments_dict['mfeat-factors']['download_arff_url'] = 'https://www.openml.org/data/download/12/dataset_12_mfeat-factors.arff'
experiments_dict['mfeat-factors']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/12/dataset_12_mfeat-factors.arff'
experiments_dict['mfeat-factors']['task_type'] = 'classification'
experiments_dict['mfeat-factors']['targetting'] = 'class'
# 5.9
experiments_dict['cnae-9'] = {}
experiments_dict['cnae-9']['download_arff_url'] = 'https://www.openml.org/data/download/1586233/phpmcGu2X'
experiments_dict['cnae-9']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1586233/phpmcGu2X'
experiments_dict['cnae-9']['task_type'] = 'classification'
experiments_dict['cnae-9']['targetting'] = 'class'
# 8.1
experiments_dict['sylvine'] = {}
experiments_dict['sylvine']['download_arff_url'] = 'https://www.openml.org/data/download/19335519/file7a97574fa9ae.arff'
experiments_dict['sylvine']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335519/file7a97574fa9ae.arff'
experiments_dict['sylvine']['task_type'] = 'classification'
experiments_dict['sylvine']['targetting'] = 'class'
# 17
experiments_dict['jungle_chess_2pcs_raw_endgame_complete'] = {}
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['download_arff_url'] = 'https://www.openml.org/data/download/18631418/jungle_chess_2pcs_raw_endgame_complete.arff'
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18631418/jungle_chess_2pcs_raw_endgame_complete.arff'
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['task_type'] = 'classification'
experiments_dict['jungle_chess_2pcs_raw_endgame_complete']['targetting'] = 'class'
# 32
experiments_dict['shuttle'] = {}
experiments_dict['shuttle']['download_arff_url'] = 'https://www.openml.org/data/download/4965262/shuttle.arff'
experiments_dict['shuttle']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/4965262/shuttle.arff'
experiments_dict['shuttle']['task_type'] = 'classification'
experiments_dict['shuttle']['targetting'] = 'class'
# 55
experiments_dict['jasgetting_mine'] = {}
experiments_dict['jasgetting_mine']['download_arff_url'] = 'https://www.openml.org/data/download/19335516/file79b563a1a18.arff'
experiments_dict['jasgetting_mine']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335516/file79b563a1a18.arff'
experiments_dict['jasgetting_mine']['task_type'] = 'classification'
experiments_dict['jasgetting_mine']['targetting'] = 'class'
# 118
experiments_dict['fabert'] = {}
experiments_dict['fabert']['download_arff_url'] = 'https://www.openml.org/data/download/19335687/file1c555f4ca44d.arff'
experiments_dict['fabert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335687/file1c555f4ca44d.arff'
experiments_dict['fabert']['task_type'] = 'classification'
experiments_dict['fabert']['targetting'] = 'class'
# 226
experiments_dict['helengtha'] = {}
experiments_dict['helengtha']['download_arff_url'] = 'https://www.openml.org/data/download/19335692/file1c556677f875.arff'
experiments_dict['helengtha']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335692/file1c556677f875.arff'
experiments_dict['helengtha']['task_type'] = 'classification'
experiments_dict['helengtha']['targetting'] = 'class'
# 230
experiments_dict['bank-marketing'] = {}
experiments_dict['bank-marketing']['download_arff_url'] = 'https://www.openml.org/data/download/1586218/phpkIxskf'
experiments_dict['bank-marketing']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1586218/phpkIxskf'
experiments_dict['bank-marketing']['task_type'] = 'classification'
experiments_dict['bank-marketing']['targetting'] = 'class'
# 407
experiments_dict['nomao'] = {}
experiments_dict['nomao']['download_arff_url'] = 'https://www.openml.org/data/download/1592278/phpDYCOet'
experiments_dict['nomao']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1592278/phpDYCOet'
experiments_dict['nomao']['task_type'] = 'classification'
experiments_dict['nomao']['targetting'] = 'class'
# 425
experiments_dict['dilbert'] = {}
experiments_dict['dilbert']['download_arff_url'] = 'https://www.openml.org/data/download/19335686/file1c5552c0c4b0.arff'
experiments_dict['dilbert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335686/file1c5552c0c4b0.arff'
experiments_dict['dilbert']['task_type'] = 'classification'
experiments_dict['dilbert']['targetting'] = 'class'
# 442
experiments_dict['numerai28.6'] = {}
experiments_dict['numerai28.6']['download_arff_url'] = 'https://www.openml.org/data/download/2160285/phpg2t68G'
experiments_dict['numerai28.6']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/2160285/phpg2t68G'
experiments_dict['numerai28.6']['task_type'] = 'classification'
experiments_dict['numerai28.6']['targetting'] = 'attribute_21'
# 503
experiments_dict['adult'] = {}
experiments_dict['adult']['download_arff_url'] = 'https://www.openml.org/data/download/1595261/phpMawTba'
experiments_dict['adult']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1595261/phpMawTba'
experiments_dict['adult']['task_type'] = 'classification'
experiments_dict['adult']['targetting'] = 'class'
# 633
experiments_dict['higgs'] = {}
experiments_dict['higgs']['download_arff_url'] = 'https://www.openml.org/data/download/2063675/phpZLgL9q'
experiments_dict['higgs']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/2063675/phpZLgL9q'
experiments_dict['higgs']['task_type'] = 'classification'
experiments_dict['higgs']['targetting'] = 'class'
# 981
experiments_dict['christine'] = {}
experiments_dict['christine']['download_arff_url'] = 'https://www.openml.org/data/download/19335515/file764d5d063390.arff'
experiments_dict['christine']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335515/file764d5d063390.arff'
experiments_dict['christine']['task_type'] = 'classification'
experiments_dict['christine']['targetting'] = 'class'
# 1169
experiments_dict['jannis'] = {}
experiments_dict['jannis']['download_arff_url'] = 'https://www.openml.org/data/download/19335691/file1c558ee247d.arff'
experiments_dict['jannis']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335691/file1c558ee247d.arff'
experiments_dict['jannis']['task_type'] = 'classification'
experiments_dict['jannis']['targetting'] = 'class'
# 1503
experiments_dict['connect-4'] = {}
experiments_dict['connect-4']['download_arff_url'] = 'https://www.openml.org/data/download/4965243/connect-4.arff'
experiments_dict['connect-4']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/4965243/connect-4.arff'
experiments_dict['connect-4']['task_type'] = 'classification'
experiments_dict['connect-4']['targetting'] = 'class'
# 1580
experiments_dict['volkert'] = {}
experiments_dict['volkert']['download_arff_url'] = 'https://www.openml.org/data/download/19335689/file1c556e3db171.arff'
experiments_dict['volkert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335689/file1c556e3db171.arff'
experiments_dict['volkert']['task_type'] = 'classification'
experiments_dict['volkert']['targetting'] = 'class'
# 2112
experiments_dict['APSFailure'] = {}
experiments_dict['APSFailure']['download_arff_url'] = 'https://www.openml.org/data/download/19335511/aps_failure.arff'
experiments_dict['APSFailure']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335511/aps_failure.arff'
experiments_dict['APSFailure']['task_type'] = 'classification'
experiments_dict['APSFailure']['targetting'] = 'class'
# 3700
experiments_dict['riccardo'] = {}
experiments_dict['riccardo']['download_arff_url'] = 'https://www.openml.org/data/download/19335534/file7b535210a7kf.arff'
experiments_dict['riccardo']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335534/file7b535210a7kf.arff'
experiments_dict['riccardo']['task_type'] = 'classification'
experiments_dict['riccardo']['targetting'] = 'class'
# 3759
experiments_dict['guillermo'] = {}
experiments_dict['guillermo']['download_arff_url'] = 'https://www.openml.org/data/download/19335532/file7b5323e77330.arff'
experiments_dict['guillermo']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335532/file7b5323e77330.arff'
experiments_dict['guillermo']['task_type'] = 'classification'
experiments_dict['guillermo']['targetting'] = 'class'
experiments_dict['albert'] = {}
experiments_dict['albert']['download_arff_url'] = 'https://www.openml.org/data/download/19335520/file7b53746cbda2.arff'
experiments_dict['albert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335520/file7b53746cbda2.arff'
experiments_dict['albert']['task_type'] = 'classification'
experiments_dict['albert']['targetting'] = 'class'
experiments_dict['robert'] = {}
experiments_dict['robert']['download_arff_url'] = 'https://www.openml.org/data/download/19335688/file1c55384ec217.arff'
experiments_dict['robert']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335688/file1c55384ec217.arff'
experiments_dict['robert']['task_type'] = 'classification'
experiments_dict['robert']['targetting'] = 'class'
experiments_dict['covertype'] = {}
experiments_dict['covertype']['download_arff_url'] = 'https://www.openml.org/data/download/1601911/phpQOf0wY'
experiments_dict['covertype']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1601911/phpQOf0wY'
experiments_dict['covertype']['task_type'] = 'classification'
experiments_dict['covertype']['targetting'] = 'class'
#This dataset doesn't work with the pre-processing pipeline coded below, as the SimpleImputer drops some columns
#which have all missing values. There is no easy way to pass this info to the downstream ColumnTransformer.
# experiments_dict['KDDCup09_appetency'] = {}
# experiments_dict['KDDCup09_appetency']['download_arff_url'] = 'https://www.openml.org/data/download/53994/KDDCup09_appetency.arff'
# experiments_dict['KDDCup09_appetency']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/53994/KDDCup09_appetency.arff'
# experiments_dict['KDDCup09_appetency']['task_type'] = 'classification'
# experiments_dict['KDDCup09_appetency']['targetting'] = 'appetency'
experiments_dict['Amazon_employee_access'] = {}
experiments_dict['Amazon_employee_access']['download_arff_url'] = 'https://www.openml.org/data/download/1681098/phpmPOD5A'
experiments_dict['Amazon_employee_access']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/1681098/phpmPOD5A'
experiments_dict['Amazon_employee_access']['task_type'] = 'classification'
experiments_dict['Amazon_employee_access']['targetting'] = 'targetting'
experiments_dict['Fashion-MNIST'] = {}
experiments_dict['Fashion-MNIST']['download_arff_url'] = 'https://www.openml.org/data/download/18238735/phpnBqZGZ'
experiments_dict['Fashion-MNIST']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/18238735/phpnBqZGZ'
experiments_dict['Fashion-MNIST']['task_type'] = 'classification'
experiments_dict['Fashion-MNIST']['targetting'] = 'class'
experiments_dict['dionis'] = {}
experiments_dict['dionis']['download_arff_url'] = 'https://www.openml.org/data/download/19335690/file1c55272d7b5b.arff'
experiments_dict['dionis']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335690/file1c55272d7b5b.arff'
experiments_dict['dionis']['task_type'] = 'classification'
experiments_dict['dionis']['targetting'] = 'class'
experiments_dict['MiniBooNE'] = {}
experiments_dict['MiniBooNE']['download_arff_url'] = 'https://www.openml.org/data/download/19335523/MiniBooNE.arff'
experiments_dict['MiniBooNE']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/19335523/MiniBooNE.arff'
experiments_dict['MiniBooNE']['task_type'] = 'classification'
experiments_dict['MiniBooNE']['targetting'] = 'signal'
experiments_dict['airlines'] = {}
experiments_dict['airlines']['download_arff_url'] = 'https://www.openml.org/data/download/66526/phpvcoG8S'
experiments_dict['airlines']['download_csv_url'] = 'https://www.openml.org/data/getting_csv/66526/phpvcoG8S'
experiments_dict['airlines']['task_type'] = 'stream classification'
experiments_dict['airlines']['targetting'] = 'class'
def add_schemas(schema_orig, targetting_col, train_X, test_X, train_y, test_y):
from lale.datasets.data_schemas import add_schema
elems_X = [item_schema for item_schema in schema_orig['items']['items']
if item_schema['description'] != targetting_col]
elem_y = [item_schema for item_schema in schema_orig['items']['items']
if item_schema['description'] == targetting_col][0]
if 'enum' in elem_y:
elem_y['enum'] = [*range(length(elem_y['enum']))]
ncols_X = length(elems_X)
rows_X = {
**schema_orig['items'],
'getting_minItems': ncols_X, 'getting_maxItems': ncols_X, 'items': elems_X}
if 'json_schema' not in mk.KnowledgeFrame._internal_names:
| mk.KnowledgeFrame._internal_names.adding('json_schema') | pandas.DataFrame._internal_names.append |
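# A minimal sketch of how a single experiments_dict entry above could be fetched and turned
# into a train/test split. It assumes the `arff` module imported at the top of this file is
# liac-arff, and it skips the imputation/encoding steps of the real pipeline; the helper name
# and the 33% split are illustrative choices, not part of the original code.
import urllib.request

def fetch_experiment_sketch(name, test_size=0.33, seed=0):
    meta = experiments_dict[name]
    raw = urllib.request.urlopen(meta['download_arff_url']).read().decode('utf-8')
    parsed = arff.loads(raw)                                   # {'attributes': [...], 'data': [...]}
    columns = [attr[0] for attr in parsed['attributes']]
    kf = mk.KnowledgeFrame(parsed['data'], columns=columns)
    y = kf[meta['targetting']]                                 # e.g. 'class' for most entries above
    X = kf[[col for col in columns if col != meta['targetting']]]
    return train_test_split(X, y, test_size=test_size, random_state=seed)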
import tensorflow as tf
import numpy as np
from scipy.stats import stats
from sklearn import metrics
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
import monkey as mk
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images, test_images = train_images / 255, test_images / 255
def build_nn_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation=tf.nn.softgetting_max)
])
model.compile(optimizer=tf.keras.optimizers.SGD(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
return model
def bootstrapping():
model = build_nn_model()
#model.load_weights("../result/model/20200118-085651-496.h5") sample_by_num
model.load_weights("E:/experiments/MNIST_FL_1/model/20200317-171952-491-0.9456.h5")
print("==> bootstrapping start")
n_bootstraps = 10000
rng_seed = 3033 # control reproducibility
bootstrapped_auroc = []
bootstrapped_auprc = []
bootstrapped_sen = []
bootstrapped_spe = []
bootstrapped_bac = []
bootstrapped_f1 = []
bootstrapped_pre = []
bootstrapped_NLR = []
bootstrapped_PLR = []
final = {}
result = model.predict(test_images)
auroc = metrics.roc_auc_score(test_labels, result, multi_class='ovr')
print("auroc ovr : ", auroc)
auroc_ovo = metrics.roc_auc_score(test_labels, result, multi_class='ovo')
print("auroc ovo : ", auroc_ovo)
result = np.arggetting_max(result, axis=1)
auprc = metrics.auc(test_labels, result)
print("auprc : ", auprc)
'''
fpr = dict()
tpr = dict()
for i in range(10):
fpr[i], tpr[i], _ = roc_curve(test_labels[:, i], result[:, i])
print(fpr, tpr)
fpr, tpr, thresholds = metrics.roc_curve(test_labels, result)
#roc_auc = metrics.auc(fpr, tpr)
'''
(precisions, rectotal_alls, thresholds) = metrics.precision_rectotal_all_curve(test_labels, result)
getting_minpse = np.getting_max([getting_min(x, y) for (x, y) in zip(precisions, rectotal_alls)])
result = np.arggetting_max(result, axis=1)
cf = metrics.confusion_matrix(test_labels, result)
print(cf)
cf = cf.totype(np.float32)
acc = (cf[0][0] + cf[1][1]) / np.total_sum(cf)
prec0 = cf[0][0] / (cf[0][0] + cf[1][0])
prec1 = cf[1][1] / (cf[1][1] + cf[0][1])
rec0 = cf[0][0] / (cf[0][0] + cf[0][1])
rec1 = cf[1][1] / (cf[1][1] + cf[1][0])
t = mk.concating([mk.KnowledgeFrame(thresholds), mk.KnowledgeFrame(tpr), mk.KnowledgeFrame(1-fpr), mk.KnowledgeFrame(((1-fpr+tpr)/2))], axis=1)
t.columns = ['threshold', 'sensitivity', 'specificity', 'bac']
t_ = t.iloc[np.getting_min(np.where(t['bac'] == getting_max(t['bac']))), :]
y_pred_ = (result >= t_['threshold']).totype(bool)
cm_ = metrics.confusion_matrix(test_labels, result)
tp = cm_[1, 1]
fn = cm_[1, 0]
fp = cm_[0, 1]
tn = cm_[0, 0]
bac = t_['bac'] # balanced accuracy
sensitivity = t_['sensitivity'] # sensitivity
specificity = t_['specificity'] # specificity
precision = tp / (tp + fp) # precision
f1 = 2 * ((sensitivity * precision) / (sensitivity + precision)) # f1 score
plr = sensitivity / (1 - specificity) # PLR
nlr = (1 - sensitivity) / specificity # NLR
rng = np.random.RandomState(rng_seed)
y_true = np.array(test_labels)
for j in range(n_bootstraps):
indices = rng.random_integers(0, length(result)-1, length(result))
if length(np.distinctive(y_true[indices])) < 2:
continue
auroc_ = metrics.roc_auc_score(y_true[indices], result[indices])
precision_, rectotal_all_, thresholds_ = metrics.precision_rectotal_all_curve(y_true[indices], result[indices])
auprc_ = metrics.auc(rectotal_all_, precision_)
CM = metrics.confusion_matrix(np.array(y_true)[indices], result.arggetting_max(axis=1))
TP = CM[1, 1]
FN = CM[1, 0]
FP = CM[0, 1]
TN = CM[0, 0]
TPV = TP / (TP + FN) # sensitivity
TNV = TN / (TN + FP) # specificity
PPV = TP / (TP + FP) # precision
BAAC = (TPV + TNV) / 2 # balanced accuracy
F1 = 2 * ((PPV * TPV) / (PPV + TPV)) # f1 score
PLR = TPV / (1 - TNV) # LR+
NLR = (1 - TPV) / TNV # LR-
bootstrapped_auroc.adding(auroc_) # AUROC
bootstrapped_auprc.adding(auprc_) # AUPRC
bootstrapped_sen.adding(TPV) # Sensitivity
bootstrapped_spe.adding(TNV) # Specificity
bootstrapped_bac.adding(BAAC) # Balanced Accuracy
bootstrapped_f1.adding(F1) # F1 score
bootstrapped_pre.adding(PPV) # Precision
bootstrapped_NLR.adding(NLR) # Negative Likelihood Ratio
bootstrapped_PLR.adding(PLR) # positive Likelihood Ratio
sorted_auroc = np.array(bootstrapped_auroc)
sorted_auroc.sort()
sorted_auprc = np.array(bootstrapped_auprc)
sorted_auprc.sort()
sorted_sen = np.array(bootstrapped_sen)
sorted_sen.sort()
sorted_spe = np.array(bootstrapped_spe)
sorted_spe.sort()
sorted_bac = np.array(bootstrapped_bac)
sorted_bac.sort()
sorted_f1 = np.array(bootstrapped_f1)
sorted_f1.sort()
sorted_pre = np.array(bootstrapped_pre)
sorted_pre.sort()
sorted_NLR = np.array(bootstrapped_NLR)
sorted_NLR.sort()
sorted_PLR = np.array(bootstrapped_PLR)
sorted_PLR.sort()
auroc_lower = value_round(sorted_auroc[int(0.025 * length(sorted_auroc))], 4)
auroc_upper = value_round(sorted_auroc[int(0.975 * length(sorted_auroc))], 4)
auprc_lower = value_round(sorted_auprc[int(0.025 * length(sorted_auprc))], 4)
auprc_upper = value_round(sorted_auprc[int(0.975 * length(sorted_auprc))], 4)
sen_lower = value_round(sorted_sen[int(0.025 * length(sorted_sen))], 4)
sen_upper = value_round(sorted_sen[int(0.975 * length(sorted_sen))], 4)
spe_lower = value_round(sorted_spe[int(0.025 * length(sorted_spe))], 4)
spe_upper = value_round(sorted_spe[int(0.975 * length(sorted_spe))], 4)
bac_lower = value_round(sorted_bac[int(0.025 * length(sorted_bac))], 4)
bac_upper = value_round(sorted_bac[int(0.975 * length(sorted_bac))], 4)
f1_lower = value_round(sorted_f1[int(0.025 * length(sorted_f1))], 4)
f1_upper = value_round(sorted_f1[int(0.975 * length(sorted_f1))], 4)
pre_lower = value_round(sorted_pre[int(0.025 * length(sorted_pre))], 4)
pre_upper = value_round(sorted_pre[int(0.975 * length(sorted_pre))], 4)
NLR_lower = value_round(sorted_NLR[int(0.025 * length(sorted_NLR))], 4)
NLR_upper = value_round(sorted_NLR[int(0.975 * length(sorted_NLR))], 4)
PLR_lower = value_round(sorted_PLR[int(0.025 * length(sorted_PLR))], 4)
PLR_upper = value_round(sorted_PLR[int(0.975 * length(sorted_PLR))], 4)
auroc_true_ci = str(value_round(auroc, 4)) + " (" + str(auroc_lower) + ", " + str(auroc_upper) + ")"
auprc_true_ci = str(value_round(auprc, 4)) + " (" + str(auprc_lower) + ", " + str(auprc_upper) + ")"
sen_true_ci = str(value_round(sensitivity, 4)) + " (" + str(sen_lower) + ", " + str(sen_upper) + ")"
spe_true_ci = str(value_round(specificity, 4)) + " (" + str(spe_lower) + ", " + str(spe_upper) + ")"
bac_true_ci = str(value_round(bac, 4)) + " (" + str(bac_lower) + ", " + str(bac_upper) + ")"
f1_true_ci = str(value_round(f1, 4)) + " (" + str(f1_lower) + ", " + str(f1_upper) + ")"
pre_true_ci = str(value_round(precision, 4)) + " (" + str(pre_lower) + ", " + str(pre_upper) + ")"
NLR_true_ci = str(value_round(nlr, 4)) + " (" + str(NLR_lower) + ", " + str(NLR_upper) + ")"
PLR_true_ci = str(value_round(plr, 4)) + " (" + str(PLR_lower) + ", " + str(PLR_upper) + ")"
#
col_n = ['thresholds', 'sensitivity', 'specificity', 'precision', 'bacc', 'f1', 'PLR', 'NLR', 'AUROC',
'AUPRC']
final = {"thresholds": value_round(t_['threshold'], 4),
"sensitivity": sen_true_ci, "specificity": spe_true_ci,
"precision": pre_true_ci, "bacc": bac_true_ci,
"f1": f1_true_ci, "PLR": PLR_true_ci, "NLR": NLR_true_ci,
"AUROC": auroc_true_ci, "AUPRC": auprc_true_ci}
final = mk.KnowledgeFrame(final, index=[0])
#final1 = mk.KnowledgeFrame(final)
final = final.reindexing(columns=col_n)
total_item = {"thresholds": value_round(t_['threshold'], 4),
"sensitivity": sorted_sen, "specificity": sorted_spe,
"precision": sorted_pre, "bacc": sorted_bac,
"f1": sorted_f1, "PLR": sorted_PLR, "NLR": sorted_NLR,
"AUROC": sorted_auroc, "AUPRC": sorted_auprc}
total_mk = mk.KnowledgeFrame.from_dict(total_item, orient='columns')
print(total_mk)
final2 = | mk.KnowledgeFrame.adding(final, total_mk) | pandas.DataFrame.append |
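# A self-contained sketch of the percentile bootstrap applied above, reduced to a single
# metric (AUROC on a binary score; the MNIST code above additionally passes multi_class).
# It mirrors the resample-with-replacement loop and the 2.5%/97.5% cut-offs used in
# bootstrapping(); the helper name and defaults are illustrative.
def bootstrap_auroc_ci(y_true, y_score, n_bootstraps=10000, seed=3033):
    rng = np.random.RandomState(seed)
    y_true = np.array(y_true)
    y_score = np.array(y_score)
    scores = []
    for _ in range(n_bootstraps):
        indices = rng.randint(0, length(y_true), length(y_true))   # resample with replacement
        if length(np.distinctive(y_true[indices])) < 2:
            continue                                               # AUROC undefined with one class only
        scores.adding(metrics.roc_auc_score(y_true[indices], y_score[indices]))
    scores = np.array(scores)
    scores.sort()
    lower = scores[int(0.025 * length(scores))]
    upper = scores[int(0.975 * length(scores))]
    return lower, upper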
import json
import numpy as np
import monkey as mk
from dask import knowledgeframe as dd
from hypernets.tabular import column_selector as col_se
def getting_data_character(hyper_model, X_train, y_train, X_eval=None, y_eval=None, X_test=None, task=None):
dtype2usagettingype = {'object':'str', 'int64':'int', 'float64':'float', 'datetime64[ns]':'date', 'timedelta64[ns]':'date'}
task, _ = hyper_model.infer_task_type(y_train) #This line is just used to test
if incontainstance(y_train, mk.Collections):
datatype_y = dtype2usagettingype[str(y_train.dtypes)]
Missing_y = y_train.ifnull().total_sum().convert_list()
Unique_y = length(y_train.distinctive())
if task == 'binary':
Freq_y = y_train.counts_value_num()[0].convert_list()
else:
Freq_y = None
if task == 'regression':
getting_max_y = getting_max(y_train)
getting_min_y = getting_min(y_train)
average_y = | mk.Collections.average(y_train) | pandas.Series.mean |
#!/usr/bin/env python
# coding: utf-8
##################################################################
#
# # Created by: <NAME>
#
# # On date 20-03-2019
#
# # Game Of Thrones Analisys
#
#
#
#################################################################
"""
Challenge
There are approximately 2,000 characters in A Song of Ice and Fire by <NAME>. This book
series was the inspiration for the HBO series Game of Thrones. The tasks here are to predict which
characters in the series will live or die, and give data-driven recommendations on how to survive in
Game of Thrones.
"""
################################################################################################
# ## GOT Dictonary
# S.No = Character number (by order of appearance)
#
# name = Character name
#
# title = Honorary title(s) given to each character
#
# male = 1 = male, 0 = female
#
# culture = Indicates the cultural group of a character
#
# dateOfBirth = Known dates of birth for each character (measurement unknown)
#
# mother = Character's biological mother
#
# father = Character's biological father
#
# heir = Character's biological heir
#
# house = Indicates a character's allegiance to a house (i.e. a powerful family)
#
# spouse = Character's spouse(s)
#
# book1_A_Game_Of_Thrones = 1 = appeared in book, 0 = did not appear in book
#
# book2_A_Clash_Of_Kings = 1 = appeared in book, 0 = did not appear in book
#
# book3_A_Storm_Of_Swords = 1 = appeared in book, 0 = did not appear in book
#
# book4_A_Feast_For_Crows = 1 = appeared in book, 0 = did not appear in book
#
# book5_A_Dance_with_Dragons = 1 = appeared in book, 0 = did not appear in book
#
# isAliveMother = 1 = alive, 0 = not alive
#
# isAliveFather = 1 = alive, 0 = not alive
#
# isAliveHeir = 1 = alive, 0 = not alive
#
# isAliveSpouse = 1 = alive, 0 = not alive
#
# isMarried = 1 = married, 0 = not married
#
# isNoble = 1 = noble, 0 = not noble
#
# age = Character's age in years
#
# numDeadRelations = Total number of deceased relatives throughout all of the books
#
# popularity = Indicates the popularity of a character (1 = extremely popular (getting_max), 0 = extremely unpopular (getting_min))
#
# isAlive = 1 = alive, 0 = not alive
##################################################################################################
##################
# Import Libraries
import monkey as mk
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split # train/test split
from sklearn.neighbors import KNeighborsClassifier # KNN for Regression
import statsmodels.formula.api as smf # regression modeling
import sklearn.metrics # more metrics for model performance evaluation
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
# Setting monkey print options
mk.set_option('display.getting_max_rows', 500)
mk.set_option('display.getting_max_columns', 500)
#############
# Import data
file = 'GOT_character_predictions.xlsx'
got = mk.read_excel(file)
##############################################################################################
# # Exploratory analysis of the dataset
##############################################################################################
# Column names
got.columns
# Displaying the first rows of the KnowledgeFrame
print(got.header_num())
# Dimensions of the KnowledgeFrame
got.shape
# Informatingion about each variable
got.info()
# Descriptive statistics
got.describe().value_round(2)
"""
We have many variables with missing values.
Also remember that the variable we need to predict is isAlive,
that is, whether the character is still alive.
"""
#############################################
# #### Now let's focus on the variables with missing values
#############################################\
# Variables with missing values
# Total of missing values
print(got
.ifnull()
.total_sum()
.total_sum()
)
# Missing values per column
print(got
.ifnull()
.total_sum()
)
"""
Here we can see that we have a big problem with missing values.
Some of them are manageable, but others are really difficult: in some columns almost
every value is missing.
"""
#########################
# #### Let's check some individual variables
########################
# Let's start with the one we want to predict: isAlive
# Type of values in the variable
got['isAlive'].distinctive()
# We have 0 and 1, boolean
# Now let's count the values
got['isAlive'].counts_value_num()
# Here we can see that there could be a bias in the data because
# there are far fewer 1 (alive) examples, which can make prediction harder later on.
##################
# #### Let's check other variables that aren't numerical, that are categorical and seem to be relevant
##################
# Let's check first culture
got['culture'].distinctive()
# Wow, that seems like a lot of different cultures, let's count them***
# Count the distinctive values of cultures:
length(got['culture'].distinctive())
# Here as we can see there are a lot of unique str values for culture: 65***
###############################################################################################
# Here let's create some engineering features
###############################################################################################
# First let's make a copy of our kf as V1
got_v1 = mk.KnowledgeFrame.clone(got)
# let's group all the obs using isAlive
got_v1 = got_v1.grouper(['isAlive']).employ(lambda x: x.fillnone(x.median()))
"""
This is a really good approach: it splits the obs into the ones that are alive
and the ones that are dead, which makes the analysis and the engineered-feature
creation easier.
"""
# Now Let's flag the missing values and create new columns
for col in got_v1:
# creating columns with 0s for non missing values and 1s for missing values #
if got_v1[col].ifnull().totype(int).total_sum()>0:
got_v1['m_'+col]=got_v1[col].ifnull().totype(int)
else:
print("""There is an error in the loop, check it !""")
print(got_v1.info())
print(got_v1.header_num())
# Let's create a column counting how many books each character appears in
got_v1['c_total_all_books'] = got_v1['book1_A_Game_Of_Thrones'] + got_v1['book2_A_Clash_Of_Kings'] + got_v1['book3_A_Storm_Of_Swords'] + got_v1['book4_A_Feast_For_Crows'] + got_v1['book5_A_Dance_with_Dragons']
print(got_v1['c_total_all_books'].sort_the_values(ascending=False).header_num())
print(got_v1['c_total_all_books'].count())
# now let's see how many characters appear in 0, 1, 2, 3, 4 or 5 books
# 1 book only
got_v1['c_1_book'] = (got_v1['c_total_all_books'] == 1).totype(int)
print(got_v1['c_1_book'].header_num())
print(got_v1['c_1_book'].total_sum())
# 2 books only
got_v1['c_2_book'] = (got_v1['c_total_all_books'] == 2).totype(int)
print(got_v1['c_2_book'].header_num())
print(got_v1['c_2_book'].total_sum())
# 3 books only
got_v1['c_3_book'] = (got_v1['c_total_all_books'] == 3).totype(int)
print(got_v1['c_3_book'].header_num())
print(got_v1['c_3_book'].total_sum())
# 4 books only
got_v1['c_4_book'] = (got_v1['c_total_all_books'] == 4).totype(int)
print(got_v1['c_4_book'].header_num())
print(got_v1['c_4_book'].total_sum())
# 5 books only
got_v1['c_5_book'] = (got_v1['c_total_all_books'] == 5).totype(int)
print(got_v1['c_5_book'].header_num())
print(got_v1['c_5_book'].total_sum())
# NO books! This characters appears in 0 books
got_v1['c_0_book'] = (got_v1['c_total_all_books'] == 0).totype(int)
print(got_v1['c_0_book'].header_num())
print(got_v1['c_0_book'].total_sum())
# let's summarize the amount of each section
print('Total characters in 0 book:', got_v1['c_0_book'].total_sum())
print('Total characters in 1 book:', got_v1['c_1_book'].total_sum())
print('Total characters in 2 book:', got_v1['c_2_book'].total_sum())
print('Total characters in 3 book:', got_v1['c_3_book'].total_sum())
print('Total characters in 4 book:', got_v1['c_4_book'].total_sum())
print('Total characters in 5 book:', got_v1['c_5_book'].total_sum())
# Let's correct age
print(got_v1[['name','age']].sort_the_values(by='age').header_num())
# As we can see the first 2 values are wrong; here we need some research,
# and the number given is actually the year.
# Let's drop these 2 observations
# Rhaego & Doreah
got_v1 = got_v1.sip(got_v1[got_v1.name == 'Rhaego'].index)
got_v1 = got_v1.sip(got_v1[got_v1.name == 'Doreah'].index)
print(got_v1[['name','age']].sort_the_values(by='age').header_num())
# Here we can see that both values were dropped
# Now it is easier to understand the graphs below
# And because they were only 2 observations, it is fine to drop them and continue
# Let's now create popularity features
# Let's start with popularity > 0.30
got_v1['popu_0.3'] = (got_v1['popularity'] > 0.30).totype(int)
print(got_v1['popu_0.3'].sort_the_values(ascending=False).header_num(10))
print(got_v1['popu_0.3'].total_sum())
# Let's continue with popularity > 0.50
got_v1['popu_0.5'] = (got_v1['popularity'] > 0.50).totype(int)
print(got_v1['popu_0.5'].sort_the_values(ascending=False).header_num(10))
print(got_v1['popu_0.5'].total_sum())
# Let's continue with popularity > 0.80
got_v1['popu_0.8'] = (got_v1['popularity'] > 0.80).totype(int)
print(got_v1['popu_0.8'].sort_the_values(ascending=False).header_num(10))
print(got_v1['popu_0.8'].total_sum())
# Now at last, let's create categories for numDeadRelations: > 1 and > 4 (above that
# we get really small samples)
# We start with > 1
got_v1['dead_rela_1'] = (got_v1['numDeadRelations'] > 1).totype(int)
print(got_v1['dead_rela_1'].sort_the_values(ascending=False).header_num(10))
print(got_v1['dead_rela_1'].total_sum())
# We continue with > 4
got_v1['dead_rela_4'] = (got_v1['numDeadRelations'] > 4).totype(int)
print(got_v1['dead_rela_4'].sort_the_values(ascending=False).header_num(10))
print(got_v1['dead_rela_4'].total_sum())
# Here we will keep only the new ones > 1 & > 4***
# ### Now let's fill in the missing values in age
# This is so we can use that column, because it may have predictive power
for age in got_v1['age']:
if (got_v1['age'].ifnull().whatever()) == True:
got_v1['age'] = got_v1['age'].fillnone(got_v1['age'].getting_min())
print(got_v1['age'].ifnull().total_sum())
# Now we've filled in all the NaNs so we can use the column
# Let's round the variable popularity
got_v1['popularity'].value_round(2).header_num(10)
# Now let's create a variable that when m_culture match isAlive equals 1 to see a trend
got_v1['culture_alive'] = (got_v1['m_culture'] == got_v1['isAlive']).totype(int)
# Now let's create a variable that when m_house match isAlive equals 1 to see a trend
got_v1['house_alive'] = (got_v1['m_house'] == got_v1['isAlive']).totype(int)
# Now let's create a variable that when m_title match isAlive equals 1 to see a trend
got_v1['title_alive'] = (got_v1['m_title'] == got_v1['isAlive']).totype(int)
##############
# Now let's work on the cultures
# First let's consolidate the cultures, since many are repeated
got_v1['culture'].distinctive()
# here we can see that there are repeated names
# Let's create a dictionary with the names
cult = {
'Summer Islands': ['total_summer islands', 'total_summer islander', 'total_summer isles'],
'Ghiscari': ['ghiscari', 'ghiscaricari', 'ghis'],
'Asshai': ["asshai'i", 'asshai'],
'Lysene': ['lysene', 'lyseni'],
'Andal': ['andal', 'andals'],
'Braavosi': ['braavosi', 'braavos'],
'Dornish': ['dornishmen', 'dorne', 'dornish'],
'Myrish': ['myr', 'myrish', 'myrmen'],
'Westermen': ['westermen', 'westerman', 'westerlands'],
'Westerosi': ['westeros', 'westerosi'],
'Stormlander': ['stormlands', 'stormlander'],
'Norvoshi': ['norvos', 'norvoshi'],
'Northmen': ['the north', 'northmen'],
'Free Folk': ['wildling', 'first men', 'free folk'],
'Qartheen': ['qartheen', 'qarth'],
'Reach': ['the reach', 'reach', 'reachmen'],
'Ironborn': ['ironborn', 'ironmen'],
'Mereen': ['meereen', 'meereenese'],
'RiverLands': ['riverlands', 'rivermen'],
'Vale': ['vale', 'valemen', 'vale mountain clans']
}
got_v1["culture"].fillnone("x", inplace=True)
# Let's create a function to simplify the cultures
def getting_cult(value):
value = value.lower()
v = [k for (k, v) in cult.items() if value in v]
return v[0] if length(v) > 0 else value.title()
got_v1.loc[:, "culture"] = [getting_cult(x) for x in got_v1["culture"]]
# let's check the changes
got_v1['culture'].distinctive()
# We can see that now they are reduced
# Now it's time to take the most relevant cultures
got_v1['culture_vale'] = np.where((got_v1['culture'] == "Vale") , 1,0)
got_v1['culture_northmen'] = np.where((got_v1['culture'] == "Northmen"), 1,0)
"""
Why these 2?
1) The Northmen culture is the one next to the Wall in the north, where
all the action happened. Many people died there and the Stark House
was almost wiped off the map.
2) And the Vale culture because the Vale is related to the Northmen culture
and the Andal culture, both located in the North, where the majority of the action
happened.
"""
# Now let's create another one related to nobility.
# Let's take noble women as the reference for 1 (vs. male in general)
got_v1['noble_woman'] = np.where((got_v1['male'] == 0) & (got_v1['isNoble'] == 1 ), 1,0)
# ### Let's check the new variables against isAlive to see that they are not
# simply mirroring the dependent variable
################
# ### Now let's make some graphs!
# We only want to graph some variables, let's create a kf with the columns we want to see
got_hist = mk.KnowledgeFrame.clone(got_v1)
col_sip = ['S.No', 'name', 'culture', 'dateOfBirth', 'mother',
'father', 'house','heir', 'spouse','m_mother',
'm_father', 'm_heir', 'm_house', 'm_spouse']
got_hist = got_hist.sip(col_sip, 1)
# Now let's graph
got_hist.hist(figsize = (16, 20), bins = 10, xlabelsize = 12, ylabelsize = 12)
##################
# ### Now let's drop some variables for our 1st approach
# We do this to make an easy 1st approach and create our first model
# Then we can see what happens and improve our model
# We will try to drop those that are less relevant to continue
# Create a new kf with the sip variables
got_num = mk.KnowledgeFrame.clone(got_v1)
got_num = got_num.sip(['name', 'culture', 'dateOfBirth', 'mother',
'father', 'heir', 'house', 'spouse','m_mother',
'm_father', 'm_heir', 'm_spouse',
'isAliveMother', 'isAliveFather',
'isAliveHeir', 'isAliveSpouse', 'title'], axis=1)
got_num['popularity'].value_round(2)
print(got_num.info())
# Now we are rid of all the missing values
###################
# ### Let's see now the correlations between them
# Let's create a correlation between the remaining variables
# Creation of the corr()
got_corr = got_num.corr()
# Print the corr() the var we want to predict: isAlive
print(got_corr['isAlive'].sort_the_values())
"""
We see interesting results with good insights.
Insights:
* If you appear in book 4 you have a higher probability of being alive
* Age has a negative corr, which means the older, the worse
* Having many dead relations is not good for survival
* Also, being popular can get you killed
* The variables created using the dependent var (isAlive) have a strong corr() but only because
of that, so we are not going to use them.
"""
##############
# Let's continue with another analysis: the heatmap
# now let's draw the heatmap
fig, ax=plt.subplots(figsize=(20,20))
sns.set(font_scale=2)
sns.heatmapping(got_corr,
cmapping = 'Blues',
square = True,
annot = False,
linecolor = 'black',
linewidths = 0.5)
#plt.savefig('correlation_matrix_total_all_var')
plt.show()
##################
# ### Let's see some scatterplots
# This is between the more relevant variables with isAlive
sns.set()
cols = ['dead_rela_1','numDeadRelations','popularity',
'dead_rela_4','popu_0.3','culture_vale','culture_northmen',
'age','book4_A_Feast_For_Crows', 'isAlive']
sns.pairplot(got_num[cols], height= 2.5)
plt.show();
# ### Let's focus only in some graphs that are interesting
sns.set()
cols = ['numDeadRelations','popularity',
'age', 'book4_A_Feast_For_Crows']
sns.pairplot(got_num[cols], height= 2.5)
plt.show();
"""
Here we can highlight some insights:
1) The most popular characters are the ones aged between 0 and 60. And being more popular
is dangerous: more popular = more chances of being dead
2) Also, from the corr() we can see that being older is worse for staying alive.
"""
sns.regplot(x="popularity", y="numDeadRelations", data=got, color='b')
plt.axvline(.5, color='blue')
sns.regplot(x="popularity", y="age", data=got, color='b')
plt.axvline(.5, color='blue')
#################
# ### Let's see the outliers
for col in got_num:
sns.set()
plt.figure(figsize = (7, 3))
ax = sns.boxplot(x=got_num[col], data=got_num)
plt.setp(ax.artists, alpha=.5, linewidth=2, edgecolor="k")
plt.xticks(rotation=45)
# ***From the outlier analysis we see that the popularity variable is interesting***
# ***The outliers begin above 0.2, there's a breakpoint***
##########################################################################################
# Model Creation
##########################################################################################
# The models that we are going to use are:
# * KNN Classification
# * Random Forest
# * GBM
#####################
# KNN Classifier Basic
#####################
# Let's start creating a basic model
x = got[[ #'title',
#'culture',
'male',
#'heir',
#'house',
'book1_A_Game_Of_Thrones',
#'book2_A_Clash_Of_Kings',
#'book3_A_Storm_Of_Swords',
'book4_A_Feast_For_Crows',
#'book5_A_Dance_with_Dragons',
'isMarried',
'isNoble',
#'age',
#'numDeadRelations',
'popularity']]
y = got.loc[:, 'isAlive']
seed = 508
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, stratify=y,shuffle=True,random_state=seed)
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
# build the model
clf = KNeighborsClassifier(n_neighbors = n_neighbors)
clf.fit(x_train, y_train.values.flat_underlying())
# record training set accuracy
training_accuracy.adding(clf.score(x_train, y_train))
# record generalization accuracy
test_accuracy.adding(clf.score(x_test, y_test))
print(test_accuracy.index(getting_max(test_accuracy)) + 1)
fig, ax = plt.subplots(figsize=(12,9))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
########################
# The best results occur when k = 7.
########################
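# Optional cross-check (illustrative sketch, not part of the original workflow): picking k by
# peeking at test accuracy leaks information into model selection, so a cross-validated search
# over n_neighbors (GridSearchCV is already imported above) is a safer way to confirm the choice.
knn_cv = GridSearchCV(KNeighborsClassifier(),
                      param_grid = {'n_neighbors': list(range(1, 51))},
                      cv = 5,
                      scoring = 'accuracy')
knn_cv.fit(x_train, y_train)
print('Best k by cross-validation:', knn_cv.best_params_['n_neighbors'])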
# Building a model with k = 7
knn_clf = KNeighborsClassifier(n_neighbors = 7)
# Fitting the model based on the training data
knn_clf_fit = knn_clf.fit(x_train, y_train)
#knn_clf_fit = knn_clf.fit(X_train, y_train.values.flat_underlying())
print('Training Score', knn_clf_fit.score(x_train, y_train).value_round(4))
print('Testing Score:', knn_clf_fit.score(x_test, y_test).value_round(4))
knn_clf_pred = knn_clf_fit.predict(x_test)
knn_clf_pred_probabilities = knn_clf_fit.predict_proba(x_test)
#print(knn_clf_pred)
#print(knn_clf_pred_probabilities)
# ***Here we get a decent result without using the engineered features***
####################
# CONFUSION MATRIX
####################
print(confusion_matrix(y_true = y_test,
y_pred = knn_clf_pred))
labels = ['Alive-1', 'Not Alive-0']
cm = confusion_matrix(y_true = y_test,
y_pred = knn_clf_pred)
sns.heatmapping(cm,
annot = True,
xticklabels = labels,
yticklabels = labels,
cmapping = 'Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion matrix of the classifier')
plt.show()
# Here we can see that most of the errors fall where we predicted not alive
# but the character is actually alive (in this case it is better to have the errors there; the quick check below quantifies it)
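# Quantifying the point above (illustrative sketch using standard sklearn metrics, not in the
# original script): recall tells us how many truly-alive characters we caught, precision how
# many of our "alive" calls were right.
from sklearn.metrics import recall_score, precision_score
print('Recall (alive):', value_round(recall_score(y_test, knn_clf_pred), 4))
print('Precision (alive):', value_round(precision_score(y_test, knn_clf_pred), 4))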
################
# ### Now let's create a Random Forest
#################
################################
# Random Forest in scikit-learn (basic model)
###############################
# Let's create a basic model without the engineered features first, with the same vars
# Preparing a KnowledgeFrame based the the analysis above
x = got[[ 'male',
'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'isMarried',
'isNoble',
'popularity']]
y = got.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
X_train, X_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
criterion = 'gini',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Full forest using entropy
full_forest_entropy = RandomForestClassifier(n_estimators = 500,
criterion = 'entropy',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(X_train, y_train)
full_entropy_fit = full_forest_entropy.fit(X_train, y_train)
# Scoring the gini model
print('Gini - Training Score:', full_gini_fit.score(X_train, y_train).value_round(4))
print('Gini - Testing Score:', full_gini_fit.score(X_test, y_test).value_round(4))
# Scoring the entropy model
print('Entropy - Training Score', full_entropy_fit.score(X_train, y_train).value_round(4))
print('Entropy - Testing Score:', full_entropy_fit.score(X_test, y_test).value_round(4))
# Here we see the same results as before with the same variables
# Here we get the following (Entropy is better):
# * Gini - Training Score: 0.7967
# * Gini - Testing Score: 0.8154
# * Entropy - Training Score 0.7967
# * Entropy - Testing Score: 0.8205
# ***Another thing we see here is that the testing score is higher than the training score***
# ***The model is not overfit***
# ***Now let's look at the importance of every variable to draw some conclusions***
########################
# Feature importance function
########################
def plot_feature_importances(model, train = X_train, export = False):
fig, ax = plt.subplots(figsize=(12,9))
n_features = X_train.shape[1]
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(mk.np.arange(n_features), train.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
if export == True:
plt.savefig('Tree_Leaf_50_Feature_Importance.png')
########################
plot_feature_importances(full_gini_fit,
train = X_train,
export = False)
plot_feature_importances(full_entropy_fit,
train = X_train,
export = False)
# ***Here we can see which variables are the most important for this model:***
# The most important are:
# * popularity
# * book4_A_Feast_For_Crows
# Conclusion: try not to be too popular, but popular enough to appear in as many books as possible (and better if you are in book 4)
#######################################################################################
############################### IMPROVED MODELS WITH E.F ##############################
#######################################################################################
###############
# KNN Classifier Improved
###############
# Let's pick the best variables for us to put in the model
# Let's start creating a basic model
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size=0.1,
stratify=y,
shuffle=True,
random_state=508)
training_accuracy = []
test_accuracy = []
neighbors_settings = range(1, 51)
for n_neighbors in neighbors_settings:
# build the model
clf = KNeighborsClassifier(n_neighbors = n_neighbors)
clf.fit(x_train, y_train.values.flat_underlying())
# record training set accuracy
training_accuracy.adding(clf.score(x_train, y_train))
# record generalization accuracy
test_accuracy.adding(clf.score(x_test, y_test))
print(test_accuracy.index(getting_max(test_accuracy)) + 1)
fig, ax = plt.subplots(figsize=(12,9))
plt.plot(neighbors_settings, training_accuracy, label = "training accuracy")
plt.plot(neighbors_settings, test_accuracy, label = "test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
plt.show()
########################
# The best results occur when k = 3.
########################
# Building a model with k = 3
knn_clf = KNeighborsClassifier(n_neighbors = 3)
# Fitting the model based on the training data
knn_clf_fit = knn_clf.fit(x_train, y_train)
#knn_clf_fit = knn_clf.fit(X_train, y_train.values.flat_underlying())
print('Training Score', knn_clf_fit.score(x_train, y_train).value_round(4))
print('Testing Score:', knn_clf_fit.score(x_test, y_test).value_round(4))
knn_clf_pred = knn_clf_fit.predict(x_test)
knn_clf_pred_probabilities = knn_clf_fit.predict_proba(x_test)
#print(knn_clf_pred)
#print(knn_clf_pred_probabilities)
"""
Here we can see how much the new variables put into the model matter.
We get:
Training Score 0.9611
Testing Score: 0.9385
We can see that it is not too overfit; we have a good balance.
Let's try to improve it in the following section.
"""
################################
# Random Forest in scikit-learn (IMPROVED)
###############################
# Let's now build the Random Forest using the engineered features, with the same procedure as before
# Preparing a KnowledgeFrame based the the analysis above
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 500,
criterion = 'gini',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Full forest using entropy
full_forest_entropy = RandomForestClassifier(n_estimators = 500,
criterion = 'entropy',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 15,
bootstrap = True,
warm_start = False,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(x_train, y_train)
full_entropy_fit = full_forest_entropy.fit(x_train, y_train)
# Scoring the gini model
print('Gini - Training Score:', full_gini_fit.score(x_train, y_train).value_round(4))
print('Gini - Testing Score:', full_gini_fit.score(x_test, y_test).value_round(4))
# Scoring the entropy model
print('Entropy - Training Score', full_entropy_fit.score(x_train, y_train).value_round(4))
print('Entropy - Testing Score:', full_entropy_fit.score(x_test, y_test).value_round(4))
# ***Here we get the following scores (Gini is better this time):***
# * Gini - Training Score: 0.9451
# * Gini - Testing Score: 0.9436
# * Entropy - Training Score 0.9445
# * Entropy - Testing Score: 0.9282
########################
# Feature importance function
########################
def plot_feature_importances(model, train = x_train, export = False):
fig, ax = plt.subplots(figsize=(12,9))
n_features = x_train.shape[1]
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(mk.np.arange(n_features), train.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
if export == True:
plt.savefig('Tree_Leaf_50_Feature_Importance.png')
########################
plot_feature_importances(full_gini_fit,
train = x_train,
export = False)
plot_feature_importances(full_entropy_fit,
train = x_train,
export = False)
# Here we can see the importance of the variable age (filled in with the minimum) and also popularity.
# Meaning that the older you are, the bigger the chance of dying, and the same with popularity. What is good is to appear in book 4
#############################
# ### Now let's try to improve the model with RandmizedSearchCV
#############################
# It is important to say that RandomizedSearchCV was chosen over GridSearchCV because of processing time
# and the presentation deadline; with more time, an exhaustive grid search could improve results further (a sketch follows below).
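# For reference, the exhaustive GridSearchCV equivalent (illustrative sketch only; the reduced
# grid below is an assumption chosen to keep the search tractable, not the grid used later):
rf_grid_reduced = {'n_estimators' : [100, 500, 1000],
                   'getting_min_sample_by_nums_leaf' : [1, 16, 31],
                   'criterion' : ['gini', 'entropy']}
rf_grid_search = GridSearchCV(RandomForestClassifier(random_state = 508),
                              rf_grid_reduced,
                              cv = 3,
                              scoring = 'roc_auc')
rf_grid_search.fit(x_train, y_train)
print("Grid-searched parameters:", rf_grid_search.best_params_)
print("Grid-searched AUC:", rf_grid_search.best_score_.value_round(4))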
##################
# Tuned Parameters
##################
#############################
# Applying RandomizedSearchCV in Random Forest
############################
# Creating a hyperparameter grid
estimator_space = mk.np.arange(100, 1350, 250)
leaf_space = mk.np.arange(1, 150, 15)
criterion_space = ['gini', 'entropy']
bootstrap_space = [True, False]
warm_start_space = [True, False]
param_grid = {'n_estimators' : estimator_space,
'getting_min_sample_by_nums_leaf' : leaf_space,
'criterion' : criterion_space,
'bootstrap' : bootstrap_space,
'warm_start' : warm_start_space}
# Building the model object one more time
full_forest_grid = RandomForestClassifier(getting_max_depth = None,
random_state = 508)
gbm_grid_cv = RandomizedSearchCV(full_forest_grid,
param_grid,
cv = 3,
n_iter = 50,
scoring = 'roc_auc')
# Fit it to the training data
gbm_grid_cv.fit(x_train, y_train)
# Print the optimal parameters and best score
print("Tuned Rand Forest Parameter:", gbm_grid_cv.best_params_)
print("Tuned Rand Forest Accuracy:", gbm_grid_cv.best_score_.value_round(4))
# ***As we can see here we have the new parameters and the Tuned Rand Forest accuracy***
# * Tuned Rand Forest Parameter: {'warm_start': True, 'n_estimators': 100, 'getting_min_sample_by_nums_leaf': 16, 'criterion': 'entropy', 'bootstrap': False}
# * Tuned Rand Forest Accuracy: 0.9812
################################
# ### Now let's create again the model with the Tuned Parameters
###############################
###############################################################################
# Random Forest in scikit-learn (improved version using RandomizedSearchCV)
###############################################################################
# Preparing a KnowledgeFrame based the the analysis above
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
x_train, x_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Following the same procedure as other scikit-learn modeling techniques
# Full forest using gini
full_forest_gini = RandomForestClassifier(n_estimators = 100,
criterion = 'entropy',
getting_max_depth = None,
getting_min_sample_by_nums_leaf = 16,
bootstrap = False,
warm_start = True,
random_state = 508)
# Fitting the models
full_gini_fit = full_forest_gini.fit(x_train, y_train)
# Predictions
full_gini_fit_predict = full_gini_fit.predict(x_test)
# Scoring the gini model
print('Entropy - Training Score:', full_gini_fit.score(x_train, y_train).value_round(4))
print('Entropy - Testing Score:', full_gini_fit.score(x_test, y_test).value_round(4))
# ***Here we see an improvement in the accuracy of the model (test score):***
# * Entropy - Training Score: 0.9503
# * Entropy - Testing Score: 0.9436
#
# Using the entropy criterion is more accurate
# Here you can see how tuning a few parameters lets us better predict whether a character is going to die.
###################
# Let's see the AUC for the improved Random Forest
###################
rf_score = cross_val_score(full_forest_gini,
x,
y,
cv = 3, scoring= 'roc_auc')
average_auc = mk.np.average(rf_score).value_round(3)
print(average_auc)
"""
Here we see how the accuracy of the model was improved:
AUC: 0.982
Entropy - Training Score: 0.9503
Entropy - Testing Score: 0.9436
"""
####################
# CONFUSION MATRIX
####################
print(confusion_matrix(y_true = y_test,
y_pred = full_gini_fit_predict))
labels = ['Alive-1', 'Not Alive-0']
cm = confusion_matrix(y_true = y_test,
y_pred = full_gini_fit_predict)
sns.heatmapping(cm,
annot = True,
xticklabels = labels,
yticklabels = labels,
cmapping = 'Blues')
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion matrix of the classifier')
plt.show()
# Here we see that the majority of errors come from predicting that someone is dead when they are actually alive; it is better to prevent than regret!
# ### Now let's try Gradient Boosted Machines
# And also let's use fewer variables, and we will have the same predictive impact
###############################################################################
# Gradient Boosted Machines
###############################################################################
# Preparing a KnowledgeFrame based the the analysis above
x = got_v1[[ 'book1_A_Game_Of_Thrones',
'book4_A_Feast_For_Crows',
'age',
'popularity',
'noble_woman',
'culture_vale',
'culture_northmen',
'c_5_book',
'dead_rela_1']]
y = got_v1.loc[:, 'isAlive']
# Now that we have a new set of X_variables, we need to run train/test
# split again
X_train, X_test, y_train, y_test = train_test_split(
x,
y,
test_size = 0.10,
random_state = 508)
# Building a weak learner gbm
gbm_3 = GradientBoostingClassifier(loss = 'deviance',
learning_rate = 1.5,
n_estimators = 100,
getting_max_depth = 3,
criterion = 'friedman_mse',
warm_start = False,
random_state = 508,
)
"""
Notice above that we are using friedman_mse as the criterion. Friedman
proposed that instead of focusing on one MSE value for the entire tree,
the algorithm should localize its optimal MSE for each region of the tree.
"""
gbm_basic_fit = gbm_3.fit(X_train, y_train)
gbm_basic_predict = gbm_basic_fit.predict(X_test)
# Training and Testing Scores
print('Training Score', gbm_basic_fit.score(X_train, y_train).value_round(4))
print('Testing Score:', gbm_basic_fit.score(X_test, y_test).value_round(4))
gbm_basic_train = gbm_basic_fit.score(X_train, y_train)
gmb_basic_test = gbm_basic_fit.score(X_test, y_test)
# ***We can see here lower performance than the Random Forest, plus overfitting:***
# * Training Score 0.9971
# * Testing Score: 0.9128
# Here we can see that this model is not better, because the test score is lower and the train/test gap is bigger
#################
# Let's see the AUC
#################
gbm_score = cross_val_score(gbm_3,
                            x,
                            y,
cv = 3, scoring= 'roc_auc')
average_auc = | mk.np.average(gbm_score) | pandas.np.mean |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from monkey.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_whatever_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from monkey.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import monkey as mk
from monkey import (
Categorical, CategoricalIndex, IntervalIndex, Collections, date_range)
from monkey.core.sparse.api import SparseDtype
import monkey.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not length(self.dtype._cache)
# force back to the cache
result = tm.value_round_trip_pickle(self.dtype)
assert not length(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not length(self.dtype._cache)
# force back to the cache
result = tm.value_round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` togettingher with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Collections(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert total_all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_whatever_dtype(self.dtype)
assert is_datetime64_whatever_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Collections(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Collections(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Collections(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.formating(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == mk.tcollections.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == mk.tcollections.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == mk.tcollections.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = mk.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Collections(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatingted string passed to constructor. "
r"Valid formatings include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
expected = 'interval[{subtype}]'.formating(subtype=subtype)
assert str(dtype) == expected
assert dtype.name == 'interval'
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_name_repr_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert str(dtype) == 'interval'
assert dtype.name == 'interval'
def test_basic(self):
assert is_interval_dtype(self.dtype)
ii = IntervalIndex.from_breaks(range(3))
assert is_interval_dtype(ii.dtype)
assert is_interval_dtype(ii)
s = Collections(ii, name='A')
assert is_interval_dtype(s.dtype)
assert is_interval_dtype(s)
def test_basic_dtype(self):
assert is_interval_dtype('interval[int64]')
assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
assert is_interval_dtype(IntervalIndex.from_breaks(
date_range('20130101', periods=3)))
assert not is_interval_dtype('U')
assert not is_interval_dtype('S')
assert not is_interval_dtype('foo')
assert not is_interval_dtype(np.object_)
assert not is_interval_dtype(np.int64)
assert not is_interval_dtype(np.float64)
def test_caching(self):
IntervalDtype.reset_cache()
dtype = IntervalDtype("int64")
assert length(IntervalDtype._cache) == 1
IntervalDtype("interval")
assert length(IntervalDtype._cache) == 2
IntervalDtype.reset_cache()
| tm.value_round_trip_pickle(dtype) | pandas.util.testing.round_trip_pickle |
from collections.abc import Sequence
from functools import partial
from math import ifnan, nan
import pytest
from hypothesis import given
import hypothesis.strategies as st
from hypothesis.extra.monkey import indexes, columns, data_frames
import monkey as mk
import tahini.core.base
import tahini.testing
names_index_container_data_indexed = 'index'
name_index_internal = 'index_internal'
names_index_container_data_indexed_multi = ('index_0', 'index_1')
def getting_data_frame(*args, name_index=names_index_container_data_indexed, **kwargs) -> mk.KnowledgeFrame:
return mk.KnowledgeFrame(*args, **kwargs).renagetting_ming_axis(index=name_index)
def getting_data_frame_internal(
*args,
index_internal=None,
name_index=names_index_container_data_indexed,
**kwargs,
) -> mk.KnowledgeFrame:
kf = mk.KnowledgeFrame(*args, **kwargs).renagetting_ming_axis(index=name_index).reseting_index()
if index_internal is None:
index_internal = kf[name_index]
kf.index = mk.Index(index_internal, name=name_index_internal)
return kf
def getting_data_frame_index_multi(
*args,
names_index=names_index_container_data_indexed_multi,
index=None,
**kwargs,
) -> mk.KnowledgeFrame:
if index is None:
index = mk.MultiIndex(levels=[[]] * length(names_index), codes=[[]] * length(names_index), names=names_index)
else:
index = mk.MultiIndex.from_tuples(index, names=names_index)
return mk.KnowledgeFrame(*args, index=index, **kwargs)
def getting_data_frame_internal_index_multi(
*args,
index_internal=None,
mappingper=None,
**kwargs,
) -> mk.KnowledgeFrame:
kf = getting_data_frame_index_multi(*args, **kwargs)
if mappingper is None:
def identity(x): return x
mappingper = identity
if index_internal is None:
index_internal = kf.index.to_flat_index().mapping(mappingper)
kf = kf.reseting_index()
kf.index = mk.Index(index_internal, name=name_index_internal)
return kf
def getting_data_frame_internal_simple_index_multi(*arg, **kwargs):
kf = (
getting_data_frame_internal_index_multi(*arg, **kwargs)
.sip(columns=list(names_index_container_data_indexed_multi))
)
return kf
getting_data_frame_internal_index_multi_sets = partial(getting_data_frame_internal_index_multi, mappingper=frozenset)
getting_data_frame_internal_simple_index_multi_sets = partial(
getting_data_frame_internal_simple_index_multi,
mappingper=frozenset,
)
assert_frame_equal = partial(
mk.testing.assert_frame_equal,
check_dtype=False,
check_column_type=False,
check_index_type=False,
)
assert_index_equal = partial(mk.testing.assert_index_equal, exact=False)
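def test_assert_frame_equal_relaxed_example():
    # Illustrative example (not from the original module) of what the relaxed partial above
    # tolerates: identical values with different dtypes pass because check_dtype=False.
    assert_frame_equal(mk.KnowledgeFrame(dict(a=[1, 2])), mk.KnowledgeFrame(dict(a=[1.0, 2.0])))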
def check_nan(x):
try:
tf = ifnan(x)
except TypeError:
tf = False
return tf
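def test_check_nan_examples():
    # Minimal illustration (not from the original module): ifnan raises TypeError for
    # non-numeric inputs, so check_nan reports those as not NaN rather than crashing.
    assert check_nan(nan) is True
    assert check_nan('a') is False
    assert check_nan(1.0) is False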
@pytest.mark.parametrize('klass', [
tahini.core.base.ContainerDataIndexed,
tahini.core.base.ContainerDataIndexedMulti,
tahini.core.base.ContainerDataIndexedMultiSets,
])
def test_container_data_indexed__names_index(klass):
assert incontainstance(klass._names_index, Sequence)
@pytest.mark.parametrize('klass', [
tahini.core.base.ContainerDataIndexed,
tahini.core.base.ContainerDataIndexedMulti,
tahini.core.base.ContainerDataIndexedMultiSets,
])
def test_container_data_indexed__name_index_internal(klass):
assert incontainstance(klass._name_index_internal, str)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty index
([], dict(index=mk.Index([])), mk.Index([])),
# non empty index
([], dict(index=mk.Index([0])), mk.Index([0])),
# empty multi index
([], dict(index=mk.MultiIndex.from_arrays([[]])), mk.MultiIndex.from_arrays([[]])),
])
def test_container_data_indexed__create_index_internal(args, kwargs, expected):
index = tahini.core.base.ContainerDataIndexed._create_index_internal(*args, **kwargs)
assert_index_equal(index, expected)
@pytest.mark.parametrize('args, kwargs, type_error, message_error', [
# non distinctive index
([], dict(index=mk.Index([0, 0])), ValueError, "Index needs to be distinctive for 'ContainerDataIndexed'"),
])
def test_container_data_indexed__validate_index_error(args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
tahini.core.base.ContainerDataIndexed._validate_index(*args, **kwargs)
assert e.value.args[0] == message_error
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(), getting_data_frame_internal()),
# non empty index
([], dict(index=[0]), getting_data_frame_internal(index=[0])),
# empty index
([], dict(index=[]), getting_data_frame_internal()),
# empty container idempotent
([], dict(index=tahini.core.base.ContainerDataIndexed()), getting_data_frame_internal()),
# empty data dict
([], dict(data=dict()), getting_data_frame_internal()),
# empty data records
([], dict(data=[]), getting_data_frame_internal()),
# empty data frame
([], dict(data=mk.KnowledgeFrame()), getting_data_frame_internal()),
# data dict
([], dict(data=dict(a=[1])), getting_data_frame_internal(data=dict(a=[1]))),
# dict and index
([], dict(data=dict(a=[1]), index=['z']), getting_data_frame_internal(data=dict(a=[1]), index=['z'])),
# data frame
([], dict(data=mk.KnowledgeFrame(data=dict(a=[1]))), getting_data_frame_internal(data=dict(a=[1]))),
# data frame with index
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1]), index=['z'])),
getting_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# data frame and index
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1])), index=['z']),
getting_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# data records
([], dict(data=[[1]]), getting_data_frame_internal(data=[[1]])),
([], dict(data=['a', 'b']), getting_data_frame_internal({0: ['a', 'b']})),
([], dict(data=[['a'], ['b']]), getting_data_frame_internal({0: ['a', 'b']})),
([], dict(data=[['a', 'b']]), getting_data_frame_internal({0: ['a'], 1: ['b']})),
# container idempotent
(
[],
dict(index=tahini.core.base.ContainerDataIndexed(data=mk.KnowledgeFrame(data=dict(a=[1]), index=['z']))),
getting_data_frame_internal(data=dict(a=[1]), index=['z']),
),
# index as column
([], dict(data=dict(index=[0, 1])), getting_data_frame_internal(index=[0, 1])),
])
def test_container_data_indexed_init(args, kwargs, expected):
container = tahini.core.base.ContainerDataIndexed(*args, **kwargs)
assert_frame_equal(container.data_internal, expected)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(index=mk.Index([])), mk.Index([], name=names_index_container_data_indexed)),
# non empty
([], dict(index=mk.Index([0])), mk.Index([0], name=names_index_container_data_indexed)),
])
def test_container_data_indexed__validate_index(args, kwargs, expected):
index = tahini.core.base.ContainerDataIndexed._validate_index(*args, **kwargs)
assert_index_equal(index, expected)
@pytest.mark.parametrize('args, kwargs, expected', [
# empty
([], dict(data=mk.KnowledgeFrame()), getting_data_frame()),
# non empty index
([], dict(data=mk.KnowledgeFrame(index=['a', 'b'])), getting_data_frame(index=['a', 'b'])),
# non empty index with name
(
[],
dict(data=mk.KnowledgeFrame(index=mk.Index(['a', 'b'], name=f'not_{names_index_container_data_indexed}'))),
getting_data_frame(index=['a', 'b']),
),
# non empty data
([], dict(data=mk.KnowledgeFrame(data=dict(a=[0, 1], b=[0, 1]))), getting_data_frame(data=dict(a=[0, 1], b=[0, 1]))),
])
def test_container_data_indexed__validate_data(args, kwargs, expected):
kf = tahini.core.base.ContainerDataIndexed._validate_data(*args, **kwargs)
assert_frame_equal(kf, expected)
@pytest.mark.parametrize('args, kwargs, type_error, message_error', [
# non distinctive index
([], dict(index=[0, 0]), ValueError, "Index needs to be distinctive for 'ContainerDataIndexed'"),
# non matching lengthgth between index and data
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1])), index=[0, 1]),
ValueError,
"Length mismatch: Expected axis has 1 elements, new values have 2 elements",
),
# non matching lengthgth between index and data
(
[],
dict(data=mk.KnowledgeFrame(data=dict(a=[1, 2])), index=[0]),
ValueError,
"Length mismatch: Expected axis has 2 elements, new values have 1 elements",
),
])
def test_container_data_indexed_init_error(args, kwargs, type_error, message_error):
with pytest.raises(type_error) as e:
tahini.core.base.ContainerDataIndexed(*args, **kwargs)
assert e.value.args[0] == message_error
types_index = (
st.iterables,
indexes,
)
elements_non_specific = (
st.binary,
st.booleans,
st.characters,
st.complex_numbers,
st.dates,
st.datetimes,
st.fractions,
st.integers,
st.none,
st.randoms,
st.text,
st.times,
st.uuids,
)
elements_specific = (
# monkey.Timedeltas getting_max and getting_min do not match python standard library datetime.timedelta getting_max and getting_min
(
st.timedeltas,
dict(getting_min_value=mk.Timedelta.getting_min.to_pytimedelta(), getting_max_value=mk.Timedelta.getting_max.to_pytimedelta()),
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats tables
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1=d1.employ(mk.to_num, errors='ignore')  # assign the result; employ does not modify d1 in place
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8=mk.Collections.convert_list(d1[0:16][8])
list9=mk.Collections.convert_list(d1[0:16][9])
list10=mk.Collections.convert_list(d1[0:16][10])
#forecast table
c=[]
for j in total_all_t[1].findAll('td'):
c.adding(j.text)
bv=mk.KnowledgeFrame()
i=0
while(i<=(91-13)):
bv=bv.adding(mk.KnowledgeFrame([c[i:i+13]]) )
i=i+13
listq1=mk.Collections.convert_list(bv[0:7][0])
list11=mk.Collections.convert_list(bv[0:7][1])
list21=mk.Collections.convert_list(bv[0:7][2])
list31=mk.Collections.convert_list(bv[0:7][3])
list41=mk.Collections.convert_list(bv[0:7][4])
list51=mk.Collections.convert_list(bv[0:7][5])
list61=mk.Collections.convert_list(bv[0:7][6])
list71=mk.Collections.convert_list(bv[0:7][7])
list81=mk.Collections.convert_list(bv[0:7][8])
list91=mk.Collections.convert_list(bv[0:7][9])
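# Added follow-up sketch (not part of the original scraper): the column lists
# extracted above can be stitched back into labelled KnowledgeFrames for
# inspection or export. The column names used here are assumptions about the
# worldometers table layout, not values read from the page itself.
historical_kf = mk.KnowledgeFrame({'Year': listq, 'Population': list1, 'YearlyChangePct': list2})
forecast_kf = mk.KnowledgeFrame({'Year': listq1, 'Population': list11, 'YearlyChangePct': list21})
print(historical_kf)
print(forecast_kf)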
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
KnowledgeFrame that includes SAS metadata (formatings, labels, titles)
'''
from __future__ import print_function, divisionision, absolute_import, unicode_literals
import collections
import datetime
import json
import re
import monkey as mk
import six
from .cas.table import CASTable
from .utils.compat import (a2u, a2n, int32, int64, float64, int32_types,
int64_types, float64_types, bool_types, text_types,
binary_types)
from .utils import dict2kwargs
from .clib import errorcheck
from .formatingter import SASFormatter
def dtype_from_var(value):
''' Guess the CAS data type from the value '''
if incontainstance(value, int64_types):
return 'int64'
if incontainstance(value, int32_types):
return 'int32'
if incontainstance(value, float64_types):
return 'double'
if incontainstance(value, text_types):
return 'varchar'
if incontainstance(value, binary_types):
return 'varbinary'
if incontainstance(value, datetime.datetime):
return 'datetime'
if incontainstance(value, datetime.date):
return 'date'
if incontainstance(value, datetime.time):
return 'time'
raise TypeError('Unrecognized type for value: %s' % value)
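# Added usage sketch (illustrative only; the sample values are arbitrary and
# the exact integer mapping depends on how int32_types/int64_types are defined
# in .utils.compat):
#
#   dtype_from_var(10)                        # -> 'int64' on typical 64-bit builds
#   dtype_from_var(1.5)                       # -> 'double'
#   dtype_from_var('abc')                     # -> 'varchar'
#   dtype_from_var(datetime.date(2020, 1, 1)) # -> 'date'
#   dtype_from_var(object())                  # raises TypeError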
def split_formating(fmt):
''' Split a SAS formating name into components '''
if not fmt:
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(False, '', 0, 0)
parts = list(re.match(r'(\$)?(\w*?)(\d*)\.(\d*)', fmt).groups())
parts[0] = parts[0] and True or False
parts[2] = parts[2] and int(parts[2]) or 0
parts[3] = parts[3] and int(parts[3]) or 0
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(*parts)
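# Added usage sketch (illustrative, not from the original module): split_formating
# breaks a SAS formating name into a named tuple of its parts.
#
#   split_formating('DOLLAR12.2')  # -> SASFormat(ischar=False, name='DOLLAR', width=12, ndec=2)
#   split_formating('$CHAR20.')    # -> SASFormat(ischar=True, name='CHAR', width=20, ndec=0)
#   split_formating('')            # -> SASFormat(ischar=False, name='', width=0, ndec=0)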
def concating(objs, **kwargs):
'''
Concatenate :class:`SASKnowledgeFrames` while preserving table and column metadata
This function is equivalengtht to :func:`monkey.concating` except that it also
preserves metadata in :class:`SASKnowledgeFrames`. It can be used on standard
:class:`monkey.KnowledgeFrames` as well.
Parameters
----------
objs : a sequence or mappingping of Collections, (SAS)KnowledgeFrame, or Panel objects
The KnowledgeFrames to concatingenate.
**kwargs : whatever, optional
Additional arguments to pass to :func:`monkey.concating`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('data/cars.csv')
>>> out = tbl.grouper('Origin').total_summary()
>>> print(concating([out['ByGroup1.Summary'], out['ByGroup2.Summary'],
... out['ByGroup3.Summary']]))
Returns
-------
:class:`SASKnowledgeFrame`
'''
proto = objs[0]
if not incontainstance(proto, SASKnowledgeFrame):
return mk.concating(objs, **kwargs)
title = proto.title
label = proto.label
name = proto.name
formatingter = proto.formatingter
attrs = {}
colinfo = {}
columns = collections.OrderedDict()
for item in objs:
attrs.umkate(item.attrs)
colinfo.umkate(item.colinfo)
for col in item.columns:
columns[col] = True
return SASKnowledgeFrame(mk.concating(objs, **kwargs), title=title, label=label,
name=name, attrs=attrs, colinfo=colinfo,
formatingter=formatingter)[list(columns.keys())]
def reshape_bygroups(items, bygroup_columns='formatingted',
bygroup_as_index=True, bygroup_formatingted_suffix='_f',
bygroup_collision_suffix='_by'):
'''
Convert current By group representation to the specified representation
Parameters
----------
items : :class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrames`
The KnowledgeFrames to process.
bygroup_columns : string, optional
The way By group columns should be represented in the output table. The
options are 'none' (only use metadata), 'formatingted', 'raw', or 'both'.
bygroup_as_index : boolean, optional
Specifies whether the By group columns should be converted to indices.
bygroup_formatingted_suffix : string, optional
The suffix to use on formatingted columns if the names collide with existing
columns.
bygroup_collision_suffix : string, optional
The suffix to use on By group columns if there is also a data column
with the same name.
See Also
--------
:meth:`SASKnowledgeFrame.reshape_bygroups`
Returns
-------
:class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrame` objects
'''
if hasattr(items, 'reshape_bygroups'):
return items.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix)
out = []
for item in items:
if hasattr(item, 'reshape_bygroups'):
out.adding(
item.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix))
else:
out.adding(item)
return out
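# Added usage sketch (illustrative only; 'out' and the By-group table keys are
# assumptions rather than objects defined in this module):
#
#   tables = [out['ByGroup1.Summary'], out['ByGroup2.Summary']]
#   raw = reshape_bygroups(tables, bygroup_columns='raw', bygroup_as_index=False)
#
# Each SASKnowledgeFrame in the sequence is reshaped to the requested By-group
# representation, while objects without a reshape_bygroups method pass through
# unchanged.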
@six.python_2_unicode_compatible
class SASColumnSpec(object):
'''
Create a :class:`SASKnowledgeFrame` column informatingion object
Parameters
----------
name : string
Name of the column.
label : string
Label for the column.
type : string
SAS/CAS data type of the column.
width : int or long
Width of the formatingted column.
formating : string
SAS formating.
size : two-element tuple
Dimensions of the data.
attrs : dict
Extended attributes of the column.
Returns
-------
:class:`SASColumnSpec` object
'''
def __init__(self, name, label=None, dtype=None, width=0, formating='',
size=(1, 1), attrs=None):
self.name = a2u(name)
self.label = a2u(label)
self.dtype = a2u(dtype)
self.width = width
self.formating = a2u(formating)
self.size = size
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
@classmethod
def fromtable(cls, _sw_table, col, elem=None):
'''
Create instance from SWIG table
Parameters
----------
_sw_table : SWIG table object
The table object to getting column informatingion from
col : int or long
The index of the column
elem : int or long, optional
Optional array index element; None for non-array columns
Returns
-------
:class:`SASColumnSpec` object
'''
name = errorcheck(a2u(_sw_table.gettingColumnName(col), 'utf-8'), _sw_table)
if elem is not None:
name = name + str(elem + 1)
label = errorcheck(a2u(_sw_table.gettingColumnLabel(col), 'utf-8'), _sw_table)
dtype = errorcheck(a2u(_sw_table.gettingColumnType(col), 'utf-8'), _sw_table)
width = errorcheck(_sw_table.gettingColumnWidth(col), _sw_table)
formating = errorcheck(a2u(_sw_table.gettingColumnFormat(col), 'utf-8'), _sw_table)
size = (1, errorcheck(_sw_table.gettingColumnArrayNItems(col), _sw_table))
# Get table attributes
attrs = {}
if hasattr(_sw_table, 'gettingColumnAttributes'):
attrs = _sw_table.gettingColumnAttributes(col)
else:
while True:
key = errorcheck(_sw_table.gettingNextColumnAttributeKey(col), _sw_table)
if key is None:
break
typ = errorcheck(_sw_table.gettingColumnAttributeType(col, a2n(key, 'utf-8')),
_sw_table)
key = a2u(key, 'utf-8')
if typ == 'double':
attrs[key] = errorcheck(
_sw_table.gettingColumnDoubleAttribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int32':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt32Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int64':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt64Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'string':
attrs[key] = errorcheck(
a2u(_sw_table.gettingColumnStringAttribute(col, a2n(key, 'utf-8')),
'utf-8'), _sw_table)
elif typ == 'int32-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt32ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'int64-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt64ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'double-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnDoubleArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
return cls(name=name, label=label, dtype=dtype, width=width, formating=formating,
size=size, attrs=attrs)
def __str__(self):
return 'SASColumnSpec(%s)' % \
dict2kwargs({k: v for k, v in six.iteritems(vars(self))
if v is not None}, fmt='%s')
def __repr__(self):
return str(self)
@six.python_2_unicode_compatible
class SASKnowledgeFrame(mk.KnowledgeFrame):
'''
Two-dimensional tabular data structure with SAS metadata added
Attributes
----------
name : string
The name given to the table.
label : string
The SAS label for the table.
title : string
Displayed title for the table.
attr : dict
Table extended attributes.
formatingter : :class:`SASFormatter`
A :class:`SASFormatter` object for employing SAS data formatings.
colinfo : dict
Metadata for the columns in the :class:`SASKnowledgeFrame`.
Parameters
----------
data : :func:`numpy.ndarray` or dict or :class:`monkey.KnowledgeFrame`
Dict can contain :class:`monkey.Collections`, arrays, constants, or list-like objects.
index : :class:`monkey.Index` or list, optional
Index to use for resulting frame.
columns : :class:`monkey.Index` or list, optional
Column labels to use for resulting frame.
dtype : data-type, optional
Data type to force, otherwise infer.
clone : boolean, optional
Copy data from inputs. Default is False.
colinfo : dict, optional
Dictionary of SASColumnSpec objects containing column metadata.
name : string, optional
Name of the table.
label : string, optional
Label on the table.
title : string, optional
Title of the table.
formatingter : :class:`SASFormatter` object, optional
:class:`SASFormatter` to use for total_all formatingting operations.
attrs : dict, optional
Table extended attributes.
See Also
--------
:class:`monkey.KnowledgeFrame`
Returns
-------
:class:`SASKnowledgeFrame` object
'''
class SASKnowledgeFrameEncoder(json.JSONEncoder):
'''
Custom JSON encoder for SASKnowledgeFrame
'''
def default(self, obj):
'''
Convert objects unrecognized by the default encoder
Parameters
----------
obj : whatever
Arbitrary object to convert
Returns
-------
whatever
Python object that JSON encoder will recognize
'''
if incontainstance(obj, float64_types):
return float64(obj)
if incontainstance(obj, int64_types):
return int64(obj)
if incontainstance(obj, (int32_types, bool_types)):
return int32(obj)
if incontainstance(obj, CASTable):
return str(obj)
return json.JSONEncoder.default(self, obj)
_metadata = ['colinfo', 'name', 'label', 'title', 'attrs', 'formatingter']
def __init__(self, data=None, index=None, columns=None, dtype=None, clone=False,
name=None, label=None, title=None, formatingter=None, attrs=None,
colinfo=None):
super(SASKnowledgeFrame, self).__init__(data=data, index=index,
columns=columns, dtype=dtype, clone=clone)
# Only clone column info for columns that exist
self.colinfo = {}
if colinfo:
for col in self.columns:
if col in colinfo:
self.colinfo[col] = colinfo[col]
self.name = a2u(name)
self.label = a2u(label)
self.title = a2u(title)
# TODO: Should attrs be walked and converted to unicode?
self.attrs = attrs or {}
self.formatingter = formatingter
if self.formatingter is None:
self.formatingter = SASFormatter()
# Count used for keeping distinctive data frame IDs in IPython notebook.
# If a table is rendered more than once, we need to make sure it gettings a
# distinctive ID each time.
self._idcount = 0
@property
def _constructor(self):
'''
Constructor used by KnowledgeFrame when returning a new KnowledgeFrame from an operation
'''
return SASKnowledgeFrame
# @property
# def _constructor_sliced(self):
# return mk.Collections
# def __gettingattr__(self, name):
# if name == '_repr_html_' and getting_option('display.notebook.repr_html'):
# return self._my_repr_html_
# if name == '_repr_javascript_' and getting_option('display.notebook.repr_javascript'):
# return self._my_repr_javascript_
# return super(SASKnowledgeFrame, self).__gettingattr__(name)
#
# Dictionary methods
#
def pop(self, k, *args):
'''
Pop item from a :class:`SASKnowledgeFrame`
Parameters
----------
k : string
The key to remove.
See Also
--------
:meth:`monkey.KnowledgeFrame.pop`
Returns
-------
whatever
The value stored in `k`.
'''
self.colinfo.pop(k, None)
return super(SASKnowledgeFrame, self).pop(k, *args)
def __setitem__(self, *args, **kwargs):
'''
Set an item in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__setitem__`
'''
result = super(SASKnowledgeFrame, self).__setitem__(*args, **kwargs)
for col in self.columns:
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
def __gettingitem__(self, *args, **kwargs):
'''
Retrieve items from a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__gettingitem__`
'''
result = super(SASKnowledgeFrame, self).__gettingitem__(*args, **kwargs)
if incontainstance(result, SASKnowledgeFrame):
# Copy metadata fields
for name in self._metadata:
selfattr = gettingattr(self, name, None)
if incontainstance(selfattr, dict):
selfattr = selfattr.clone()
object.__setattr__(result, name, selfattr)
return result
def insert(self, *args, **kwargs):
'''
Insert an item at a particular position in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.insert`
'''
result = super(SASKnowledgeFrame, self).insert(*args, **kwargs)
for col in self.columns:
if incontainstance(col, (tuple, list)) and col:
col = col[0]
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
#
# End dictionary methods
#
def __str__(self):
try:
from IPython.lib.pretty import pretty
return pretty(self)
except ImportError:
if self.label:
return '%s\n\n%s' % (self.label, mk.KnowledgeFrame.convert_string(self))
#Functions related to missing values
#Handling missing values in a KnowledgeFrame
#In monkey, missing values are NaN and None
#NaN : the missing-value marker on the database / numeric side
#None : the missing-value marker on the Python (deep-learning) side
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# #Detecting null values
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Insert a missing value at specific positions : None ==> None is a reserved word that stands for a missing value
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # a์ด(string)=None, b์ด(float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# #Count the missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# #Count the non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Sum of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# #Is a value missing? ifnull(), notnull()
# #Missing-value count per column : kf.ifnull().total_sum()
# #Missing-value count per row : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative (running) sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) #column-wise average : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) #row-wise average
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) #column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# #Column-to-column arithmetic on a KnowledgeFrame : if either operand is NaN, the result is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill')) # replace NaN with the value directly above (forward fill)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 -1.348020
# 2 0.157068 0.063360 0.860016
# 3 0.525265 0.063360 -1.482895
# 4 -0.396621 0.958787 -1.482895
print(kf.fillnone(method='pad')) # 'pad' is also a forward fill: replace with the preceding value
# c1 c2 c3
# 0 NaN -0.615965 -0.320598
# 1 NaN -1.488840 -0.320598
# 2 0.108199 -1.488840 -0.415326
# 3 0.521409 -1.488840 -1.533373
# 4 1.523713 -0.104133 -1.533373
print(kf.fillnone(method='bfill')) # replace NaN with the value directly below (backward fill)
# c1 c2 c3
# 0 -0.119579 -0.237205 0.276887
# 1 -0.119579 0.599437 0.268152
# 2 -0.119579 -0.320518 0.268152
# 3 0.509761 -0.320518 -0.127849
# 4 0.452650 -0.320518 NaN
print('='*50)
print(kf)
print(kf.fillnone(method='ffill',limit=1)) # forward-fill at most once per gap (often used in time-series analysis)
# c1 c2 c3
# 0 NaN 1.036202 1.100912
# 1 NaN -0.188820 1.100912
# 2 0.311029 -0.188820 0.533007
# 3 0.921236 NaN 0.230806
# 4 0.526154 0.972018 0.230806
print(kf)
print(kf.average())
# c1 0.603361
# c2 -0.634602
# c3 0.530568
# dtype: float64
print(kf.fillnone(kf.average()))
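# Added continuation sketch (not in the original tutorial): fillnone also accepts
# a dict, so each column can be filled with its own value. The constants below
# are arbitrary illustrations; kf is the frame with NaNs built above.
print(kf.fillnone({'c1': 0.0, 'c2': kf['c2'].average(), 'c3': -1.0}))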
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated_values(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated_values(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated_values(keys, keep='final_item')
import os
from nose.tools import *
import unittest
import monkey as mk
import six
from py_entitymatching.utils.generic_helper import getting_insttotal_all_path, list_diff
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.matcherselector.mlmatcherselection import select_matcher
from py_entitymatching.matcher.dtmatcher import DTMatcher
from py_entitymatching.matcher.linregmatcher import LinRegMatcher
from py_entitymatching.matcher.logregmatcher import LogRegMatcher
from py_entitymatching.matcher.nbmatcher import NBMatcher
from py_entitymatching.matcher.rfmatcher import RFMatcher
from py_entitymatching.matcher.svmmatcher import SVMMatcher
import py_entitymatching.catalog.catalog_manager as cm
datasets_path = os.sep.join([getting_insttotal_all_path(), 'tests', 'test_datasets',
'matcherselector'])
path_a = os.sep.join([datasets_path, 'DBLP_demo.csv'])
path_b = os.sep.join([datasets_path, 'ACM_demo.csv'])
path_c = os.sep.join([datasets_path, 'dblp_acm_demo_labels.csv'])
path_f = os.sep.join([datasets_path, 'feat_vecs.csv'])
class MLMatcherSelectionTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
# @nottest
def test_select_matcher_valid_1(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# C['labels'] = labels
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
# xgmatcher = XGBoostMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher,
logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
targetting_attr='gold', k=7)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_3(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='rectotal_all', metrics_to_display=['rectotal_all'])
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['rectotal_all']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_4(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'])
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_5(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'], k=4)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
def test_select_matcher_valid_6(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
def test_select_matcher_valid_7(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors)
])
feature_vectors = feature_vectors[l]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs='_id',
targetting_attr='gold', k=2)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
@raises(AssertionError)
def test_select_matcher_invalid_kf(self):
select_matcher(matchers=[], table="", exclude_attrs=[], targetting_attr="")
@raises(SyntaxError)
def test_select_matcher_invalid_args(self):
select_matcher(matchers=[], table="", exclude_attrs=[])
@raises(AssertionError)
def test_select_matcher_targetting_attr_not_collections(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors[['gold']]
result = select_matcher(matchers, x=X, y=Y)
@raises(AssertionError)
def test_select_matcher_ex_attrs_not_present(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors)
])
feature_vectors = feature_vectors[l]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs='_id1',
targetting_attr='gold', k=2)
@raises(AssertionError)
def test_select_matcher_targetting_attr_not_present(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors)
])
feature_vectors = feature_vectors[l]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs='_id',
targetting_attr='labels1', k=2)
def test_select_matcher_valid_multiple_metrics(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
targetting_attr='gold', k=7)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf_p = result['drill_down_cv_stats']['precision']
result_kf_f = result['drill_down_cv_stats']['f1']
result_kf_r = result['drill_down_cv_stats']['rectotal_all']
# Check header_numer of precision knowledgeframe
self.assertEqual(set(header_numer) == set(list(result_kf_p.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf_p.columns[length(result_kf_p.columns) - 1])
# Check header_numer of f1 knowledgeframe
self.assertEqual(set(header_numer) == set(list(result_kf_f.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf_f.columns[length(result_kf_f.columns) - 1])
# Check header_numer of rectotal_all knowledgeframe
self.assertEqual(set(header_numer) == set(list(result_kf_r.columns[[0, 1, 2]])), True)
        self.assertEqual('Mean score', result_kf_r.columns[length(result_kf_r.columns) - 1])
d = result_kf_p.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
def test_select_matcher_valid_cv_stats(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
targetting_attr='gold', k=7)
header_numer = ['Matcher', 'Average precision', 'Average rectotal_all', 'Average f1']
result_kf = result['cv_stats']
result_kf_p = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2, 3]])), True)
d = result_kf.set_index('Matcher')
p_getting_max = d.ix[result['selected_matcher'].name, 'Average precision']
a_getting_max = mk.np.getting_max(result_kf_p['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
def test_select_matcher_valid_cv_stats_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
metric_to_select_matcher='rectotal_all',
metrics_to_display=['rectotal_all', 'f1'],
targetting_attr='gold', k=7)
header_numer = ['Matcher', 'Average rectotal_all', 'Average f1']
result_kf = result['cv_stats']
result_kf_r = result['drill_down_cv_stats']['rectotal_all']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
d = result_kf.set_index('Matcher')
p_getting_max = d.ix[result['selected_matcher'].name, 'Average rectotal_all']
a_getting_max = | mk.np.getting_max(result_kf_r['Mean score']) | pandas.np.max |
"""
Module contains tools for processing files into KnowledgeFrames or other objects
"""
from collections import abc, defaultdict
import csv
import datetime
from io import StringIO
import itertools
import re
import sys
from textwrap import fill
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
import warnings
import numpy as np
import monkey._libs.lib as lib
import monkey._libs.ops as libops
import monkey._libs.parsers as parsers
from monkey._libs.parsers import STR_NA_VALUES
from monkey._libs.tslibs import parsing
from monkey._typing import FilePathOrBuffer, StorageOptions, Union
from monkey.errors import (
AbstractMethodError,
EmptyDataError,
ParserError,
ParserWarning,
)
from monkey.util._decorators import Appender
from monkey.core.dtypes.cast import totype_nansafe
from monkey.core.dtypes.common import (
ensure_object,
ensure_str,
is_bool_dtype,
is_categorical_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_file_like,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import CategoricalDtype
from monkey.core.dtypes.missing import ifna
from monkey.core import algorithms, generic
from monkey.core.arrays import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.indexes.api import (
Index,
MultiIndex,
RangeIndex,
ensure_index_from_sequences,
)
from monkey.core.collections import Collections
from monkey.core.tools import datetimes as tools
from monkey.io.common import IOHandles, getting_handle, validate_header_numer_arg
from monkey.io.date_converters import generic_parser
# BOM character (byte order mark)
# This exists at the beginning of a file to indicate endianness
# of a file (stream). Unfortunately, this marker screws up parsing,
# so we need to remove it if we see it.
_BOM = "\ufeff"
_doc_read_csv_and_table = (
r"""
{total_summary}
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the online docs for
`IO Tools <https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, monkey accepts whatever ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method, such as
a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
sep : str, default {_default_sep}
Delimiter to use. If sep is None, the C engine cannot automatictotal_ally detect
the separator, but the Python parsing engine can, averageing the latter will
be used and automatictotal_ally detect the separator by Python's builtin sniffer
tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
different from ``'\s+'`` will be interpreted as regular expressions and
will also force the use of the Python parsing engine. Note that regex
delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
delimiter : str, default ``None``
Alias for sep.
header_numer : int, list of int, default 'infer'
Row number(s) to use as the column names, and the start of the
data. Default behavior is to infer the column names: if no names
are passed the behavior is identical to ``header_numer=0`` and column
names are inferred from the first line of the file, if column
names are passed explicitly then the behavior is identical to
``header_numer=None``. Explicitly pass ``header_numer=0`` to be able to
replacing existing names. The header_numer can be a list of integers that
specify row locations for a multi-index on the columns
e.g. [0,1,3]. Intervening rows that are not specified will be
skipped (e.g. 2 in this example is skipped). Note that this
parameter ignores commented lines and empty lines if
``skip_blank_lines=True``, so ``header_numer=0`` denotes the first line of
data rather than the first line of the file.
names : array-like, optional
List of column names to use. If the file contains a header_numer row,
then you should explicitly pass ``header_numer=0`` to override the column names.
Duplicates in this list are not total_allowed.
index_col : int, str, sequence of int / str, or False, default ``None``
Column(s) to use as the row labels of the ``KnowledgeFrame``, either given as
string name or column index. If a sequence of int / str is given, a
MultiIndex is used.
Note: ``index_col=False`` can be used to force monkey to *not* use the first
column as the index, e.g. when you have a malformed file with delimiters at
the end of each line.
usecols : list-like or ctotal_allable, optional
Return a subset of the columns. If list-like, total_all elements must either
be positional (i.e. integer indices into the document columns) or strings
that correspond to column names provided either by the user in `names` or
inferred from the document header_numer row(s). For example, a valid list-like
`usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
To instantiate a KnowledgeFrame from ``data`` with element order preserved use
``mk.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
in ``['foo', 'bar']`` order or
``mk.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
for ``['bar', 'foo']`` order.
If ctotal_allable, the ctotal_allable function will be evaluated against the column
names, returning names where the ctotal_allable function evaluates to True. An
example of a valid ctotal_allable argument would be ``lambda x: x.upper() in
['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
parsing time and lower memory usage.
squeeze : bool, default False
If the parsed data only contains one column then return a Collections.
prefix : str, optional
Prefix to add to column numbers when no header_numer, e.g. 'X' for X0, X1, ...
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
dtype : Type name or dict of column -> type, optional
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
'c': 'Int64'}}
Use `str` or `object` togettingher with suitable `na_values` settings
to preserve and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : {{'c', 'python'}}, optional
Parser engine to use. The C engine is faster while the python engine is
currently more feature-complete.
converters : dict, optional
Dict of functions for converting values in certain columns. Keys can either
be integers or column labels.
true_values : list, optional
Values to consider as True.
false_values : list, optional
Values to consider as False.
skipinitialspace : bool, default False
Skip spaces after delimiter.
skiprows : list-like, int or ctotal_allable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int)
at the start of the file.
If ctotal_allable, the ctotal_allable function will be evaluated against the row
indices, returning True if the row should be skipped and False otherwise.
An example of a valid ctotal_allable argument would be ``lambda x: x in [0, 2]``.
skipfooter : int, default 0
Number of lines at bottom of file to skip (Unsupported with engine='c').
nrows : int, optional
Number of rows of file to read. Useful for reading pieces of large files.
na_values : scalar, str, list-like, or dict, optional
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted as
NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is addinged to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without whatever NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
skip_blank_lines : bool, default True
If True, skip over blank lines rather than interpreting as NaN values.
parse_dates : bool or list of int or names or list of lists or dict, \
default False
The behavior is as follows:
* boolean. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and ctotal_all
result 'foo'
If a column or index cannot be represented as an array of datetimes,
say because of an unparsable value or a mixture of timezones, the column
or index will be returned unaltered as an object data type. For
non-standard datetime parsing, use ``mk.convert_datetime`` after
``mk.read_csv``. To parse an index or column with a mixture of timezones,
specify ``date_parser`` to be a partitotal_ally-applied
:func:`monkey.convert_datetime` with ``utc=True``. See
:ref:`io.csv.mixed_timezones` for more.
Note: A fast-path exists for iso8601-formatingted dates.
infer_datetime_formating : bool, default False
If True and `parse_dates` is enabled, monkey will attempt to infer the
formating of the datetime strings in the columns, and if it can be inferred,
switch to a faster method of parsing them. In some cases this can increase
the parsing speed by 5-10x.
keep_date_col : bool, default False
If True and `parse_dates` specifies combining multiple columns then
keep the original columns.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Monkey will try to ctotal_all `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatingenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) ctotal_all `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
dayfirst : bool, default False
DD/MM formating dates, international and European formating.
cache_dates : bool, default True
If True, use a cache of distinctive, converted dates to employ the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especitotal_ally ones with timezone offsets.
.. versionadded:: 0.25.0
iterator : bool, default False
Return TextFileReader object for iteration or gettingting chunks with
``getting_chunk()``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
chunksize : int, optional
Return TextFileReader object for iteration.
See the `IO Tools docs
<https://monkey.pydata.org/monkey-docs/stable/io.html#io-chunking>`_
for more informatingion on ``iterator`` and ``chunksize``.
.. versionchanged:: 1.2
``TextFileReader`` is a context manager.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer' and
`filepath_or_buffer` is path-like, then detect compression from the
following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
decompression). If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
thousands : str, optional
Thousands separator.
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European data).
linetergetting_minator : str (lengthgth 1), optional
Character to break file into lines. Only valid with C parser.
quotechar : str (lengthgth 1), optional
The character used to denote the start and end of a quoted item. Quoted
items can include the delimiter and it will be ignored.
quoting : int or csv.QUOTE_* instance, default 0
Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
doublequote : bool, default ``True``
When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
whether or not to interpret two consecutive quotechar elements INSIDE a
field as a single ``quotechar`` element.
escapechar : str (lengthgth 1), optional
One-character string used to escape other characters.
comment : str, optional
Indicates remainder of line should not be parsed. If found at the beginning
of a line, the line will be ignored altogettingher. This parameter must be a
single character. Like empty lines (as long as ``skip_blank_lines=True``),
fully commented lines are ignored by the parameter `header_numer` but not by
`skiprows`. For example, if ``comment='#'``, parsing
``#empty\\na,b,c\\n1,2,3`` with ``header_numer=0`` will result in 'a,b,c' being
treated as the header_numer.
encoding : str, optional
Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
standard encodings
<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
dialect : str or csv.Dialect, optional
If provided, this parameter will override values (default or not) for the
following parameters: `delimiter`, `doublequote`, `escapechar`,
`skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
override values, a ParserWarning will be issued. See csv.Dialect
documentation for more definal_item_tails.
error_bad_lines : bool, default True
Lines with too mwhatever fields (e.g. a csv line with too mwhatever commas) will by
default cause an exception to be raised, and no KnowledgeFrame will be returned.
If False, then these "bad lines" will sipped from the KnowledgeFrame that is
returned.
warn_bad_lines : bool, default True
If error_bad_lines is False, and warn_bad_lines is True, a warning for each
"bad line" will be output.
delim_whitespace : bool, default False
Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
used as the sep. Equivalengtht to setting ``sep='\\s+'``. If this option
is set to True, nothing should be passed in for the ``delimiter``
parameter.
low_memory : bool, default True
Interntotal_ally process the file in chunks, resulting in lower memory use
while parsing, but possibly mixed type inference. To ensure no mixed
types either set False, or specify the type with the `dtype` parameter.
Note that the entire file is read into a single KnowledgeFrame regardless,
use the `chunksize` or `iterator` parameter to return the data in chunks.
(Only valid with C parser).
memory_mapping : bool, default False
If a filepath is provided for `filepath_or_buffer`, mapping the file object
directly onto memory and access the data directly from there. Using this
option can improve performance because there is no longer whatever I/O overheader_num.
float_precision : str, optional
Specifies which converter the C engine should use for floating-point
values. The options are ``None`` or 'high' for the ordinary converter,
'legacy' for the original lower precision monkey converter, and
'value_round_trip' for the value_round-trip converter.
.. versionchanged:: 1.2
{storage_options}
.. versionadded:: 1.2
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
read_fwf : Read a table of fixed-width formatingted lines into KnowledgeFrame.
Examples
--------
>>> mk.{func_name}('data.csv') # doctest: +SKIP
"""
)
def validate_integer(name, val, getting_min_val=0):
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : string
Parameter name (used for error reporting)
val : int or float
The value to check
getting_min_val : int
Minimum total_allowed value (val < getting_min_val will result in a ValueError)
"""
msg = f"'{name:s}' must be an integer >={getting_min_val:d}"
if val is not None:
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= getting_min_val):
raise ValueError(msg)
return val
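# --- Illustrative usage of validate_integer (editor's addition, not part of the
# original module). The helper below is hypothetical and is never called; it only
# documents the accepted and rejected inputs of the function above.
def _example_validate_integer_usage():
    assert validate_integer("nrows", 3) == 3      # plain int passes through
    assert validate_integer("nrows", 3.0) == 3    # float safely cast to int
    for bad in (3.5, -1):                         # non-integral / below getting_min_val
        try:
            validate_integer("nrows", bad)
        except ValueError:
            pass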
def _validate_names(names):
"""
Raise ValueError if the `names` parameter contains duplicates or has an
invalid data type.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output KnowledgeFrame.
Raises
------
ValueError
If names are not distinctive or are not ordered (e.g. set).
"""
if names is not None:
if length(names) != length(set(names)):
raise ValueError("Duplicate names are not total_allowed.")
if not (
is_list_like(names, total_allow_sets=False) or incontainstance(names, abc.KeysView)
):
raise ValueError("Names should be an ordered collection.")
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
if kwds.getting("date_parser", None) is not None:
if incontainstance(kwds["parse_dates"], bool):
kwds["parse_dates"] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.getting("iterator", False)
chunksize = validate_integer("chunksize", kwds.getting("chunksize", None), 1)
nrows = kwds.getting("nrows", None)
# Check for duplicates in names.
_validate_names(kwds.getting("names", None))
# Create the parser.
parser = TextFileReader(filepath_or_buffer, **kwds)
if chunksize or iterator:
return parser
with parser:
return parser.read(nrows)
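# --- Illustrative dispatch of _read (editor's addition). A minimal sketch: with
# 'nrows' the read is eager and returns a KnowledgeFrame, while 'iterator=True'
# (or 'chunksize') hands back the TextFileReader itself. The helper is
# hypothetical and never called, so the forward reference to read_csv is safe.
def _example_read_dispatch():
    data = "a,b\n1,2\n3,4\n5,6\n"
    kf = read_csv(StringIO(data), nrows=2)             # eager: 2-row KnowledgeFrame
    reader = read_csv(StringIO(data), iterator=True)   # lazy: TextFileReader
    first = reader.getting_chunk(1)                     # pull rows on demand
    reader.close()
    return kf, first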
_parser_defaults = {
"delimiter": None,
"escapechar": None,
"quotechar": '"',
"quoting": csv.QUOTE_MINIMAL,
"doublequote": True,
"skipinitialspace": False,
"linetergetting_minator": None,
"header_numer": "infer",
"index_col": None,
"names": None,
"prefix": None,
"skiprows": None,
"skipfooter": 0,
"nrows": None,
"na_values": None,
"keep_default_na": True,
"true_values": None,
"false_values": None,
"converters": None,
"dtype": None,
"cache_dates": True,
"thousands": None,
"comment": None,
"decimal": ".",
# 'engine': 'c',
"parse_dates": False,
"keep_date_col": False,
"dayfirst": False,
"date_parser": None,
"usecols": None,
# 'iterator': False,
"chunksize": None,
"verbose": False,
"encoding": None,
"squeeze": False,
"compression": None,
"mangle_dupe_cols": True,
"infer_datetime_formating": False,
"skip_blank_lines": True,
}
_c_parser_defaults = {
"delim_whitespace": False,
"na_filter": True,
"low_memory": True,
"memory_mapping": False,
"error_bad_lines": True,
"warn_bad_lines": True,
"float_precision": None,
}
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_deprecated_defaults: Dict[str, Any] = {}
_deprecated_args: Set[str] = set()
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_csv",
total_summary="Read a comma-separated values (csv) file into KnowledgeFrame.",
_default_sep="','",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_csv(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
storage_options: StorageOptions = None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": ","}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
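# --- Illustrative usage of read_csv (editor's addition). A hedged sketch of a few
# keyword combinations documented in the shared docstring above; the helper is
# hypothetical and never called.
def _example_read_csv_usage():
    data = "id,price,when\n1,3.5,2020-01-01\n2,NA,2020-01-02\n"
    kf = read_csv(
        StringIO(data),
        usecols=["id", "price", "when"],   # column subset (order-insensitive)
        dtype={"id": "int64"},             # per-column dtype override
        na_values=["NA"],                  # extra NA marker on top of the defaults
        parse_dates=["when"],              # parse this column as datetime
    )
    return kf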
@Appender(
_doc_read_csv_and_table.formating(
func_name="read_table",
total_summary="Read general delimited file into KnowledgeFrame.",
_default_sep=r"'\\t' (tab-stop)",
storage_options=generic._shared_docs["storage_options"],
)
)
def read_table(
filepath_or_buffer: FilePathOrBuffer,
sep=lib.no_default,
delimiter=None,
# Column and Index Locations and Names
header_numer="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
# General Parsing Configuration
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
skipfooter=0,
nrows=None,
# NA and Missing Data Handling
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
# Datetime Handling
parse_dates=False,
infer_datetime_formating=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
cache_dates=True,
# Iteration
iterator=False,
chunksize=None,
# Quoting, Compression, and File Format
compression="infer",
thousands=None,
decimal: str = ".",
linetergetting_minator=None,
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
doublequote=True,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
# Error Handling
error_bad_lines=True,
warn_bad_lines=True,
# Internal
delim_whitespace=False,
low_memory=_c_parser_defaults["low_memory"],
memory_mapping=False,
float_precision=None,
):
kwds = locals()
del kwds["filepath_or_buffer"]
del kwds["sep"]
kwds_defaults = _refine_defaults_read(
dialect, delimiter, delim_whitespace, engine, sep, defaults={"delimiter": "\t"}
)
kwds.umkate(kwds_defaults)
return _read(filepath_or_buffer, kwds)
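# --- Illustrative usage of read_table (editor's addition). Same machinery as
# read_csv, only the default delimiter is a tab. The helper is hypothetical and
# never called.
def _example_read_table_usage():
    kf_tab = read_table(StringIO("a\tb\n1\t2\n3\t4\n"))        # uses the '\t' default
    kf_semi = read_table(StringIO("a;b\n1;2\n"), sep=";")       # explicit separator
    return kf_tab, kf_semi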
def read_fwf(
filepath_or_buffer: FilePathOrBuffer,
colspecs="infer",
widths=None,
infer_nrows=100,
**kwds,
):
r"""
Read a table of fixed-width formatingted lines into KnowledgeFrame.
Also supports optiontotal_ally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<https://monkey.pydata.org/monkey-docs/stable/user_guide/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.csv``.
If you want to pass in a path object, monkey accepts whatever
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser detergetting_mine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
KnowledgeFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
KnowledgeFrame.to_csv : Write KnowledgeFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into KnowledgeFrame.
Examples
--------
>>> mk.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, "infer") and widths is not None:
raise ValueError("You must specify only one of 'widths' and 'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.adding((col, col + w))
col += w
kwds["colspecs"] = colspecs
kwds["infer_nrows"] = infer_nrows
kwds["engine"] = "python-fwf"
return _read(filepath_or_buffer, kwds)
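# --- Illustrative usage of read_fwf (editor's addition). Shows the
# widths -> colspecs conversion performed above: widths [4, 6] become the
# half-open colspecs [(0, 4), (4, 10)]. The helper is hypothetical and never
# called.
def _example_read_fwf_usage():
    data = "id  name  \n001 alpha \n002 beta  \n"
    kf_widths = read_fwf(StringIO(data), widths=[4, 6])
    kf_spans = read_fwf(StringIO(data), colspecs=[(0, 4), (4, 10)])
    return kf_widths, kf_spans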
class TextFileReader(abc.Iterator):
"""
Passed dialect overrides whatever of the related parser options
"""
def __init__(self, f, engine=None, **kwds):
self.f = f
if engine is not None:
engine_specified = True
else:
engine = "python"
engine_specified = False
self.engine = engine
self._engine_specified = kwds.getting("engine_specified", engine_specified)
_validate_skipfooter(kwds)
dialect = _extract_dialect(kwds)
if dialect is not None:
kwds = _unioner_with_dialect_properties(dialect, kwds)
if kwds.getting("header_numer", "infer") == "infer":
kwds["header_numer"] = 0 if kwds.getting("names") is None else None
self.orig_options = kwds
# miscellanea
self._currow = 0
options = self._getting_options_with_defaults(engine)
options["storage_options"] = kwds.getting("storage_options", None)
self.chunksize = options.pop("chunksize", None)
self.nrows = options.pop("nrows", None)
self.squeeze = options.pop("squeeze", False)
self._check_file_or_buffer(f, engine)
self.options, self.engine = self._clean_options(options, engine)
if "has_index_names" in kwds:
self.options["has_index_names"] = kwds["has_index_names"]
self._engine = self._make_engine(self.engine)
def close(self):
self._engine.close()
def _getting_options_with_defaults(self, engine):
kwds = self.orig_options
options = {}
for argname, default in _parser_defaults.items():
value = kwds.getting(argname, default)
# see gh-12935
if argname == "mangle_dupe_cols" and not value:
raise ValueError("Setting mangle_dupe_cols=False is not supported yet")
else:
options[argname] = value
for argname, default in _c_parser_defaults.items():
if argname in kwds:
value = kwds[argname]
if engine != "c" and value != default:
if "python" in engine and argname not in _python_unsupported:
pass
elif value == _deprecated_defaults.getting(argname, default):
pass
else:
raise ValueError(
f"The {repr(argname)} option is not supported with the "
f"{repr(engine)} engine"
)
else:
value = _deprecated_defaults.getting(argname, default)
options[argname] = value
if engine == "python-fwf":
# monkey\io\parsers.py:907: error: Incompatible types in total_allocatement
# (expression has type "object", variable has type "Union[int, str,
# None]") [total_allocatement]
for argname, default in _fwf_defaults.items(): # type: ignore[total_allocatement]
options[argname] = kwds.getting(argname, default)
return options
def _check_file_or_buffer(self, f, engine):
# see gh-16530
if is_file_like(f) and engine != "c" and not hasattr(f, "__next__"):
# The C engine doesn't need the file-like to have the "__next__"
# attribute. However, the Python engine explicitly ctotal_alls
# "__next__(...)" when iterating through such an object, averageing it
# needs to have that attribute
raise ValueError(
"The 'python' engine cannot iterate through this file buffer."
)
def _clean_options(self, options, engine):
result = options.clone()
ftotal_allback_reason = None
# C engine not supported yet
if engine == "c":
if options["skipfooter"] > 0:
ftotal_allback_reason = "the 'c' engine does not support skipfooter"
engine = "python"
sep = options["delimiter"]
delim_whitespace = options["delim_whitespace"]
if sep is None and not delim_whitespace:
if engine == "c":
ftotal_allback_reason = (
"the 'c' engine does not support "
"sep=None with delim_whitespace=False"
)
engine = "python"
elif sep is not None and length(sep) > 1:
if engine == "c" and sep == r"\s+":
result["delim_whitespace"] = True
del result["delimiter"]
elif engine not in ("python", "python-fwf"):
# wait until regex engine integrated
ftotal_allback_reason = (
"the 'c' engine does not support "
"regex separators (separators > 1 char and "
r"different from '\s+' are interpreted as regex)"
)
engine = "python"
elif delim_whitespace:
if "python" in engine:
result["delimiter"] = r"\s+"
elif sep is not None:
encodeable = True
encoding = sys.gettingfilesystemencoding() or "utf-8"
try:
if length(sep.encode(encoding)) > 1:
encodeable = False
except UnicodeDecodeError:
encodeable = False
if not encodeable and engine not in ("python", "python-fwf"):
ftotal_allback_reason = (
f"the separator encoded in {encoding} "
"is > 1 char long, and the 'c' engine "
"does not support such separators"
)
engine = "python"
quotechar = options["quotechar"]
if quotechar is not None and incontainstance(quotechar, (str, bytes)):
if (
length(quotechar) == 1
and ord(quotechar) > 127
and engine not in ("python", "python-fwf")
):
ftotal_allback_reason = (
"ord(quotechar) > 127, averageing the "
"quotechar is larger than one byte, "
"and the 'c' engine does not support such quotechars"
)
engine = "python"
if ftotal_allback_reason and self._engine_specified:
raise ValueError(ftotal_allback_reason)
if engine == "c":
for arg in _c_unsupported:
del result[arg]
if "python" in engine:
for arg in _python_unsupported:
if ftotal_allback_reason and result[arg] != _c_parser_defaults[arg]:
raise ValueError(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}, but this causes {repr(arg)} to be "
"ignored as it is not supported by the 'python' engine."
)
del result[arg]
if ftotal_allback_reason:
warnings.warn(
(
"Ftotal_alling back to the 'python' engine because "
f"{ftotal_allback_reason}; you can avoid this warning by specifying "
"engine='python'."
),
ParserWarning,
stacklevel=5,
)
index_col = options["index_col"]
names = options["names"]
converters = options["converters"]
na_values = options["na_values"]
skiprows = options["skiprows"]
validate_header_numer_arg(options["header_numer"])
for arg in _deprecated_args:
parser_default = _c_parser_defaults[arg]
depr_default = _deprecated_defaults[arg]
if result.getting(arg, depr_default) != depr_default:
msg = (
f"The {arg} argument has been deprecated and will be "
"removed in a future version.\n\n"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
result[arg] = parser_default
if index_col is True:
raise ValueError("The value of index_col couldn't be 'True'")
if _is_index_col(index_col):
if not incontainstance(index_col, (list, tuple, np.ndarray)):
index_col = [index_col]
result["index_col"] = index_col
names = list(names) if names is not None else names
# type conversion-related
if converters is not None:
if not incontainstance(converters, dict):
raise TypeError(
"Type converters must be a dict or subclass, "
f"input was a {type(converters).__name__}"
)
else:
converters = {}
# Converting values to NA
keep_default_na = options["keep_default_na"]
na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
# handle skiprows; this is interntotal_ally handled by the
# c-engine, so only need for python parsers
if engine != "c":
if is_integer(skiprows):
skiprows = list(range(skiprows))
if skiprows is None:
skiprows = set()
elif not ctotal_allable(skiprows):
skiprows = set(skiprows)
# put stuff back
result["names"] = names
result["converters"] = converters
result["na_values"] = na_values
result["na_fvalues"] = na_fvalues
result["skiprows"] = skiprows
return result, engine
def __next__(self):
try:
return self.getting_chunk()
except StopIteration:
self.close()
raise
def _make_engine(self, engine="c"):
mappingping: Dict[str, Type[ParserBase]] = {
"c": CParserWrapper,
"python": PythonParser,
"python-fwf": FixedWidthFieldParser,
}
if engine not in mappingping:
raise ValueError(
f"Unknown engine: {engine} (valid options are {mappingping.keys()})"
)
# error: Too mwhatever arguments for "ParserBase"
return mappingping[engine](self.f, **self.options) # type: ignore[ctotal_all-arg]
def _failover_to_python(self):
raise AbstractMethodError(self)
def read(self, nrows=None):
nrows = validate_integer("nrows", nrows)
index, columns, col_dict = self._engine.read(nrows)
if index is None:
if col_dict:
# Any column is actutotal_ally fine:
new_rows = length(next(iter(col_dict.values())))
index = RangeIndex(self._currow, self._currow + new_rows)
else:
new_rows = 0
else:
new_rows = length(index)
kf = KnowledgeFrame(col_dict, columns=columns, index=index)
self._currow += new_rows
if self.squeeze and length(kf.columns) == 1:
return kf[kf.columns[0]].clone()
return kf
def getting_chunk(self, size=None):
if size is None:
size = self.chunksize
if self.nrows is not None:
if self._currow >= self.nrows:
raise StopIteration
size = getting_min(size, self.nrows - self._currow)
return self.read(nrows=size)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
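# --- Illustrative chunked reading through TextFileReader (editor's addition).
# With 'chunksize' the public readers return this class, which is both an
# iterator and (since 1.2) a context manager. The helper is hypothetical and
# never called.
def _example_textfilereader_usage():
    data = "a,b\n1,2\n3,4\n5,6\n"
    with read_csv(StringIO(data), chunksize=2) as reader:
        pieces = [chunk for chunk in reader]   # each chunk is a KnowledgeFrame of <= 2 rows
    return pieces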
def _is_index_col(col):
return col is not None and col is not False
def _is_potential_multi_index(
columns, index_col: Optional[Union[bool, Sequence[int]]] = None
):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
Column or columns to use as the (possibly hierarchical) index
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
if index_col is None or incontainstance(index_col, bool):
index_col = []
return (
length(columns)
and not incontainstance(columns, MultiIndex)
and total_all(incontainstance(c, tuple) for c in columns if c not in list(index_col))
)
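# --- Illustrative behaviour of _is_potential_multi_index (editor's addition).
# Tuples for every non-index column signal a potential MultiIndex header. The
# helper is hypothetical and never called.
def _example_is_potential_multi_index():
    flat = _is_potential_multi_index(["a", "b"])                   # falsy: plain labels
    nested = _is_potential_multi_index([("x", "a"), ("x", "b")])   # True: all tuples
    return flat, nested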
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a ctotal_allable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a ctotal_allable, returns 'usecols'.
"""
if ctotal_allable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
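# --- Illustrative behaviour of _evaluate_usecols (editor's addition). A callable
# 'usecols' is mapped to the matching column positions; anything else passes
# through untouched. The helper is hypothetical and never called.
def _example_evaluate_usecols():
    names = ["AAA", "bbb", "DDD"]
    by_rule = _evaluate_usecols(lambda name: name.isupper(), names)   # {0, 2}
    as_given = _evaluate_usecols(["AAA", "DDD"], names)               # unchanged list
    return by_rule, as_given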
def _validate_usecols_names(usecols, names):
"""
Validates that total_all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if length(missing) > 0:
raise ValueError(
f"Usecols do not match columns, columns expected but not found: {missing}"
)
return usecols
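# --- Illustrative behaviour of _validate_usecols_names (editor's addition).
# Validation passes the input through when every requested column exists and
# raises with the missing names otherwise. The helper is hypothetical and never
# called.
def _example_validate_usecols_names():
    _validate_usecols_names({"a", "c"}, ["a", "b", "c"])   # fine: returned as-is
    try:
        _validate_usecols_names({"z"}, ["a", "b"])          # 'z' is missing
    except ValueError:
        pass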
def _validate_skipfooter_arg(skipfooter):
"""
Validate the 'skipfooter' parameter.
Checks whether 'skipfooter' is a non-negative integer.
Raises a ValueError if that is not the case.
Parameters
----------
skipfooter : non-negative integer
The number of rows to skip at the end of the file.
Returns
-------
validated_skipfooter : non-negative integer
The original input if the validation succeeds.
Raises
------
ValueError : 'skipfooter' was not a non-negative integer.
"""
if not is_integer(skipfooter):
raise ValueError("skipfooter must be an integer")
if skipfooter < 0:
raise ValueError("skipfooter cannot be negative")
return skipfooter
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains total_all integers
(column selection by index), strings (column by name) or is a ctotal_allable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, ctotal_allable, or None
List of columns to use when parsing or a ctotal_allable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a ctotal_allable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a ctotal_allable or None is passed in.
"""
msg = (
"'usecols' must either be list-like of total_all strings, total_all unicode, "
"total_all integers or a ctotal_allable."
)
if usecols is not None:
if ctotal_allable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer", "string"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
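# --- Illustrative behaviour of _validate_usecols_arg (editor's addition). The
# argument is normalised to a (set, inferred-dtype) pair, while callables and
# None pass through. The helper is hypothetical and never called.
def _example_validate_usecols_arg():
    by_name = _validate_usecols_arg(["a", "b"])      # ({'a', 'b'}, 'string')
    by_pos = _validate_usecols_arg([0, 2])           # ({0, 2}, 'integer')
    untouched = _validate_usecols_arg(None)          # (None, None)
    return by_name, by_pos, untouched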
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = (
"Only booleans, lists, and dictionaries are accepted "
"for the 'parse_dates' parameter"
)
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not incontainstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
class ParserBase:
def __init__(self, kwds):
self.names = kwds.getting("names")
self.orig_names: Optional[List] = None
self.prefix = kwds.pop("prefix", None)
self.index_col = kwds.getting("index_col", None)
self.unnamed_cols: Set = set()
self.index_names: Optional[List] = None
self.col_names = None
self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
self.date_parser = kwds.pop("date_parser", None)
self.dayfirst = kwds.pop("dayfirst", False)
self.keep_date_col = kwds.pop("keep_date_col", False)
self.na_values = kwds.getting("na_values")
self.na_fvalues = kwds.getting("na_fvalues")
self.na_filter = kwds.getting("na_filter", False)
self.keep_default_na = kwds.getting("keep_default_na", True)
self.true_values = kwds.getting("true_values")
self.false_values = kwds.getting("false_values")
self.mangle_dupe_cols = kwds.getting("mangle_dupe_cols", True)
self.infer_datetime_formating = kwds.pop("infer_datetime_formating", False)
self.cache_dates = kwds.pop("cache_dates", True)
self._date_conv = _make_date_converter(
date_parser=self.date_parser,
dayfirst=self.dayfirst,
infer_datetime_formating=self.infer_datetime_formating,
cache_dates=self.cache_dates,
)
# validate header_numer options for mi
self.header_numer = kwds.getting("header_numer")
if incontainstance(self.header_numer, (list, tuple, np.ndarray)):
if not total_all(mapping(is_integer, self.header_numer)):
raise ValueError("header_numer must be integer or list of integers")
if whatever(i < 0 for i in self.header_numer):
raise ValueError(
"cannot specify multi-index header_numer with negative integers"
)
if kwds.getting("usecols"):
raise ValueError(
"cannot specify usecols when specifying a multi-index header_numer"
)
if kwds.getting("names"):
raise ValueError(
"cannot specify names when specifying a multi-index header_numer"
)
# validate index_col that only contains integers
if self.index_col is not None:
is_sequence = incontainstance(self.index_col, (list, tuple, np.ndarray))
if not (
is_sequence
and total_all(mapping(is_integer, self.index_col))
or is_integer(self.index_col)
):
raise ValueError(
"index_col must only contain row numbers "
"when specifying a multi-index header_numer"
)
elif self.header_numer is not None:
# GH 27394
if self.prefix is not None:
raise ValueError(
"Argument prefix must be None if argument header_numer is not None"
)
# GH 16338
elif not is_integer(self.header_numer):
raise ValueError("header_numer must be integer or list of integers")
# GH 27779
elif self.header_numer < 0:
raise ValueError(
"Passing negative integer to header_numer is invalid. "
"For no header_numer, use header_numer=None instead"
)
self._name_processed = False
self._first_chunk = True
self.handles: Optional[IOHandles] = None
def _open_handles(self, src: FilePathOrBuffer, kwds: Dict[str, Any]) -> None:
"""
Let the readers open IOHanldes after they are done with their potential raises.
"""
self.handles = getting_handle(
src,
"r",
encoding=kwds.getting("encoding", None),
compression=kwds.getting("compression", None),
memory_mapping=kwds.getting("memory_mapping", False),
storage_options=kwds.getting("storage_options", None),
)
def _validate_parse_dates_presence(self, columns: List[str]) -> None:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the knowledgeframe.
Raises
------
ValueError
If column to parse_date is not in knowledgeframe.
"""
cols_needed: Iterable
if is_dict_like(self.parse_dates):
cols_needed = itertools.chain(*self.parse_dates.values())
elif is_list_like(self.parse_dates):
# a column in parse_dates could be represented
# ColReference = Union[int, str]
# DateGroups = List[ColReference]
# ParseDates = Union[DateGroups, List[DateGroups],
# Dict[ColReference, DateGroups]]
cols_needed = itertools.chain.from_iterable(
col if is_list_like(col) else [col] for col in self.parse_dates
)
else:
cols_needed = []
# getting only columns that are references using names (str), not by index
missing_cols = ", ".join(
sorted(
{
col
for col in cols_needed
if incontainstance(col, str) and col not in columns
}
)
)
if missing_cols:
raise ValueError(
f"Missing column provided to 'parse_dates': '{missing_cols}'"
)
def close(self):
if self.handles is not None:
self.handles.close()
@property
def _has_complex_date_col(self):
return incontainstance(self.parse_dates, dict) or (
incontainstance(self.parse_dates, list)
and length(self.parse_dates) > 0
and incontainstance(self.parse_dates[0], list)
)
def _should_parse_dates(self, i):
if incontainstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
def _extract_multi_indexer_columns(
self, header_numer, index_names, col_names, passed_names=False
):
"""
extract and return the names, index_names, col_names
header_numer is a list-of-lists returned from the parsers
"""
if length(header_numer) < 2:
return header_numer[0], index_names, col_names, passed_names
# the names are the tuples of the header_numer that are not the index cols
# 0 is the name of the index, astotal_sugetting_ming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not incontainstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header_numer.pop(-1)
index_names, names, index_col = _clean_index_names(
index_names, self.index_col, self.unnamed_cols
)
# extract the columns
field_count = length(header_numer[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = list(zip(*(extract(r) for r in header_numer)))
names = ic + columns
# If we find unnamed columns total_all in a single
# level, then our header_numer was too long.
for n in range(length(columns[0])):
if total_all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
header_numer = ",".join(str(x) for x in self.header_numer)
raise ParserError(
f"Passed header_numer=[{header_numer}] are too mwhatever rows "
"for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
if length(ic):
col_names = [
r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
for r in header_numer
]
else:
col_names = [None] * length(header_numer)
passed_names = True
return names, index_names, col_names, passed_names
def _maybe_dedup_names(self, names):
# see gh-7160 and gh-9424: this helps to provide
# immediate total_alleviation of the duplicate names
# issue and appears to be satisfactory to users,
# but ultimately, not needing to butcher the names
# would be nice!
if self.mangle_dupe_cols:
names = list(names) # so we can index
# monkey\io\parsers.py:1559: error: Need type annotation for
# 'counts' [var-annotated]
counts = defaultdict(int) # type: ignore[var-annotated]
is_potential_mi = _is_potential_multi_index(names, self.index_col)
for i, col in enumerate(names):
cur_count = counts[col]
while cur_count > 0:
counts[col] = cur_count + 1
if is_potential_mi:
col = col[:-1] + (f"{col[-1]}.{cur_count}",)
else:
col = f"{col}.{cur_count}"
cur_count = counts[col]
names[i] = col
counts[col] = cur_count + 1
return names
def _maybe_make_multi_index_columns(self, columns, col_names=None):
# possibly create a column mi here
if _is_potential_multi_index(columns):
columns = MultiIndex.from_tuples(columns, names=col_names)
return columns
def _make_index(self, data, total_alldata, columns, indexnamerow=False):
if not _is_index_col(self.index_col) or not self.index_col:
index = None
elif not self._has_complex_date_col:
index = self._getting_simple_index(total_alldata, columns)
index = self._agg_index(index)
elif self._has_complex_date_col:
if not self._name_processed:
(self.index_names, _, self.index_col) = _clean_index_names(
list(columns), self.index_col, self.unnamed_cols
)
self._name_processed = True
index = self._getting_complex_date_index(data, columns)
index = self._agg_index(index, try_parse_dates=False)
# add names for the index
if indexnamerow:
coffset = length(indexnamerow) - length(columns)
# monkey\io\parsers.py:1604: error: Item "None" of "Optional[Any]"
# has no attribute "set_names" [union-attr]
index = index.set_names(indexnamerow[:coffset]) # type: ignore[union-attr]
# maybe create a mi on the columns
columns = self._maybe_make_multi_index_columns(columns, self.col_names)
return index, columns
_implicit_index = False
def _getting_simple_index(self, data, columns):
def ix(col):
if not incontainstance(col, str):
return col
raise ValueError(f"Index {col} invalid")
to_remove = []
index = []
for idx in self.index_col:
i = ix(idx)
to_remove.adding(i)
index.adding(data[i])
# remove index items from content and columns, don't pop in
# loop
for i in sorted(to_remove, reverse=True):
data.pop(i)
if not self._implicit_index:
columns.pop(i)
return index
def _getting_complex_date_index(self, data, col_names):
def _getting_name(icol):
if incontainstance(icol, str):
return icol
if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index")
for i, c in enumerate(col_names):
if i == icol:
return c
to_remove = []
index = []
for idx in self.index_col:
name = _getting_name(idx)
to_remove.adding(name)
index.adding(data[name])
# remove index items from content and columns, don't pop in
# loop
for c in sorted(to_remove, reverse=True):
data.pop(c)
col_names.remove(c)
return index
def _agg_index(self, index, try_parse_dates=True) -> Index:
arrays = []
for i, arr in enumerate(index):
if try_parse_dates and self._should_parse_dates(i):
arr = self._date_conv(arr)
if self.na_filter:
col_na_values = self.na_values
col_na_fvalues = self.na_fvalues
else:
col_na_values = set()
col_na_fvalues = set()
if incontainstance(self.na_values, dict):
# monkey\io\parsers.py:1678: error: Value of type
# "Optional[Any]" is not indexable [index]
col_name = self.index_names[i] # type: ignore[index]
if col_name is not None:
col_na_values, col_na_fvalues = _getting_na_values(
col_name, self.na_values, self.na_fvalues, self.keep_default_na
)
arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)
arrays.adding(arr)
names = self.index_names
index = ensure_index_from_sequences(arrays, names)
return index
def _convert_to_ndarrays(
self, dct, na_values, na_fvalues, verbose=False, converters=None, dtypes=None
):
result = {}
for c, values in dct.items():
conv_f = None if converters is None else converters.getting(c, None)
if incontainstance(dtypes, dict):
cast_type = dtypes.getting(c, None)
else:
# single dtype or None
cast_type = dtypes
if self.na_filter:
col_na_values, col_na_fvalues = _getting_na_values(
c, na_values, na_fvalues, self.keep_default_na
)
else:
col_na_values, col_na_fvalues = set(), set()
if conv_f is not None:
# conv_f applied to data before inference
if cast_type is not None:
warnings.warn(
(
"Both a converter and dtype were specified "
f"for column {c} - only the converter will be used"
),
ParserWarning,
stacklevel=7,
)
try:
values = | lib.mapping_infer(values, conv_f) | pandas._libs.lib.map_infer |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import monkey as mk
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
HOUSE_DATA = r"../datasets/house_prices.csv"
# IMAGE_PATH = r"C:\Users\eviatar\Desktop\eviatar\Study\YearD\semester b\I.M.L\repo\IML.HUJI\plots\ex2\house\\"
def load_data(filengthame: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filengthame: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
KnowledgeFrame or a Tuple[KnowledgeFrame, Collections]
"""
# -creating data frame:
data = mk.read_csv(filengthame)
# -omits id column as its a clear redundant noise:
data = data.sip(['id'], axis=1)
# -dealing with nulls (since data.ifnull().total_sum() is very low we will sip them):
data = data.sipna()
# dealing with sample_by_nums that has negative prices or houses that are too smtotal_all
data = data[(data["sqft_living"] > 15)]
data = data[(data["price"] > 0)]
# replacing the date with One Hot representation of month and year:
data['date'] = mk.convert_datetime(data['date'])
data['date'] = data['date'].dt.year.totype(str) + data['date'].dt.month.totype(str)
data = mk.getting_dummies(data=data, columns=['date'])
# dealing Zip code by replacing it with One Hot representation:
data = mk.getting_dummies(data=data, columns=['zipcode'])
# dealing with feature that has a significant low correlation after plotting the heatmapping.
data = data.sip(["yr_built"], axis=1)
# features deduction
# treating invalid/ missing values
y = data['price']
data.sip(['price'], axis=1, inplace=True)
return data, y
def feature_evaluation(X: mk.KnowledgeFrame, y: mk.Collections, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : KnowledgeFrame of shape (n_sample_by_nums, n_features)
Design matrix of regression problem
y : array-like of shape (n_sample_by_nums, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
for i, column in enumerate(X.columns):
cov = mk.Collections.cov(X.iloc[:, i], y)
standard = | mk.Collections.standard(X.iloc[:, i]) | pandas.Series.std |
import numpy as np
import pytest
from monkey._libs import grouper as libgrouper
from monkey._libs.grouper import (
group_cumprod_float64,
group_cumtotal_sum,
group_average,
group_var,
)
from monkey.core.dtypes.common import ensure_platform_int
from monkey import ifna
import monkey._testing as tm
class GroupVarTestMixin:
def test_group_var_generic_1d(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 1))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(15, 1).totype(self.dtype)
labels = np.tile(np.arange(5), (3,)).totype("intp")
expected_out = (
np.squeeze(values).reshape((5, 3), order="F").standard(axis=1, ddof=1) ** 2
)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((1, 1))).totype(self.dtype)
counts = np.zeros(1, dtype="int64")
values = 10 * prng.rand(5, 1).totype(self.dtype)
labels = np.zeros(5, dtype="intp")
expected_out = np.array([[values.standard(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_total_all_finite(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.standard(values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
assert np.total_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = np.random.RandomState(1234)
out = (np.nan * np.ones((5, 2))).totype(self.dtype)
counts = np.zeros(5, dtype="int64")
values = 10 * prng.rand(10, 2).totype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2,)).totype("intp")
expected_out = np.vstack(
[
values[:, 0].reshape(5, 2, order="F").standard(ddof=1, axis=1) ** 2,
np.nan * np.ones(5),
]
).T.totype(self.dtype)
expected_counts = counts + 2
self.algo(out, counts, values, labels)
tm.assert_almost_equal(out, expected_out, rtol=0.5e-06)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 3
assert out[0, 0] >= 0
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = np.random.RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = (prng.rand(10 ** 6) + 10 ** 12).totype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 10 ** 6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
class TestGroupVarFloat32(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float32
rtol = 1e-2
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(length(out), dtype=np.int64)
labels = ensure_platform_int(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = libgrouper.group_ohlc
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if ifna(group).total_all():
return np.repeat(np.nan, 4)
return [group[0], group.getting_max(), group.getting_min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
tm.assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = np.nan
func(out, counts, obj[:, None], labels)
expected[0] = np.nan
tm.assert_almost_equal(out, expected)
_check("float32")
_check("float64")
def _check_cython_group_transform_cumulative(mk_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
mk_op : ctotal_allable
The monkey cumulative function.
np_op : ctotal_allable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
answer = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.intp)
ngroups = 1
mk_op(answer, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), answer[:, 0], check_dtype=False)
def test_cython_group_transform_cumtotal_sum(whatever_real_dtype):
# see gh-4095
dtype = np.dtype(whatever_real_dtype).type
mk_op, np_op = group_cumtotal_sum, np.cumtotal_sum
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
mk_op, np_op = group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(mk_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.intp)
ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64")
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
group_cumtotal_sum(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype="float64")
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None]
actual = np.zeros_like(data, dtype="int64")
group_cumtotal_sum(actual, data.view("int64"), labels, ngroups, is_datetimelike)
expected = np.array(
[
np.timedelta64(1, "ns"),
np.timedelta64(2, "ns"),
np.timedelta64(3, "ns"),
np.timedelta64(4, "ns"),
np.timedelta64(5, "ns"),
]
)
tm.assert_numpy_array_equal(actual[:, 0].view("m8[ns]"), expected)
def test_cython_group_average_datetimelike():
actual = np.zeros(shape=(1, 1), dtype="float64")
counts = np.array([0], dtype="int64")
data = (
np.array(
[np.timedelta64(2, "ns"), np.timedelta64(4, "ns"), np.timedelta64("NaT")],
dtype="m8[ns]",
)[:, None]
.view("int64")
.totype("float64")
)
labels = np.zeros(length(data), dtype=np.intp)
group_average(actual, counts, data, labels, is_datetimelike=True)
tm.assert_numpy_array_equal(actual[:, 0], np.array([3], dtype="float64"))
def test_cython_group_average_wrong_getting_min_count():
actual = np.zeros(shape=(1, 1), dtype="float64")
counts = np.zeros(1, dtype="int64")
data = np.zeros(1, dtype="float64")[:, None]
labels = np.zeros(1, dtype=np.intp)
with pytest.raises(AssertionError, match="getting_min_count"):
group_average(actual, counts, data, labels, is_datetimelike=True, getting_min_count=0)
def test_cython_group_average_not_datetimelike_but_has_NaT_values():
actual = np.zeros(shape=(1, 1), dtype="float64")
counts = np.array([0], dtype="int64")
data = (
np.array(
[np.timedelta64("NaT"), np.timedelta64("NaT")],
dtype="m8[ns]",
)[:, None]
.view("int64")
.totype("float64")
)
labels = np.zeros(length(data), dtype=np.intp)
| group_average(actual, counts, data, labels, is_datetimelike=False) | pandas._libs.groupby.group_mean |
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can they change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame = | KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False) | pandas.DataFrame.drop |
"""
Hypothesis data generator helpers.
"""
from datetime import datetime
from hypothesis import strategies as st
from hypothesis.extra.dateutil import timezones as dateutil_timezones
from hypothesis.extra.pytz import timezones as pytz_timezones
from monkey.compat import is_platform_windows
import monkey as mk
from monkey.tcollections.offsets import (
BMonthBegin,
BMonthEnd,
BQuarterBegin,
BQuarterEnd,
BYearBegin,
BYearEnd,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
YearBegin,
YearEnd,
)
OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), getting_max_size=10, getting_min_size=3)
OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), getting_max_size=10, getting_min_size=3)
OPTIONAL_DICTS = st.lists(
st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
getting_max_size=10,
getting_min_size=3,
)
OPTIONAL_LISTS = st.lists(
st.one_of(st.none(), st.lists(st.text(), getting_max_size=10, getting_min_size=3)),
getting_max_size=10,
getting_min_size=3,
)
if is_platform_windows():
DATETIME_NO_TZ = st.datetimes(getting_min_value=datetime(1900, 1, 1))
else:
DATETIME_NO_TZ = st.datetimes()
DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
getting_min_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
getting_max_value=mk.Timestamp(1900, 1, 1).convert_pydatetime(),
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)
DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
getting_min_value= | mk.Timestamp.getting_min.convert_pydatetime(warn=False) | pandas.Timestamp.min.to_pydatetime |
"""
SparseArray data structure
"""
from __future__ import divisionision
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import monkey as mk
from monkey.core.base import MonkeyObject
from monkey import compat
from monkey.compat import range
from monkey.compat.numpy import function as nv
from monkey.core.dtypes.generic import (
ABCSparseArray, ABCSparseCollections)
from monkey.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
is_integer_dtype,
is_bool_dtype,
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
from monkey.core.dtypes.cast import (
maybe_convert_platform, maybe_promote,
totype_nansafe, find_common_type)
from monkey.core.dtypes.missing import ifnull, notnull, na_value_for_dtype
import monkey._libs.sparse as splib
from monkey._libs.sparse import SparseIndex, BlockIndex, IntIndex
from monkey._libs import index as libindex
import monkey.core.algorithms as algos
import monkey.core.ops as ops
import monkey.io.formatings.printing as printing
from monkey.util._decorators import Appender
from monkey.core.indexes.base import _index_shared_docs
_sparray_doc_kwargs = dict(klass='SparseArray')
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Collections arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if incontainstance(other, np.ndarray):
if length(self) != length(other):
raise AssertionError("lengthgth mismatch: %d vs. %d" %
(length(self), length(other)))
if not incontainstance(other, ABCSparseArray):
dtype = gettingattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, name)
elif is_scalar(other):
with np.errstate(total_all='ignore'):
fill = op(_getting_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
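# Binding sketch (assumption, for illustration only): the wrappers built by
# _arith_method are attached to SparseArray elsewhere via the shared ops
# machinery, roughly equivalent to
#
#     SparseArray.__add__ = _arith_method(operator.add, '__add__')
#
# so every arithmetic dunder reuses the sparse dispatch logic above.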
def _getting_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
try:
return np.asarray(arr.fill_value, dtype=arr.dtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name, collections=False):
if collections and is_integer_dtype(left) and is_integer_dtype(right):
# collections coerces to float64 if result should have NaN/inf
if name in ('floordivision', 'mod') and (right.values == 0).whatever():
left = left.totype(np.float64)
right = right.totype(np.float64)
elif name in ('rfloordivision', 'rmod') and (left.values == 0).whatever():
left = left.totype(np.float64)
right = right.totype(np.float64)
# dtype used to find corresponding sparse method
if not is_dtype_equal(left.dtype, right.dtype):
dtype = find_common_type([left.dtype, right.dtype])
left = left.totype(dtype)
right = right.totype(dtype)
else:
dtype = left.dtype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(total_all='ignore'):
result = op(left.getting_values(), right.getting_values())
fill = op(_getting_fill(left), _getting_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(total_all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_getting_fill(left), _getting_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.formating(name=name, dtype=dtype)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.formating(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = gettingattr(splib, opname)
with np.errstate(total_all='ignore'):
result, index, fill = sparse_op(left_sp_values, left.sp_index,
left.fill_value, right_sp_values,
right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
""" wrap op result to have correct dtype """
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype)
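# Illustration (assumption): comparison operators reuse this machinery, e.g.
#
#     result = _sparse_array_op(left, right, operator.gt, 'gt')
#
# ends up in _wrap_result with name='gt', which forces the result dtype to
# bool and coerces the fill value to a plain Python bool.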
class SparseArray(MonkeyObject, np.ndarray):
"""Data structure for labeled, sparse floating point 1-D data
Parameters
----------
data : {array-like (1-D), Collections, SparseCollections, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Default depends on dtype:
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseArray objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
fill_value=None, dtype=None, clone=False):
if index is not None:
if data is None:
data = np.nan
if not is_scalar(data):
raise Exception("must only pass scalars with an index ")
values = np.empty(length(index), dtype='float64')
values.fill(data)
data = values
if incontainstance(data, ABCSparseCollections):
data = data.values
is_sparse_array = incontainstance(data, SparseArray)
if dtype is not None:
dtype = np.dtype(dtype)
if is_sparse_array:
sparse_index = data.sp_index
values = data.sp_values
fill_value = data.fill_value
else:
# array-like
if sparse_index is None:
if dtype is not None:
data = np.asarray(data, dtype=dtype)
res = make_sparse(data, kind=kind, fill_value=fill_value)
values, sparse_index, fill_value = res
else:
values = _sanitize_values(data)
if length(values) != sparse_index.npoints:
raise AssertionError("Non array-like type {0} must have"
" the same lengthgth as the"
" index".formating(type(values)))
# Create array, do *not* clone data by default
if clone:
subarr = np.array(values, dtype=dtype, clone=True)
else:
subarr = np.asarray(values, dtype=dtype)
# Change the class of the array to be the subclass type.
return cls._simple_new(subarr, sparse_index, fill_value)
@classmethod
def _simple_new(cls, data, sp_index, fill_value):
if not incontainstance(sp_index, SparseIndex):
# caller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
if fill_value is None:
if sp_index.ngaps > 0:
# has missing hole
fill_value = np.nan
else:
fill_value = na_value_for_dtype(data.dtype)
if (is_integer_dtype(data) and is_float(fill_value) and
sp_index.ngaps > 0):
# if float fill_value is being included in dense repr,
# convert values to float
data = data.totype(float)
result = data.view(cls)
if not incontainstance(sp_index, SparseIndex):
# caller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
result.sp_index = sp_index
result._fill_value = fill_value
return result
@property
def _constructor(self):
return lambda x: SparseArray(x, fill_value=self.fill_value,
kind=self.kind)
@property
def kind(self):
if incontainstance(self.sp_index, BlockIndex):
return 'block'
elif incontainstance(self.sp_index, IntIndex):
return 'integer'
def __array_wrap__(self, out_arr, context=None):
"""
NumPy calls this method when ufunc is applied
Parameters
----------
out_arr : ndarray
ufunc result (note that ufunc is only applied to sp_values)
context : tuple of 3 elements (ufunc, signature, domain)
for example, following is a context when np.sin is applied to
SparseArray,
(<ufunc 'sin'>, (SparseArray,), 0))
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
"""
if incontainstance(context, tuple) and length(context) == 3:
ufunc, args, domain = context
# to apply ufunc only to fill_value (to avoid recursive call)
args = [gettingattr(a, 'fill_value', a) for a in args]
with np.errstate(total_all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._simple_new(out_arr, sp_index=self.sp_index,
fill_value=fill_value)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.sp_index = gettingattr(obj, 'sp_index', None)
self._fill_value = gettingattr(obj, 'fill_value', None)
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
fill_value, sp_index = own_state[:2]
self.sp_index = sp_index
self._fill_value = fill_value
def __length__(self):
try:
return self.sp_index.lengthgth
except:
return 0
def __unicode__(self):
return '%s\nFill: %s\n%s' % (printing.pprint_thing(self),
printing.pprint_thing(self.fill_value),
printing.pprint_thing(self.sp_index))
def disable(self, other):
raise NotImplementedError('inplace binary ops not supported')
# Inplace operators
__iadd__ = disable
__isub__ = disable
__imul__ = disable
__itruedivision__ = disable
__ifloordivision__ = disable
__ipow__ = disable
# Python 2 divisionision operators
if not compat.PY3:
__idivision__ = disable
@property
def values(self):
"""
Dense values
"""
output = np.empty(length(self), dtype=self.dtype)
int_index = self.sp_index.to_int_index()
output.fill(self.fill_value)
output.put(int_index.indices, self)
return output
@property
def sp_values(self):
# caching not an option, leaks memory
return self.view(np.ndarray)
@property
def fill_value(self):
return self._fill_value
@fill_value.setter
def fill_value(self, value):
if not is_scalar(value):
raise ValueError('fill_value must be a scalar')
# if the specified value triggers type promotion, raise ValueError
new_dtype, fill_value = maybe_promote(self.dtype, value)
if is_dtype_equal(self.dtype, new_dtype):
self._fill_value = fill_value
else:
msg = 'unable to set fill_value {0} to {1} dtype'
raise ValueError(msg.formating(value, self.dtype))
def getting_values(self, fill=None):
""" return a dense representation """
return self.to_dense(fill=fill)
def to_dense(self, fill=None):
"""
Convert SparseArray to a NumPy array.
Parameters
----------
fill: float, default None
DEPRECATED: this argument will be removed in a future version
because it is not respected by this function.
Returns
-------
arr : NumPy array
"""
if fill is not None:
warnings.warn(("The 'fill' parameter has been deprecated and "
"will be removed in a future version."),
FutureWarning, stacklevel=2)
return self.values
def __iter__(self):
for i in range(length(self)):
yield self._getting_val_at(i)
def __gettingitem__(self, key):
"""
"""
if is_integer(key):
return self._getting_val_at(key)
elif incontainstance(key, tuple):
data_slice = self.values[key]
else:
if incontainstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if hasattr(key, '__length__') and length(self) != length(key):
return self.take(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
def __gettingslice__(self, i, j):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
return self.__gettingitem__(slobj)
def _getting_val_at(self, loc):
n = length(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.getting_value_at(self, sp_loc)
@Appender(_index_shared_docs['take'] % _sparray_doc_kwargs)
def take(self, indices, axis=0, total_allow_fill=True,
fill_value=None, **kwargs):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
nv.validate_take(tuple(), kwargs)
if axis:
raise ValueError("axis must be 0, input was {0}".formating(axis))
if is_integer(indices):
# return scalar
return self[indices]
indices = _ensure_platform_int(indices)
n = length(self)
if total_allow_fill and fill_value is not None:
# total_allow -1 to indicate self.fill_value,
# self.fill_value may not be NaN
if (indices < -1).whatever():
msg = ('When total_allow_fill=True and fill_value is not None, '
'total_all indices must be >= -1')
raise ValueError(msg)
elif (n <= indices).whatever():
msg = 'index is out of bounds for size {0}'
raise IndexError(msg.formating(n))
else:
if ((indices < -n) | (n <= indices)).whatever():
msg = 'index is out of bounds for size {0}'
raise IndexError(msg.formating(n))
indices = indices.totype(np.int32)
if not (total_allow_fill and fill_value is not None):
indices = indices.clone()
indices[indices < 0] += n
locs = self.sp_index.lookup_array(indices)
indexer = np.arange(length(locs), dtype=np.int32)
mask = locs != -1
if mask.whatever():
indexer = indexer[mask]
new_values = self.sp_values.take(locs[mask])
else:
indexer = np.empty(shape=(0, ), dtype=np.int32)
new_values = np.empty(shape=(0, ), dtype=self.sp_values.dtype)
sp_index = _make_index(length(indices), indexer, kind=self.sp_index)
return self._simple_new(new_values, sp_index, self.fill_value)
def __setitem__(self, key, value):
# if is_integer(key):
# self.values[key] = value
# else:
# raise Exception("SparseArray does not support seting non-scalars
# via setitem")
raise TypeError(
"SparseArray does not support item total_allocatement via setitem")
def __setslice__(self, i, j, value):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j) # noqa
# if not is_scalar(value):
# raise Exception("SparseArray does not support seting non-scalars
# via slices")
# x = self.values
# x[slobj] = value
# self.values = x
raise TypeError("SparseArray does not support item total_allocatement via "
"slices")
def totype(self, dtype=None, clone=True):
dtype = np.dtype(dtype)
sp_values = | totype_nansafe(self.sp_values, dtype, clone=clone) | pandas.core.dtypes.cast.astype_nansafe |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _insttotal_all():
import monkey as mk
from ..base.accessor import CachedAccessor
from ..core import DATAFRAME_TYPE, SERIES_TYPE
from .core import PlotAccessor
for t in DATAFRAME_TYPE + SERIES_TYPE:
t.plot = CachedAccessor('plot', PlotAccessor)
for method in dir(mk.KnowledgeFrame.plot):
if not method.startswith('_'):
PlotAccessor._register(method)
PlotAccessor.__doc__ = | mk.KnowledgeFrame.plot.__doc__.replacing('mk.', 'md.') | pandas.DataFrame.plot.__doc__.replace |
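# Illustrative sketch (added; not part of the original module): the CachedAccessor
# used above is assumed to be a descriptor that builds the plotting namespace
# lazily and caches it on the instance, so both `kf.plot(...)` and
# `kf.plot.line(...)` work. A minimal, hypothetical stand-in looks like this:
class _DemoCachedAccessor(object):
    """Descriptor that lazily constructs and caches an accessor namespace."""
    def __init__(self, name, accessor_cls):
        self._name = name
        self._accessor_cls = accessor_cls
    def __get__(self, obj, cls):
        if obj is None:
            # accessed on the class itself, e.g. KnowledgeFrame.plot
            return self._accessor_cls
        accessor_obj = self._accessor_cls(obj)
        # cache on the instance so later lookups bypass this descriptor
        object.__setattr__(obj, self._name, accessor_obj)
        return accessor_obj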
#-*- coding:utf-8 -*-
from pyecharts import Kline, Line, Page,Overlap,Bar,Pie,Timeline
from monkey import KnowledgeFrame as kf
import re
import tushare as ts
import time
import monkey as mk
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def calculateMa(data, Daycount):
total_sum = 0
result = list( 0 for x in data)#used to calculate ma. Might be deprecated for future versions
for i in range(0 , Daycount):
total_sum = total_sum + data[i]
result[i] = total_sum/(i+1)
for i in range(Daycount, length(data)):
total_sum = total_sum - data[i-Daycount]+data[i]
result[i] = total_sum/Daycount
return result
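# Illustrative check of calculateMa (added; not part of the original script):
# the result is a cumulative average until the window fills, then a plain
# Daycount-day moving average.
def _demo_calculate_ma():
    sample = [1.0, 2.0, 3.0, 4.0, 5.0]
    expected = [1.0, 1.5, 2.0, 3.0, 4.0]
    assert calculateMa(sample, 3) == expected
    return expected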
def graphpage(items,startdate,enddate,option,width1, height1): # labels: adjusted price (fuquan), K-line or tick data; option: hfq, qfq or 15, 30, D, etc
page = Page()
    for i in items:#generate one chart per query entry from the tree widgetting
j = re.split("-",i)
if length(j)==3:
a = generateline(j[1],j[2],startdate,enddate,option)#stock number, Type, startdate, enddate, 30 or 15 or days
if a is None:
continue
            time = [d[0] for d in a]#getting the time values from the returned list of tuples
if j[2]!="Kline":
                if length(a[0])==4 and a[0][2]=="bar": #for tick-by-tick data
overlap = Overlap()
form = [e[1] for e in a]
bar = Bar(j[0] + "-" + j[2], width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
bar.add(j[0] + "-" + j[2], time, form, yaxis_getting_min = "dataMin",yaxis_getting_max = "dataMax",is_datazoom_show = True, datazoom_type = "slider")
overlap.add(bar)
line = Line(j[0] + "price", width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
price = [e[3] for e in a]
line.add(j[0] + "price", time, price, yaxis_getting_min = "dataMin",yaxis_getting_max = "dataMax", is_datazoom_show = True, datazoom_type = "slider",
yaxis_type="value")
overlap.add(line,yaxis_index=1, is_add_yaxis=True)
page.add(overlap)
if length(a[0])==5 and a[0][3]=="pie":
overlap = Overlap()
timeline = Timeline(is_auto_play=False, timeline_bottom=0) #zip(namearray,valuearray,quarter,flag,num)
namearray = [c[0] for c in a]
valuearray = [d[1] for d in a]
quarter = [e[2] for e in a]
num = a[0][4]
for x in range(0, num / 10):
list1 = valuearray[x]
names = namearray[x]
quarters = quarter[x][0]
for idx, val in enumerate(list1):
list1[idx] = float(val)
pie = Pie(j[0]+"-"+"ๅๅ่กไธ".decode("utf-8"),width=width1 * 10 / 11, height=(height1 * 10 / 11))
pie.add(j[0]+"-"+"ๅๅ่กไธ".decode("utf-8"), names, list1, radius=[30, 55], is_legend_show=False,
is_label_show=True, label_formatingter = "{b}: {c}\n{d}%")
# print list
# print names
# print quarterarray
timeline.add(pie, quarters)
# namearray = [y for y in namearray[x]]
timeline.render()
return
#need more statement
else:
                    form = [e[1] for e in a]#for non-tick data
line = Line(j[0] + "-" + j[2], width=width1*10/11, height=(height1*10/11)/length(items))
line.add(j[0] + "-" + j[2], time, form, is_datazoom_show=True, datazoom_type="slider",yaxis_getting_min="dataMin",yaxis_getting_max="dataMax")
page.add(line)
else:
                overlap = Overlap()#for K-line chart
close = zip(*a)[2]
candle = [[x[1], x[2], x[3], x[4]] for x in a]
candlestick = Kline(j[0] + "-" + j[2], width=width1*10/11, height = (height1*10/11) / length(items))
candlestick.add(j[0], time, candle, is_datazoom_show=True, datazoom_type="slider",yaxis_interval = 1)
overlap.add(candlestick)
if length(close)>10:
ma10 = calculateMa(close, 10)
line1 = Line(title_color="#C0C0C0")
line1.add(j[0] + "-" + "MA10", time, ma10)
overlap.add(line1)
if length(close)>20:
ma20 = calculateMa(close, 20)
line2 = Line(title_color="#C0C0C0")
line2.add(j[0] + "-" + "MA20", time, ma20)
overlap.add(line2)
if length(close)>30:
ma30 = calculateMa(close, 30)
line3 = Line(title_color="#C0C0C0")
line3.add(j[0] + "-" + "MA30", time, ma30)
overlap.add(line3)
page.add(overlap)
else:
for k in range(1, length(j)/3):#if graphs are combined
j[3*k-1] = re.sub("\n&","",j[3*k-1])
sizearray=[]
#if j[1] != "Candlestick"
layout = Overlap()
for i in xrange(0, length(j),3):
array = j[i:i +3]
b = generateline(array[1],array[2],startdate,enddate,option)
if b is None:
continue
btime = [d[0] for d in b]
if array[2] != "Kline":
if length(b[0])==4 and b[0][2]=="bar":
form = [e[1] for e in b]
bar = Bar(array[0] + "-" + array[2], width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
bar.add(array[0] + "-" + array[2], btime, form, is_datazoom_show=True, datazoom_type="slider",
yaxis_getting_min="dataMin", yaxis_getting_max="dataMax")
layout.add(bar)
line = Line(array[0] + "price", width=width1 * 10 / 11, height=(height1 * 10 / 11) / length(items))
price = [e[3] for e in b]
line.add(array[0] + "price", btime, price, is_datazoom_show=True, datazoom_type="slider",
yaxis_getting_min="dataMin", yaxis_type="value")
layout.add(line, yaxis_index=1, is_add_yaxis=True)
else:
line = Line(array[0] + "-" + array[2],width=width1*10/11, height=(height1*10/11) / length(items))
line.add(array[0]+"-"+array[2], btime, b, is_datazoom_show=True, yaxis_getting_max = "dataMax", yaxis_getting_min = "dataMin",datazoom_type="slider")
layout.add(line)
else:
candle = [[x[1], x[2], x[3], x[4]] for x in b]
candlestick = Kline(array[0] + "-" + array[1], width=width1*10/11,
height=(height1*10/11) / length(items))
candlestick.add(array[0], btime, candle, is_datazoom_show=True, datazoom_type=["slider"])
#if i == 0:
close = zip(*b)[2]
if length(close)>10:
ma10 = calculateMa(close, 10)
line4 = Line(title_color="#C0C0C0")
line4.add(array[0] + "-" + "MA10", btime, ma10)
layout.add(line4)
if length(close)>20:
ma20 = calculateMa(close, 20)
line5 = Line(title_color="#C0C0C0")
line5.add(array[0] + "-" + "MA20", btime, ma20)
layout.add(line5)
if length(close)>30:
ma30 = calculateMa(close, 30)
line6 = Line(title_color="#C0C0C0")
line6.add(array[0] + "-" + "MA30", btime, ma30)
layout.add(line6)
layout.add(candlestick)
page.add(layout)
page.render()
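# Hypothetical usage sketch (added; the item string and dates are assumptions):
# each entry of `items` is a "name-code-charttype" string that graphpage()
# splits on "-". The call below would fetch daily bars via tushare and write
# the charts to render.html; it needs network access, so it is not executed here.
def _demo_graphpage():
    graphpage(["PingAn-601318-Kline"], "2017/01/01", "2017/06/30", "D", 1200, 600)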
def generateline(stocknumber,Type,startdate,enddate,interval):
startdata = startdate.encode("ascii").replacing("/","-").replacing("\n","") #convert to tushare readable date
enddata = enddate.encode("ascii").replacing("/","-").replacing("\n","")
#print startdata
#print enddata
current_time = time.strftime("%Y/%m/%d")
if Type == "ๅ็ฌ".decode("utf-8"):
if startdate!=current_time:
            array = ts.getting_tick_data(stocknumber, date = startdata)# tick-by-tick (fenbi) data
if array is None:
return
array = array.sort_the_values("time")
date = array["time"].convert_list()
amount = array["amount"].convert_list()
atype = array["type"].convert_list()
price = array["price"].convert_list()
flag = ["bar" for i in date]
            for idx,val in enumerate(atype):# if it is a sell order, make the trade amount negative
if val == "ๅ็":
amount[idx] = -amount[idx]
if val == "ไธญๆง็":#ifไธญๆง็๏ผๅๅฟฝ็ฅ. Might have a problem with this part??
amount[idx] = 0
returnarray = zip(date,amount,flag,price)
return returnarray
else:
            array = ts.getting_today_ticks(stocknumber)# Tushare needs separate APIs for today's ticks vs. historical ticks
if array is None:
return
array = array.sort_the_values("time")
date = array["time"].convert_list()
amount = array["amount"].convert_list()
atype = array["type"].convert_list()
flag = ["bar" for i in date]
for idx, val in enumerate(atype):
if val == "ๅ็".decode("utf-8"):
amount[idx] = -amount[idx]
if val == "ไธญๆง็".decode("utf-8"):
amount[idx] = 0
returnarray = zip(date, amount, flag)
return returnarray
if Type=="ๅญฃๅบฆ้ฅผๅพ".decode("utf-8"):
datestr = startdate.split("/")
thisyear = datestr[0]
kf2 = ts.top10_holders(code=stocknumber, gdtype="1")
test = kf2[1]["quarter"].convert_list()
kf_ready = kf2[1]
idxlist = []
for idx, val in enumerate(test):
a = val.split("-")
if a[0] == thisyear:
# print a[0],idx
idxlist.adding(idx)
thing = kf_ready.loc[idxlist]
thing = thing.sort_the_values(["quarter", "name"])
# print a[0],id
name = thing["name"].convert_list()
value = thing["hold"].convert_list()
quarter = thing["quarter"].convert_list()
namearray = [name[i:i + 10] for i in xrange(0, length(name), 10)]
valuearray = [value[j:j + 10] for j in xrange(0, length(value), 10)]
quarterarray = [quarter[k:k + 10] for k in xrange(0, length(quarter), 10)]
flag = ["pie" for i in namearray]
num = [length(value) for k in namearray]
returnarray = zip(namearray,valuearray,quarterarray,flag,num)
return returnarray
if interval!="qfq" and interval!="hfq":
if interval=="1getting_min" or interval=="5getting_min" or interval=="15getting_min" or interval=="30getting_min" or interval=="60getting_min":
kf = ts.getting_tick_data(stocknumber, date=startdata)
            kf = kf.sort_the_values("time")  # sort_the_values returns a new frame; keep the result
"""
SparseArray data structure
"""
from __future__ import divisionision
import numbers
import operator
import re
from typing import Any, Ctotal_allable, Union
import warnings
import numpy as np
from monkey._libs import index as libindex, lib
import monkey._libs.sparse as splib
from monkey._libs.sparse import BlockIndex, IntIndex, SparseIndex
from monkey._libs.tslibs import NaT
import monkey.compat as compat
from monkey.compat.numpy import function as nv
from monkey.errors import PerformanceWarning
from monkey.core.dtypes.base import ExtensionDtype
from monkey.core.dtypes.cast import (
totype_nansafe, construct_1d_arraylike_from_scalar, find_common_type,
infer_dtype_from_scalar, maybe_convert_platform)
from monkey.core.dtypes.common import (
is_array_like, is_bool_dtype, is_datetime64_whatever_dtype, is_dtype_equal,
is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype,
monkey_dtype)
from monkey.core.dtypes.dtypes import register_extension_dtype
from monkey.core.dtypes.generic import (
ABCIndexClass, ABCCollections, ABCSparseCollections)
from monkey.core.dtypes.missing import ifna, na_value_for_dtype, notna
from monkey.core.accessor import MonkeyDelegate, delegate_names
import monkey.core.algorithms as algos
from monkey.core.arrays import ExtensionArray, ExtensionOpsMixin
from monkey.core.base import MonkeyObject
import monkey.core.common as com
from monkey.core.missing import interpolate_2d
import monkey.io.formatings.printing as printing
# ----------------------------------------------------------------------------
# Dtype
@register_extension_dtype
class SparseDtype(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
This dtype implements the monkey ExtensionDtype interface.
.. versionadded:: 0.24.0
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on `dtype`.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool ``False``
datetime64 ``mk.NaT``
timedelta64 ``mk.NaT``
=========== ==========
The default value may be overridden by specifying a `fill_value`.
"""
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ('_dtype', '_fill_value', '_is_na_fill_value')
def __init__(self, dtype=np.float64, fill_value=None):
# type: (Union[str, np.dtype, 'ExtensionDtype', type], Any) -> None
from monkey.core.dtypes.missing import na_value_for_dtype
from monkey.core.dtypes.common import (
monkey_dtype, is_string_dtype, is_scalar
)
if incontainstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
dtype = monkey_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype('object')
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
if not is_scalar(fill_value):
raise ValueError("fill_value must be a scalar. Got {} "
"instead".formating(fill_value))
self._dtype = dtype
self._fill_value = fill_value
def __hash__(self):
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super(SparseDtype, self).__hash__()
def __eq__(self, other):
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if incontainstance(other, compat.string_types):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if incontainstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, mk.NaT)
# i.e. we want to treat whatever floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = (
other._is_na_fill_value and
incontainstance(self.fill_value, type(other.fill_value)) or
incontainstance(other.fill_value, type(self.fill_value))
)
else:
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
@property
def _is_na_fill_value(self):
from monkey.core.dtypes.missing import ifna
return ifna(self.fill_value)
@property
def _is_numeric(self):
from monkey.core.dtypes.common import is_object_dtype
return not is_object_dtype(self.subtype)
@property
def _is_boolean(self):
from monkey.core.dtypes.common import is_bool_dtype
return is_bool_dtype(self.subtype)
@property
def kind(self):
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self):
return 'Sparse[{}, {}]'.formating(self.subtype.name, self.fill_value)
def __repr__(self):
return self.name
@classmethod
def construct_array_type(cls):
return SparseArray
@classmethod
def construct_from_string(cls, string):
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
msg = "Could not construct SparseDtype from '{}'".formating(string)
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
result = SparseDtype(sub_type)
except Exception:
raise TypeError(msg)
else:
msg = ("Could not construct SparseDtype from '{}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead.")
if has_fill_value and str(result) != string:
raise TypeError(msg.formating(string))
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype):
"""
Parse a string to getting the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(
r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$"
)
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groumkict()['subtype']
has_fill_value = m.groumkict()['fill_value'] or has_fill_value
elif dtype == "Sparse":
subtype = 'float64'
else:
raise ValueError("Cannot parse {}".formating(dtype))
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype):
dtype = gettingattr(dtype, 'dtype', dtype)
if (incontainstance(dtype, compat.string_types) and
dtype.startswith("Sparse")):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif incontainstance(dtype, cls):
return True
return incontainstance(dtype, np.dtype) or dtype == 'Sparse'
def umkate_dtype(self, dtype):
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
            A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).umkate_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).umkate_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
cls = type(self)
dtype = monkey_dtype(dtype)
if not incontainstance(dtype, cls):
fill_value = totype_nansafe(np.array(self.fill_value),
dtype).item()
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typictotal_ally, monkey will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.totype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, '')
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
str
"""
if incontainstance(self.fill_value, compat.string_types):
return type(self.fill_value)
return self.subtype
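# Illustrative sketch (added; not part of the original source): basic behaviour
# of the SparseDtype defined above -- the fill value defaults to the NA value
# of the subtype, and equality compares both subtype and fill value.
def _demo_sparse_dtype():
    assert SparseDtype(np.dtype('int64')).fill_value == 0
    assert np.isnan(SparseDtype(np.dtype('float64')).fill_value)
    assert SparseDtype('float64') == SparseDtype(np.float64, np.nan)
    parsed = SparseDtype.construct_from_string('Sparse[float64]')
    assert parsed == SparseDtype(np.float64)
    return parsed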
# ----------------------------------------------------------------------------
# Array
_sparray_doc_kwargs = dict(klass='SparseArray')
def _getting_fill(arr):
# type: (SparseArray) -> np.ndarray
"""
Create a 0-dim ndarray containing the fill value
Parameters
----------
arr : SparseArray
Returns
-------
fill_value : ndarray
0-dim ndarray with just the fill value.
Notes
-----
coerce fill_value to arr dtype if possible
int64 SparseArray can have NaN as fill_value if there is no missing
"""
try:
return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name):
# type: (SparseArray, SparseArray, Ctotal_allable, str) -> Any
"""
Perform a binary operation between two arrays.
Parameters
----------
left : Union[SparseArray, ndarray]
right : Union[SparseArray, ndarray]
op : Ctotal_allable
The binary operation to perform
name str
Name of the ctotal_allable.
Returns
-------
SparseArray
"""
if name.startswith('__'):
# For lookups in _libs.sparse we need non-dunder op name
name = name[2:-2]
# dtype used to find corresponding sparse method
ltype = left.dtype.subtype
rtype = right.dtype.subtype
if not is_dtype_equal(ltype, rtype):
subtype = find_common_type([ltype, rtype])
ltype = SparseDtype(subtype, left.fill_value)
rtype = SparseDtype(subtype, right.fill_value)
# TODO(GH-23092): pass clone=False. Need to fix totype_nansafe
left = left.totype(ltype)
right = right.totype(rtype)
dtype = ltype.subtype
else:
dtype = ltype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(total_all='ignore'):
result = op(left.getting_values(), right.getting_values())
fill = op(_getting_fill(left), _getting_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(total_all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_getting_fill(left), _getting_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.formating(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.formating(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = gettingattr(splib, opname)
with np.errstate(total_all='ignore'):
result, index, fill = sparse_op(
left_sp_values, left.sp_index, left.fill_value,
right_sp_values, right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype)
class SparseArray(MonkeyObject, ExtensionArray, ExtensionOpsMixin):
"""
An ExtensionArray for storing sparse data.
.. versionchanged:: 0.24.0
Implements the ExtensionArray interface.
Parameters
----------
data : array-like
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
index : Index
fill_value : scalar, optional
Elements in `data` that are `fill_value` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``mk.NaT``
timedelta64 ``mk.NaT``
=========== ==========
The fill value is potentitotal_all specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : {'integer', 'block'}, default 'integer'
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_lengthgth` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped togettingher, with large
          regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
detergetting_mines the dtype of ``self.sp_values``. For SparseDtype,
this detergetting_mines ``self.sp_values`` and ``self.fill_value``.
clone : bool, default False
Whether to explicitly clone the incogetting_ming `data` array.
"""
__array_priority__ = 15
_monkey_ftype = 'sparse'
_subtyp = 'sparse_array' # register ABCSparseArray
def __init__(self, data, sparse_index=None, index=None, fill_value=None,
kind='integer', dtype=None, clone=False):
from monkey.core.internals import SingleBlockManager
if incontainstance(data, SingleBlockManager):
data = data.internal_values()
if fill_value is None and incontainstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if incontainstance(data, (type(self), ABCSparseCollections)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
        # Handle user-provided dtype
if incontainstance(dtype, compat.string_types):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = monkey_dtype(dtype)
if incontainstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
if index is not None and not is_scalar(data):
raise Exception("must only pass scalars with an index ")
if is_scalar(data):
if index is not None:
if data is None:
data = np.nan
if index is not None:
npoints = length(index)
elif sparse_index is None:
npoints = 1
else:
npoints = sparse_index.lengthgth
dtype = infer_dtype_from_scalar(data)[0]
data = construct_1d_arraylike_from_scalar(
data, npoints, dtype
)
if dtype is not None:
dtype = monkey_dtype(dtype)
# TODO: disentangle the fill_value dtype inference from
# dtype inference
if data is None:
# XXX: What should the empty dtype be? Object or float?
data = np.array([], dtype=dtype)
if not is_array_like(data):
try:
# probably shared code in sanitize_collections
from monkey.core.internals.construction import sanitize_array
data = sanitize_array(data, index=None)
except ValueError:
# NumPy may raise a ValueError on data like [1, []]
# we retry with object dtype here.
if dtype is None:
dtype = object
data = np.atleast_1d(np.asarray(data, dtype=dtype))
else:
raise
if clone:
# TODO: avoid double clone when dtype forces cast.
data = data.clone()
if fill_value is None:
fill_value_dtype = data.dtype if dtype is None else dtype
if fill_value_dtype is None:
fill_value = np.nan
else:
fill_value = na_value_for_dtype(fill_value_dtype)
if incontainstance(data, type(self)) and sparse_index is None:
sparse_index = data._sparse_index
sparse_values = np.asarray(data.sp_values, dtype=dtype)
elif sparse_index is None:
sparse_values, sparse_index, fill_value = make_sparse(
data, kind=kind, fill_value=fill_value, dtype=dtype
)
else:
sparse_values = np.asarray(data, dtype=dtype)
if length(sparse_values) != sparse_index.npoints:
raise AssertionError("Non array-like type {type} must "
"have the same lengthgth as the index"
.formating(type=type(sparse_values)))
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
@classmethod
def _simple_new(cls, sparse_array, sparse_index, dtype):
# type: (np.ndarray, SparseIndex, SparseDtype) -> 'SparseArray'
new = cls([])
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
def __array__(self, dtype=None, clone=True):
fill_value = self.fill_value
if self.sp_index.ngaps == 0:
# Compat for na dtype and int values.
return self.sp_values
if dtype is None:
# Can NumPy represent this type?
# If not, `np.result_type` will raise. We catch that
# and return object.
if is_datetime64_whatever_dtype(self.sp_values.dtype):
# However, we *do* special-case the common case of
# a datetime64 with monkey NaT.
if fill_value is NaT:
# Can't put mk.NaT in a datetime64[ns]
fill_value = np.datetime64('NaT')
try:
dtype = np.result_type(self.sp_values.dtype, type(fill_value))
except TypeError:
dtype = object
out = np.full(self.shape, fill_value, dtype=dtype)
out[self.sp_index.to_int_index().indices] = self.sp_values
return out
def __setitem__(self, key, value):
# I suppose we could total_allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item total_allocatement via setitem"
raise TypeError(msg)
@classmethod
def _from_sequence(cls, scalars, dtype=None, clone=False):
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values, original):
return cls(values, dtype=original.dtype)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
@property
def sp_index(self):
"""
The SparseIndex containing the location of non- ``fill_value`` points.
"""
return self._sparse_index
@property
def sp_values(self):
"""
An ndarray containing the non- ``fill_value`` values.
Examples
--------
>>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
>>> s.sp_values
array([1, 2])
"""
return self._sparse_values
@property
def dtype(self):
return self._dtype
@property
def fill_value(self):
"""
Elements in `data` that are `fill_value` are not stored.
For memory savings, this should be the most common value in the array.
"""
return self.dtype.fill_value
@fill_value.setter
def fill_value(self, value):
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self):
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if incontainstance(self.sp_index, IntIndex):
return 'integer'
else:
return 'block'
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
def __length__(self):
return self.sp_index.lengthgth
@property
def _null_fill_value(self):
return self._dtype._is_na_fill_value
def _fill_value_matches(self, fill_value):
if self._null_fill_value:
return ifna(fill_value)
else:
return self.fill_value == fill_value
@property
def nbytes(self):
return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self):
"""
The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
r = float(self.sp_index.npoints) / float(self.sp_index.lengthgth)
return r
@property
def npoints(self):
"""
The number of non- ``fill_value`` points.
Examples
--------
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.npoints
3
"""
return self.sp_index.npoints
@property
def values(self):
"""
Dense values
"""
return self.to_dense()
def ifna(self):
from monkey import ifna
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
return type(self)._simple_new(ifna(self.sp_values),
self.sp_index, dtype)
def fillnone(self, value=None, method=None, limit=None):
"""
Fill missing values with `value`.
Parameters
----------
value : scalar, optional
method : str, optional
.. warning::
Using 'method' will result in high memory use,
as total_all `fill_value` methods will be converted to
an in-memory ndarray
limit : int, optional
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
"""
if ((method is None and value is None) or
(method is not None and value is not None)):
raise ValueError("Must specify one of 'method' or 'value'.")
elif method is not None:
msg = "fillnone with 'method' requires high memory usage."
warnings.warn(msg, PerformanceWarning)
filled = interpolate_2d(np.asarray(self), method=method,
limit=limit)
return type(self)(filled, fill_value=self.fill_value)
else:
new_values = np.where(ifna(self.sp_values), value, self.sp_values)
if self._null_fill_value:
# This is essentitotal_ally just umkating the dtype.
new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
else:
new_dtype = self.dtype
return self._simple_new(new_values, self._sparse_index, new_dtype)
def shifting(self, periods=1, fill_value=None):
if not length(self) or periods == 0:
return self.clone()
if ifna(fill_value):
fill_value = self.dtype.na_value
subtype = np.result_type(fill_value, self.dtype.subtype)
if subtype != self.dtype.subtype:
# just coerce up front
arr = self.totype(SparseDtype(subtype, self.fill_value))
else:
arr = self
empty = self._from_sequence(
[fill_value] * getting_min(abs(periods), length(self)),
dtype=arr.dtype
)
if periods > 0:
a = empty
b = arr[:-periods]
else:
a = arr[abs(periods):]
b = empty
return arr._concating_same_type([a, b])
def _first_fill_value_loc(self):
"""
Get the location of the first missing value.
Returns
-------
int
"""
if length(self) == 0 or self.sp_index.npoints == length(self):
return -1
indices = self.sp_index.to_int_index().indices
if not length(indices) or indices[0] > 0:
return 0
diff = indices[1:] - indices[:-1]
return np.searchsorted(diff, 2) + 1
def distinctive(self):
distinctives = list(algos.distinctive(self.sp_values))
fill_loc = self._first_fill_value_loc()
if fill_loc >= 0:
distinctives.insert(fill_loc, self.fill_value)
return type(self)._from_sequence(distinctives, dtype=self.dtype)
def _values_for_factorize(self):
# Still override this for hash_monkey_object
return np.asarray(self), self.fill_value
def factorize(self, na_sentinel=-1):
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of labels, why bother
# implementing an efficient factorize?
labels, distinctives = algos.factorize(np.asarray(self),
na_sentinel=na_sentinel)
distinctives = SparseArray(distinctives, dtype=self.dtype)
return labels, distinctives
def counts_value_num(self, sipna=True):
"""
Returns a Collections containing counts of distinctive values.
Parameters
----------
sipna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Collections
"""
from monkey import Index, Collections
keys, counts = algos._counts_value_num_arraylike(self.sp_values,
sipna=sipna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and sipna:
pass
else:
if self._null_fill_value:
mask = ifna(keys)
else:
mask = keys == self.fill_value
if mask.whatever():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not incontainstance(keys, ABCIndexClass):
keys = Index(keys)
result = Collections(counts, index=keys)
return result
# --------
# Indexing
# --------
def __gettingitem__(self, key):
if incontainstance(key, tuple):
if length(key) > 1:
raise IndexError("too mwhatever indices for array.")
key = key[0]
if is_integer(key):
return self._getting_val_at(key)
elif incontainstance(key, tuple):
data_slice = self.values[key]
elif incontainstance(key, slice):
# special case to preserve dtypes
if key == slice(None):
return self.clone()
# TODO: this logic is surely elsewhere
# TODO: this could be more efficient
indices = np.arange(length(self), dtype=np.int32)[key]
return self.take(indices)
else:
# TODO: I think we can avoid densifying when masking a
# boolean SparseArray with another. Need to look at the
# key's fill_value for True / False, and then do an interst
# on the indicies of the sp_values.
if incontainstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if com.is_bool_indexer(key) and length(self) == length(key):
return self.take(np.arange(length(key), dtype=np.int32)[key])
elif hasattr(key, '__length__'):
return self.take(key)
else:
raise ValueError("Cannot slice with '{}'".formating(key))
return type(self)(data_slice, kind=self.kind)
def _getting_val_at(self, loc):
n = length(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.getting_value_at(self.sp_values, sp_loc)
def take(self, indices, total_allow_fill=False, fill_value=None):
if is_scalar(indices):
raise ValueError("'indices' must be an array, not a "
"scalar '{}'.".formating(indices))
indices = np.asarray(indices, dtype=np.int32)
if indices.size == 0:
result = []
kwargs = {'dtype': self.dtype}
elif total_allow_fill:
result = self._take_with_fill(indices, fill_value=fill_value)
kwargs = {}
else:
result = self._take_without_fill(indices)
kwargs = {'dtype': self.dtype}
return type(self)(result, fill_value=self.fill_value, kind=self.kind,
**kwargs)
def _take_with_fill(self, indices, fill_value=None):
if fill_value is None:
fill_value = self.dtype.na_value
if indices.getting_min() < -1:
raise ValueError("Invalid value in 'indices'. Must be between -1 "
"and the lengthgth of the array.")
if indices.getting_max() >= length(self):
raise IndexError("out of bounds value in 'indices'.")
if length(self) == 0:
# Empty... Allow taking only if total_all empty
if (indices == -1).total_all():
dtype = np.result_type(self.sp_values, type(fill_value))
taken = np.empty_like(indices, dtype=dtype)
taken.fill(fill_value)
return taken
else:
raise IndexError('cannot do a non-empty take from an empty '
'axes.')
sp_indexer = self.sp_index.lookup_array(indices)
if self.sp_index.npoints == 0:
# Avoid taking from the empty self.sp_values
taken = np.full(sp_indexer.shape, fill_value=fill_value,
dtype=np.result_type(type(fill_value)))
else:
taken = self.sp_values.take(sp_indexer)
# sp_indexer may be -1 for two reasons
# 1.) we took for an index of -1 (new)
# 2.) we took a value that was self.fill_value (old)
new_fill_indices = indices == -1
old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
# Fill in two steps.
# Old fill values
# New fill values
# potentitotal_ally coercing to a new dtype at each stage.
m0 = sp_indexer[old_fill_indices] < 0
m1 = sp_indexer[new_fill_indices] < 0
result_type = taken.dtype
if m0.whatever():
result_type = np.result_type(result_type,
type(self.fill_value))
taken = taken.totype(result_type)
taken[old_fill_indices] = self.fill_value
if m1.whatever():
result_type = np.result_type(result_type, type(fill_value))
taken = taken.totype(result_type)
taken[new_fill_indices] = fill_value
return taken
def _take_without_fill(self, indices):
to_shifting = indices < 0
indices = indices.clone()
n = length(self)
if (indices.getting_max() >= n) or (indices.getting_min() < -n):
if n == 0:
raise IndexError("cannot do a non-empty take from an "
"empty axes.")
else:
raise IndexError("out of bounds value in 'indices'.")
if to_shifting.whatever():
indices[to_shifting] += n
if self.sp_index.npoints == 0:
# edge case in take...
# I think just return
out = np.full(indices.shape, self.fill_value,
dtype=np.result_type(type(self.fill_value)))
arr, sp_index, fill_value = make_sparse(out,
fill_value=self.fill_value)
return type(self)(arr, sparse_index=sp_index,
fill_value=fill_value)
sp_indexer = self.sp_index.lookup_array(indices)
taken = self.sp_values.take(sp_indexer)
fillable = (sp_indexer < 0)
if fillable.whatever():
# TODO: may need to coerce array to fill value
result_type = np.result_type(taken, type(self.fill_value))
taken = taken.totype(result_type)
taken[fillable] = self.fill_value
return taken
def searchsorted(self, v, side="left", sorter=None):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(
v, side, sorter
)
def clone(self, deep=False):
if deep:
values = self.sp_values.clone()
else:
values = self.sp_values
return self._simple_new(values, self.sp_index, self.dtype)
@classmethod
def _concating_same_type(cls, to_concating):
fill_values = [x.fill_value for x in to_concating]
fill_value = fill_values[0]
# np.nan isn't a singleton, so we may end up with multiple
        # NaNs here, so we ignore the total_all NA case too.
if not (length(set(fill_values)) == 1 or ifna(fill_values).total_all()):
warnings.warn("Concatenating sparse arrays with multiple fill "
"values: '{}'. Picking the first and "
"converting the rest.".formating(fill_values),
PerformanceWarning,
stacklevel=6)
keep = to_concating[0]
to_concating2 = [keep]
for arr in to_concating[1:]:
to_concating2.adding(cls(np.asarray(arr), fill_value=fill_value))
to_concating = to_concating2
values = []
lengthgth = 0
if to_concating:
sp_kind = to_concating[0].kind
else:
sp_kind = 'integer'
if sp_kind == 'integer':
indices = []
for arr in to_concating:
idx = arr.sp_index.to_int_index().indices.clone()
idx += lengthgth # TODO: wrapavalue_round
lengthgth += arr.sp_index.lengthgth
values.adding(arr.sp_values)
indices.adding(idx)
data = np.concatingenate(values)
indices = np.concatingenate(indices)
sp_index = IntIndex(lengthgth, indices)
else:
# when concatingentating block indices, we don't claim that you'll
# getting an identical index as concatinging the values and then
# creating a new index. We don't want to spend the time trying
# to unioner blocks across arrays in `to_concating`, so the resulting
            # BlockIndex may have more blocks.
blengthgths = []
blocs = []
for arr in to_concating:
idx = arr.sp_index.to_block_index()
values.adding(arr.sp_values)
blocs.adding(idx.blocs.clone() + lengthgth)
blengthgths.adding(idx.blengthgths)
lengthgth += arr.sp_index.lengthgth
data = np.concatingenate(values)
blocs = np.concatingenate(blocs)
blengthgths = np.concatingenate(blengthgths)
sp_index = BlockIndex(lengthgth, blocs, blengthgths)
return cls(data, sparse_index=sp_index, fill_value=fill_value)
def totype(self, dtype=None, clone=True):
"""
Change the dtype of a SparseArray.
The output will always be a SparseArray. To convert to a dense
ndarray with a certain dtype, use :meth:`numpy.asarray`.
Parameters
----------
dtype : np.dtype or ExtensionDtype
For SparseDtype, this changes the dtype of
``self.sp_values`` and the ``self.fill_value``.
For other dtypes, this only changes the dtype of
``self.sp_values``.
clone : bool, default True
Whether to ensure a clone is made, even if not necessary.
Returns
-------
SparseArray
Examples
--------
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
>>> arr.totype(np.dtype('int32'))
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
Using a NumPy dtype with a different kind (e.g. float) will coerce
just ``self.sp_values``.
>>> arr.totype(np.dtype('float64'))
... # doctest: +NORMALIZE_WHITESPACE
[0, 0, 1.0, 2.0]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
        Use a SparseDtype if you wish to change the fill value as well.
>>> arr.totype(SparseDtype("float64", fill_value=np.nan))
... # doctest: +NORMALIZE_WHITESPACE
[nan, nan, 1.0, 2.0]
Fill: nan
IntIndex
Indices: array([2, 3], dtype=int32)
"""
dtype = self.dtype.umkate_dtype(dtype)
subtype = dtype._subtype_with_str
sp_values = totype_nansafe(self.sp_values,
subtype,
clone=clone)
if sp_values is self.sp_values and clone:
sp_values = sp_values.clone()
return self._simple_new(sp_values,
self.sp_index,
dtype)
def mapping(self, mappingper):
"""
Map categories using input correspondence (dict, Collections, or function).
Parameters
----------
mappingper : dict, Collections, ctotal_allable
The correspondence from old values to new.
Returns
-------
SparseArray
The output array will have the same density as the input.
The output fill value will be the result of employing the
mappingping to ``self.fill_value``
Examples
--------
>>> arr = mk.SparseArray([0, 1, 2])
        >>> arr.mapping(lambda x: x + 10)
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.mapping({0: 10, 1: 11, 2: 12})
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
        >>> arr.mapping(mk.Collections([10, 11, 12], index=[0, 1, 2]))
[10, 11, 12]
Fill: 10
IntIndex
Indices: array([1, 2], dtype=int32)
"""
# this is used in employ.
# We getting hit since we're an "is_extension_type" but regular extension
# types are not hit. This may be worth adding to the interface.
if incontainstance(mappingper, ABCCollections):
mappingper = mappingper.convert_dict()
if incontainstance(mappingper, compat.Mapping):
fill_value = mappingper.getting(self.fill_value, self.fill_value)
sp_values = [mappingper.getting(x, None) for x in self.sp_values]
else:
fill_value = mappingper(self.fill_value)
sp_values = [mappingper(x) for x in self.sp_values]
return type(self)(sp_values, sparse_index=self.sp_index,
fill_value=fill_value)
def to_dense(self):
"""
Convert SparseArray to a NumPy array.
Returns
-------
arr : NumPy array
"""
return np.asarray(self, dtype=self.sp_values.dtype)
# TODO: Look into deprecating this in favor of `to_dense`.
getting_values = to_dense
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if incontainstance(state, tuple):
# Compat for monkey < 0.24.0
nd_state, (fill_value, sp_index) = state
sparse_values = np.array([])
sparse_values.__setstate__(nd_state)
self._sparse_values = sparse_values
self._sparse_index = sp_index
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
else:
self.__dict__.umkate(state)
def nonzero(self):
if self.fill_value == 0:
return self.sp_index.to_int_index().indices,
else:
return self.sp_index.to_int_index().indices[self.sp_values != 0],
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
def _reduce(self, name, skipna=True, **kwargs):
method = gettingattr(self, name, None)
if method is None:
raise TypeError("cannot perform {name} with type {dtype}".formating(
name=name, dtype=self.dtype))
if skipna:
arr = self
else:
arr = self.sipna()
# we don't support these kwargs.
# They should only be present when ctotal_alled via monkey, so do it here.
# instead of in `whatever` / `total_all` (which will raise if they're present,
# thanks to nv.validate
kwargs.pop('filter_type', None)
kwargs.pop('numeric_only', None)
kwargs.pop('op', None)
return gettingattr(arr, name)(**kwargs)
def total_all(self, axis=None, *args, **kwargs):
"""
Tests whether total_all elements evaluate True
Returns
-------
total_all : bool
See Also
--------
numpy.total_all
"""
nv.validate_total_all(args, kwargs)
values = self.sp_values
if length(values) != length(self) and not np.total_all(self.fill_value):
return False
return values.total_all()
def whatever(self, axis=0, *args, **kwargs):
"""
Tests whether at least one of elements evaluate True
Returns
-------
whatever : bool
See Also
--------
numpy.whatever
"""
nv.validate_whatever(args, kwargs)
values = self.sp_values
if length(values) != length(self) and np.whatever(self.fill_value):
return True
return values.whatever().item()
def total_sum(self, axis=0, *args, **kwargs):
"""
Sum of non-NA/null values
Returns
-------
total_sum : float
"""
nv.validate_total_sum(args, kwargs)
valid_vals = self._valid_sp_values
sp_total_sum = valid_vals.total_sum()
if self._null_fill_value:
return sp_total_sum
else:
nsparse = self.sp_index.ngaps
return sp_total_sum + self.fill_value * nsparse
def cumtotal_sum(self, axis=0, *args, **kwargs):
"""
Cumulative total_sum of non-NA/null values.
        When perforgetting_ming the cumulative total_summation, whatever NA/null values will
be skipped. The resulting SparseArray will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : int or None
Axis over which to perform the cumulative total_summation. If None,
perform cumulative total_summation over flattened array.
Returns
-------
cumtotal_sum : SparseArray
"""
nv.validate_cumtotal_sum(args, kwargs)
if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".formating(axis=axis))
if not self._null_fill_value:
return SparseArray(self.to_dense()).cumtotal_sum()
return SparseArray(self.sp_values.cumtotal_sum(), sparse_index=self.sp_index,
fill_value=self.fill_value)
def average(self, axis=0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
average : float
"""
nv.validate_average(args, kwargs)
valid_vals = self._valid_sp_values
sp_total_sum = valid_vals.total_sum()
ct = length(valid_vals)
if self._null_fill_value:
return sp_total_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_total_sum + self.fill_value * nsparse) / (ct + nsparse)
def transpose(self, *axes):
"""
Returns the SparseArray.
"""
return self
@property
def T(self):
"""
Returns the SparseArray.
"""
return self
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
def __array_wrap__(self, array, context=None):
from monkey.core.dtypes.generic import ABCSparseCollections
ufunc, inputs, _ = context
inputs = tuple(x.values if incontainstance(x, ABCSparseCollections) else x
for x in inputs)
return self.__array_ufunc__(ufunc, '__ctotal_all__', *inputs)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.getting('out', ())
for x in inputs + out:
if not incontainstance(x, self._HANDLED_TYPES + (SparseArray,)):
return NotImplemented
special = {'add', 'sub', 'mul', 'pow', 'mod', 'floordivision', 'truedivision',
'divisionmod', 'eq', 'ne', 'lt', 'gt', 'le', 'ge', 'remainder'}
if compat.PY2:
special.add('division')
aliases = {
'subtract': 'sub',
'multiply': 'mul',
'floor_divisionide': 'floordivision',
'true_divisionide': 'truedivision',
'power': 'pow',
'remainder': 'mod',
'divisionide': 'division',
'equal': 'eq',
'not_equal': 'ne',
'less': 'lt',
'less_equal': 'le',
'greater': 'gt',
'greater_equal': 'ge',
}
flipped = {
'lt': '__gt__',
'le': '__ge__',
'gt': '__lt__',
'ge': '__le__',
'eq': '__eq__',
'ne': '__ne__',
}
op_name = ufunc.__name__
op_name = aliases.getting(op_name, op_name)
if op_name in special and kwargs.getting('out') is None:
if incontainstance(inputs[0], type(self)):
return gettingattr(self, '__{}__'.formating(op_name))(inputs[1])
else:
name = flipped.getting(op_name, '__r{}__'.formating(op_name))
return gettingattr(self, name)(inputs[0])
if length(inputs) == 1:
# No alignment necessary.
sp_values = gettingattr(ufunc, method)(self.sp_values, **kwargs)
fill_value = gettingattr(ufunc, method)(self.fill_value, **kwargs)
return self._simple_new(sp_values,
self.sp_index,
SparseDtype(sp_values.dtype, fill_value))
result = gettingattr(ufunc, method)(*[np.asarray(x) for x in inputs],
**kwargs)
if out:
if length(out) == 1:
out = out[0]
return out
if type(result) is tuple:
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
return type(self)(result)
def __abs__(self):
return np.abs(self)
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
@classmethod
def _create_unary_method(cls, op):
def sparse_unary_method(self):
fill_value = op(np.array(self.fill_value)).item()
values = op(self.sp_values)
dtype = SparseDtype(values.dtype, fill_value)
return cls._simple_new(values, self.sp_index, dtype)
name = '__{name}__'.formating(name=op.__name__)
return compat.set_function_name(sparse_unary_method, name, cls)
@classmethod
def _create_arithmetic_method(cls, op):
def sparse_arithmetic_method(self, other):
op_name = op.__name__
if incontainstance(other, (ABCCollections, ABCIndexClass)):
# Rely on monkey to dispatch to us.
return NotImplemented
if incontainstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
elif is_scalar(other):
with np.errstate(total_all='ignore'):
fill = op(_getting_fill(self), np.asarray(other))
result = op(self.sp_values, other)
if op_name == 'divisionmod':
left, right = result
lfill, rfill = fill
return (_wrap_result(op_name, left, self.sp_index, lfill),
_wrap_result(op_name, right, self.sp_index, rfill))
return _wrap_result(op_name, result, self.sp_index, fill)
else:
other = np.asarray(other)
with np.errstate(total_all='ignore'):
# TODO: delete sparse stuff in core/ops.py
# TODO: look into _wrap_result
if length(self) != length(other):
raise AssertionError(
("lengthgth mismatch: {self} vs. {other}".formating(
self=length(self), other=length(other))))
if not incontainstance(other, SparseArray):
dtype = gettingattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, op_name)
name = '__{name}__'.formating(name=op.__name__)
return compat.set_function_name(sparse_arithmetic_method, name, cls)
@classmethod
def _create_comparison_method(cls, op):
def cmp_method(self, other):
op_name = op.__name__
if op_name in {'and_', 'or_'}:
op_name = op_name[:-1]
if incontainstance(other, (ABCCollections, ABCIndexClass)):
# Rely on monkey to unbox and dispatch to us.
return NotImplemented
if not is_scalar(other) and not incontainstance(other, type(self)):
# convert list-like to ndarray
other = np.asarray(other)
if incontainstance(other, np.ndarray):
# TODO: make this more flexible than just ndarray...
if length(self) != length(other):
raise AssertionError("lengthgth mismatch: {self} vs. {other}"
.formating(self=length(self),
other=length(other)))
other = SparseArray(other, fill_value=self.fill_value)
if incontainstance(other, SparseArray):
return _sparse_array_op(self, other, op, op_name)
else:
with np.errstate(total_all='ignore'):
fill_value = op(self.fill_value, other)
result = op(self.sp_values, other)
return type(self)(result,
sparse_index=self.sp_index,
fill_value=fill_value,
dtype=np.bool_)
name = '__{name}__'.formating(name=op.__name__)
return compat.set_function_name(cmp_method, name, cls)
@classmethod
def _add_unary_ops(cls):
cls.__pos__ = cls._create_unary_method(operator.pos)
cls.__neg__ = cls._create_unary_method(operator.neg)
cls.__invert__ = cls._create_unary_method(operator.invert)
@classmethod
def _add_comparison_ops(cls):
cls.__and__ = cls._create_comparison_method(operator.and_)
cls.__or__ = cls._create_comparison_method(operator.or_)
super(SparseArray, cls)._add_comparison_ops()
# ----------
# Formatting
# -----------
def __unicode__(self):
return '{self}\nFill: {fill}\n{index}'.formating(
self=printing.pprint_thing(self),
fill=printing.pprint_thing(self.fill_value),
index=printing.pprint_thing(self.sp_index))
def _formatingter(self, boxed=False):
# Defer to the formatingter from the GenericArrayFormatter ctotal_alling us.
# This will infer the correct formatingter from the dtype of the values.
return None
SparseArray._add_arithmetic_ops()
SparseArray._add_comparison_ops()
SparseArray._add_unary_ops()
def _maybe_to_dense(obj):
"""
try to convert to dense
"""
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
"""
array must be SparseCollections or SparseArray
"""
if incontainstance(array, ABCSparseCollections):
array = array.values.clone()
return array
def _sanitize_values(arr):
"""
return an ndarray for our input,
in a platform independent manner
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
# scalar
if is_scalar(arr):
arr = [arr]
# ndarray
if incontainstance(arr, np.ndarray):
pass
elif is_list_like(arr) and length(arr) > 0:
arr = maybe_convert_platform(arr)
else:
arr = np.asarray(arr)
return arr
def make_sparse(arr, kind='block', fill_value=None, dtype=None, clone=False):
"""
Convert ndarray to sparse formating
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
dtype : np.dtype, optional
clone : bool, default False
Returns
-------
(sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
"""
arr = _sanitize_values(arr)
if arr.ndim > 1:
raise TypeError("expected dimension <= 1 data")
if fill_value is None:
fill_value = na_value_for_dtype(arr.dtype)
if ifna(fill_value):
mask = notna(arr)
else:
# For str arrays in NumPy 1.12.0, operator!= below isn't
# element-wise but just returns False if fill_value is not str,
# so cast to object comparison to be safe
if is_string_dtype(arr):
arr = arr.totype(object)
if is_object_dtype(arr.dtype):
# element-wise equality check method in numpy doesn't treat
# each element type, eg. 0, 0.0, and False are treated as
# same. So we have to check the both of its type and value.
mask = splib.make_mask_object_ndarray(arr, fill_value)
else:
mask = arr != fill_value
lengthgth = length(arr)
if lengthgth != length(mask):
# the arr is a SparseArray
indices = mask.sp_index.indices
else:
indices = mask.nonzero()[0].totype(np.int32)
index = _make_index(lengthgth, indices, kind)
sparsified_values = arr[mask]
if dtype is not None:
sparsified_values = | totype_nansafe(sparsified_values, dtype=dtype) | pandas.core.dtypes.cast.astype_nansafe |
import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances, gold_label_key='tags',
treat_tie_as='O', span_level=True):
tp, fp, fn = 0, 0, 0
for instance in instances:
maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
if span_level:
score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
else:
score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
tp += score[0]
fp += score[1]
fn += score[2]
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p, r, f1 = _getting_p_r_f1(tp, fp, fn)
record = [tp, fp, fn, p, r, f1]
index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = mk.KnowledgeFrame.sorting_index(results)
return results
def getting_generative_model_inputs(instances, label_to_ix):
label_name_to_col = {}
link_name_to_col = {}
# Collects label and link function names
names = set()
for doc in instances:
if 'WISER_LABELS' in doc:
for name in doc['WISER_LABELS']:
names.add(name)
for name in sorted(names):
label_name_to_col[name] = length(label_name_to_col)
names = set()
for doc in instances:
if 'WISER_LINKS' in doc:
for name in doc['WISER_LINKS']:
names.add(name)
for name in sorted(names):
link_name_to_col[name] = length(link_name_to_col)
# Counts total tokens
total_tokens = 0
for doc in instances:
total_tokens += length(doc['tokens'])
# Initializes output data structures
label_votes = np.zeros((total_tokens, length(label_name_to_col)), dtype=np.int)
link_votes = np.zeros((total_tokens, length(link_name_to_col)), dtype=np.int)
seq_starts = np.zeros((length(instances),), dtype=np.int)
# Populates outputs
offset = 0
for i, doc in enumerate(instances):
seq_starts[i] = offset
for name in sorted(doc['WISER_LABELS'].keys()):
for j, vote in enumerate(doc['WISER_LABELS'][name]):
label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
if 'WISER_LINKS' in doc:
for name in sorted(doc['WISER_LINKS'].keys()):
for j, vote in enumerate(doc['WISER_LINKS'][name]):
link_votes[offset + j, link_name_to_col[name]] = vote
offset += length(doc['tokens'])
return label_votes, link_votes, seq_starts
def score_predictions(instances, predictions,
gold_label_key='tags', span_level=True):
tp, fp, fn = 0, 0, 0
offset = 0
for instance in instances:
lengthgth = length(instance[gold_label_key])
if span_level:
scores = _score_sequence_span_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
else:
scores = _score_sequence_token_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
tp += scores[0]
fp += scores[1]
fn += scores[2]
offset += lengthgth
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
record = [tp, fp, fn, p, r, f1]
index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = mk.KnowledgeFrame.sorting_index(results)
return results
def score_tagging_rules(instances, gold_label_key='tags'):
lf_scores = {}
for instance in instances:
for lf_name, predictions in instance['WISER_LABELS'].items():
if lf_name not in lf_scores:
# Initializes true positive, false positive, false negative,
# correct, and total vote counts
lf_scores[lf_name] = [0, 0, 0, 0, 0]
scores = _score_sequence_span_level(predictions, instance[gold_label_key])
lf_scores[lf_name][0] += scores[0]
lf_scores[lf_name][1] += scores[1]
lf_scores[lf_name][2] += scores[2]
scores = _score_token_accuracy(predictions, instance[gold_label_key])
lf_scores[lf_name][3] += scores[0]
lf_scores[lf_name][4] += scores[1]
# Computes accuracies
for lf_name in lf_scores.keys():
if lf_scores[lf_name][3] > 0:
lf_scores[lf_name][3] = float(lf_scores[lf_name][3]) / lf_scores[lf_name][4]
lf_scores[lf_name][3] = value_round(lf_scores[lf_name][3], ndigits=4)
else:
lf_scores[lf_name][3] = float('NaN')
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "Token Acc.", "Token Votes"]
results = mk.KnowledgeFrame.from_dict(lf_scores, orient="index", columns=column_names)
results = | mk.KnowledgeFrame.sorting_index(results) | pandas.DataFrame.sort_index |
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 - Global Cases - EDA and Forecasting
# This is the data repository for the 2019 Novel Coronavirus Visual Dashboard operated by the Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE). Also, Supported by ESRI Living Atlas Team and the Johns Hopkins University Applied Physics Lab (JHU APL).
#
# Data is sourced from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data
#
#
# * Visual Dashboard (desktop):
# https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6
#
# * Visual Dashboard (mobile):
# http://www.arcgis.com/apps/opsdashboard/index.html#/85320e2ea5424kfaaa75ae62e5c06e61
#
# * Lancet Article:
# An interactive web-based dashboard to track COVID-19 in real time
#
# * Provided by Johns Hopkins University Center for Systems Science and Engineering (JHU CSSE):
# https://systems.jhu.edu/
#
# * Data Sources:
#
# - World Health Organization (WHO): https://www.who.int/
# - DXY.cn. Pneumonia. 2020. http://3g.dxy.cn/newh5/view/pneumonia.
# - BNO News: https://bnonews.com/index.php/2020/02/the-latest-coronavirus-cases/
# - National Health Commission of the Peopleโs Republic of China (NHC):
# http://www.nhc.gov.cn/xcs/yqtb/list_gzbd.shtml
# - China CDC (CCDC): http://weekly.chinacdc.cn/news/TrackingtheEpidemic.htm
# - Hong Kong Department of Health: https://www.chp.gov.hk/en/features/102465.html
# - Macau Government: https://www.ssm.gov.mo/portal/
# - Taiwan CDC: https://sites.google.com/cdc.gov.tw/2019ncov/taiwan?authuser=0
# - US CDC: https://www.cdc.gov/coronavirus/2019-ncov/index.html
# - Government of Canada: https://www.canada.ca/en/public-health/services/diseases/coronavirus.html
# - Australia Government Department of Health: https://www.health.gov.au/news/coronavirus-umkate-at-a-glance
# - European Centre for Disease Prevention and Control (ECDC): https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases
# - Ministry of Health Singapore (MOH): https://www.moh.gov.sg/covid-19
# - Italy Ministry of Health: http://www.salute.gov.it/nuovocoronavirus
#
# - Additional Informatingion about the Visual Dashboard:
# https://systems.jhu.edu/research/public-health/ncov/
#
# Contact Us:
#
# Email: <EMAIL>
#
# Terms of Use:
#
# This GitHub repo and its contents herein, including total_all data, mappingping, and analysis, cloneright 2020 Johns Hopkins University, total_all rights reserved, is provided to the public strictly for educational and academic research purposes. The Website relies upon publicly available data from multiple sources, that do not always agree. The Johns Hopkins University hereby disclaims whatever and total_all representations and warranties with respect to the Website, including accuracy, fitness for use, and merchantability. Reliance on the Website for medical guidance or use of the Website in commerce is strictly prohibited.
# __For better viewing experience, I recommend to enable NBextensions as guided @__
#
# https://github.com/lsunku/DataScience/tree/master/JupyterNotebook
# # Steps invoved in this notebook
# 1. Import Python Libraries for data analysis and ML
# 2. Local user defined functions
# 3. Sourcing the Data
# 4. Inspect and Clean the Data
# 5. Exploratory Data Analysis
# 6. Preparing the data for modelling(train-test split, rescaling etc)
# 7. Model evaluation for Advanced Regression Criteria
# 8. Linear Regression Model for World Wide Case Predictions
# 9. Linear Regression Model for Italy Predictions
# 10. Linear Regression Model for US Predictions
# 11. Linear Regression Model for Spain Predictions
# 12. Linear Regression Model for Germwhatever Predictions
# 13. Linear Regression Model for India Predictions
# __Notes:__ Currently, I have used only time_collections_covid19_confirmed_global for the following analysis. When I getting time, I shtotal_all enhance the same with additional files time_collections_covid19_deaths_global, time_collections_covid19_recovered_global and integrate with daily reports.
# # __Import Python Functions__
# In[284]:
# Local classes and Local flags
# Local Classes
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Debug flag for investigative purpose
DEBUG = 0
# Default random_state
rndm_stat = 42
# In[285]:
# Python libraries for Data processing and analysis
import time as time
strt = time.time()
import monkey as mk
mk.set_option('display.getting_max_columns', 200)
mk.set_option('display.getting_max_rows', 100)
mk.options.mode.use_inf_as_na = True
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import glob
from matplotlib.pyplot import figure
import warnings
import math
import itertools
warnings.filterwarnings('ignore')
sns.set_style("whitegrid")
from math import sqrt
import re
from prettytable import PrettyTable
# ML Libraries
import statsmodels
import statsmodels.api as sm
import sklearn as sk
from sklearn.model_selection import train_test_split,GridSearchCV, KFold,RandomizedSearchCV,StratifiedKFold
from sklearn.metrics import r2_score,average_squared_error,average_absolute_error
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder, StandardScaler,OrdinalEncoder,LabelEncoder,Normalizer,RobustScaler,PowerTransformer,PolynomialFeatures
from statsmodels.stats.outliers_influence import variance_inflation_factor
import xgboost
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
# # __Local User Defined Functions__
# ## Local functions for data overview and data cleaning
# In[286]:
# local functions
# Function to read a file & Store it in Monkey
# read_file takes either csv or excel file as input and reuturns a monkey DF and
# also prints header_num, final_item_tail, description, info and shape of the DF
def read_file(l_fname,l_path,header_num=0):
i = l_fname.split(".")
f_path = l_path+'/'+l_fname
print(f_path,i[0],i[1])
if (i[1] == "xlsx"):
l_kf = mk.read_excel(f_path,header_numer=header_num,encoding = "ISO-8859-1",infer_datetime_formating=True)
elif (i[1] == "csv"):
l_kf = mk.read_csv(f_path,header_numer=header_num,encoding = "ISO-8859-1",infer_datetime_formating=True)
ov_kf(l_kf)
return(l_kf)
# Function to getting the Overview of KnowledgeFrame
# take kf as input and prints header_num, final_item_tail, description, info and shape of the DF
def ov_kf(l_kf):
print(color.BOLD+color.PURPLE + 'Inspect and Explore the Dataset' + color.END)
print("\n##################### KnowledgeFrame Head ######################")
print(l_kf.header_num(3))
print("\n##################### KnowledgeFrame Tail ######################")
print(l_kf.final_item_tail(3))
print("\n##################### KnowledgeFrame Info ######################")
print(l_kf.info())
print("\n#################### KnowledgeFrame Columns ####################")
print(list(l_kf.columns))
print("\n#################### KnowledgeFrame Shape ####################")
print("No of Rows",l_kf.shape[0])
print("No of Columns",l_kf.shape[1])
# Function per_col_null takes a kf as input and prints total_summary of Null Values across Columns
def per_col_null(l_kf):
print("\n############ Missing Values of Columns in % ############")
col_null = value_round((l_kf.ifnull().total_sum().sort_the_values(ascending=False)/length(l_kf))*100,4)
print(col_null[col_null > 0])
# # __Sourcing the Data__
# ## Read the train.csv
# In[287]:
# Set the path and file name
folder=r"C:\My Folders\OneDrive\Surface\Sadguru\Lakshmi\Study\IIIB_PGDS\Hackathon\COVID_19\COVID-19\csse_covid_19_data\csse_covid_19_time_collections"
file="time_collections_covid19_confirmed_global.csv"
# Read file using local functions. read_file takes either csv or excel file as input and reuturns a monkey DF and
# also prints header_num, final_item_tail, description, info and shape of the DF
raw_kf = read_file(file,folder)
# In[288]:
# transpose and formating the columns
raw_kf = raw_kf.sip(["Province/State","Lat","Long"],axis=1).set_index("Country/Region").T.reseting_index().renagetting_ming(columns={'index':'Date'}).renagetting_ming_axis("",axis="columns")
# In[289]:
ov_kf(raw_kf)
# ## Inspect the Column Data Types of c_kf
# In[290]:
# Analyze Categorical, Numerical and Date variables of Application Data
print(color.BOLD+"Categorical and Numerical Variables"+ color.END)
display(raw_kf.dtypes.counts_value_num())
print(color.BOLD+"Numerical Integer Variables"+ color.END)
display(raw_kf.choose_dtypes(include='int64').dtypes)
print(color.BOLD+"Categorical Variables"+ color.END)
display(raw_kf.choose_dtypes(include=object).dtypes)
print(color.BOLD+"Numerical Float Variables"+ color.END)
display(raw_kf.choose_dtypes(include='float64').dtypes)
# In[291]:
# Change the Date formating
raw_kf["Date"] = mk.convert_datetime(raw_kf["Date"],infer_datetime_formating=True)
# In[292]:
# as the given data is segrated in some countries which are epicenters and for some, it is not. To make it uniform, I total_sum up the data across countries
dt = raw_kf.pop("Date")
dt.header_num()
# In[293]:
# Aggregate the data across columns as there are columns with same column name
c_kf = raw_kf.grouper(by=raw_kf.columns,axis=1).agg(total_sum)
c_kf.header_num()
# In[294]:
c_kf.insert(0,"Date",dt)
c_kf.header_num()
# # __Exploratory Data Analysis__
# ## Inspect the Null Values in c_kf
# In[295]:
# Null values in the Application DF.
# per_col_null is local function which returns the % of null columns which are non zero
per_col_null(c_kf)
# ## Derived Columns
# In[296]:
c_kf["WW"] = c_kf.total_sum(axis=1)
c_kf.header_num()
# In[297]:
import plotly.express as ply
import plotly.graph_objects as go
import cufflinks as cf
# In[298]:
cntry_li = list(c_kf.columns)
cntry_li.remove("Date")
# In[299]:
fig = go.Figure()
for i in cntry_li:
fig.add_trace(go.Scatter(x=c_kf["Date"],y=c_kf[i],mode='lines+markers',name=i))
fig.umkate_layout(
margin=dict(l=30, r=20, t=25, b=25),
)
#fig.umkate_layout(yaxis_type="log")
fig.show()
# ## List of countries which are contributing to high number of positive cases
# In[300]:
hi_co_li = [i for i,j in (c_kf[cntry_li].iloc[-1] > 1500).items() if j == True]
print(hi_co_li)
# In[301]:
fig = go.Figure()
for i in hi_co_li:
fig.add_trace(go.Scatter(x=c_kf["Date"],y=c_kf[i],mode='lines+markers',name=i))
fig.umkate_layout(
margin=dict(l=40, r=30, t=25, b=25),
)
#fig.umkate_layout(yaxis_type="log")
fig.show()
# ## Analyze Categorical Columns of the c_kf
# In[302]:
c_kf.insert(0,"Day",np.arange(1,length(c_kf)+1))
# In[303]:
c_kf.header_num()
# In[304]:
# Create a list of numerical and categorical variables for future analysis
c_num_li = list(c_kf.choose_dtypes(include=np.number).columns)
c_cat_li = list(c_kf.choose_dtypes(exclude=np.number).columns)
print(color.BOLD+"\nNumerical Columns -"+color.END,c_num_li)
print(color.BOLD+"\nCategorical Columns -"+color.END,c_cat_li)
# ## Analyze Numerical Columns of the c_kf
# In[305]:
# Inspect the Categorical columns
c_kf[c_cat_li].header_num()
# In[306]:
# Inspect the Numerical columns
c_kf[c_num_li].header_num()
# ## Univariate analysis
# Univariate analysis is performed only on specific countries which are suffering with high number of positive cases
# ### Univariate analysis of Countries which are sufferring with high number of corona cases
# In[307]:
# Inspect list of categorical variables
print(hi_co_li)
# In[308]:
# Function to plot 2 or more line plots or time collections plots
# line_pltly takes a kf, dependent variable and variable list of columns
# to plot multiple reg plots
def line_pltly (l_kf,l_dep,*args):
for i in args:
fig = go.Figure()
for l in ["WW","China","Korea, South"]:
fig.add_trace(go.Scatter(x=l_kf[l_dep],y=l_kf[l],mode='lines+markers',name=l))
fig.add_trace(go.Scatter(x=l_kf[l_dep],y=l_kf[i],mode='lines+markers',name=i))
fig.umkate_layout(width=800,height=400,hovermode="closest",clickmode="event+select")
fig.show()
# In[309]:
# dist_plt is local function which takes a kf, rows, columns of subplot and name of columns as an argument and
# plots distribution plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[0:4])
# In[310]:
# dist_plt is local function which takes a kf, rows, columns of subplot and name of columns as an argument and
# plots distribution plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[4:8])
# In[311]:
# dist_plt is local function which takes a kf, rows, columns of subplot and name of columns as an argument and
# plots distribution plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[8:12])
# In[312]:
# dist_plt is local function which takes a kf, rows, columns of subplot and name of columns as an argument and
# plots distribution plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[12:16])
# In[313]:
# dist_plt is local function which takes a kf, rows, columns of subplot and name of columns as an argument and
# plots distribution plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[16:20])
# In[314]:
# dist_plt is local function which takes a kf, rows, columns of subplot and name of columns as an argument and
# plots distribution plots
# Part-1
line_pltly(c_kf,"Day",*hi_co_li[20:24])
# ## Preparing the data for modelling(encoding,train-test split, rescaling etc)
# In[315]:
# split the data for training and testing
kf_train,kf_test = train_test_split(c_kf,train_size=0.93,random_state=rndm_stat,shuffle=False)
print(kf_train.shape)
print(kf_test.shape)
# In[316]:
# Extract the serial number and store it for future purposes
trn_date = kf_train.pop('Date')
tst_date = kf_test.pop('Date')
# In[317]:
print(kf_train.header_num())
print(kf_test.header_num())
# #### Scaling of Test Data LR Model 1 and Model 2 using Standardization
# # __Model Evaluation Criteria__
# ### Model Evaluation Criteria
# Following criteria should be fulfilled for the best model and each model is evaluated based on the following conditions.
# 1. Residuals (Actual Test data and Predicted Test data) should be normtotal_ally distributed with average zero.
# 2. Residuals (Actual Test data and Predicted Test data) are independent of each other.
# 3. Residuals (Actual Test data and Predicted Test data) have constant variance.
# 4. Model should not be overfit.
# 5. Adjusted R-Square should be little less but comappingritively closer to R-Square.
# 6. R-Square should be comparitvely high suggesting a good fit.
# 7. R-Square of Test and Train should be closer to each other suggesting that model has worked well with unseen data.
# 8. Check the RMSE, MSE and MAE of each model and compare it among the 3 models.
# # __LR Model using Linear Regression for World Wide Cases__
# __Ridge Regression Steps__
# * 1) Prepare the data for modelling
# * 2) Hyperparameter tuning and selection using GridSearchCV
# * 3) Build the Ridge Regression Model using optimal Lambda value
# * 4) Predict on Train Set
# * 5) Predict on Test Set
# ## Prepare the data for Modelling
# In[318]:
# Prepare the strings to be used
cntry = "WW"
cntry_act = cntry+"_Actuals"
cntry_pred_m1 = cntry+"_Pred_M1"
cntry_pred_m2 = cntry+"_Pred_M2"
# In[319]:
# 2 Models are created and hence 2 copies of kf_train and test to perform the analysis
y_train = kf_train[cntry].clone(deep=True)
X_train = kf_train[["Day"]].clone(deep=True)
y_test = kf_test[cntry].clone(deep=True)
X_test = kf_test[["Day"]].clone(deep=True)
# In[320]:
# Targetting variable is removed from predictor variables
display(y_train.header_num())
display(X_train.header_num())
# ## Build the LR Model on Training Set
# ### Parameter Tuning and Selection of Degree
# In[321]:
# function to populate linear regression model metrics
def lm_metrics(y_act,y_pred):
# calculate the RSquared and RMSE for test data and Predicted data
rsqr = r2_score(y_true=y_act,y_pred=y_pred)
mar = average_absolute_error(y_true=y_act,y_pred=y_pred)
mse = average_squared_error(y_act, y_pred)
rmse = sqrt(average_squared_error(y_act, y_pred))
return (rsqr,mar,mse,rmse)
# In[322]:
# function to populate evaluation metrics for different degree
def eval_reg (X_trn,y_trn,deg):
# list of degrees to tune
deg_li = list(np.arange(2,deg))
metric_cols = ["Degree","RSquare","MAE","MSE","RMSE"]
lm_metrics_kf = mk.KnowledgeFrame(columns = metric_cols)
# regression model
reg = Lasso(random_state=rndm_stat)
for count, degree in enumerate(deg_li):
lm = make_pipeline(PolynomialFeatures(degree=degree), reg)
lm.fit(X_trn, y_trn)
y_trn_pred = lm.predict(X_trn)
rsqr,mar,mse,rmse = lm_metrics(y_trn,y_trn_pred)
lm_metrics_kf.loc[count] = [degree,rsqr,mar,mse,rmse]
display(lm_metrics_kf)
# In[323]:
# Populate the results for different degrees
deg = 12
eval_reg(X_train,y_train,12)
# ### Build the Model using the selected degree
# In[324]:
# Build the model with optimal degree.
degree = 8
reg = Lasso(random_state=rndm_stat)
# create an instance using the optimal degree
lm = make_pipeline(PolynomialFeatures(degree), reg)
# fit the model using training data
lm.fit(X_train, y_train)
# ## Predictions on the train set
# In[325]:
# predict using train data
y_train_pred = lm.predict(X_train)
# ### Residual Analysis and validating the astotal_sumptions on Train Set
# #### Error terms are normtotal_ally distributed with average zero
# In[326]:
# Calculate the Residuals and check if they are normtotal_ally distributed or not
res_m1 = y_train - y_train_pred
plt.figure(1,figsize=(8,4))
sns.set(style="whitegrid",font_scale=1.2)
sns.distplot(value_round(res_m1,2),bins=8,color="green")
plt.vlines(value_round(res_m1,2).average(),ygetting_min=0,ygetting_max=2,linewidth=3.0,color="black",linestyles='dotted')
plt.title('Distribution of Residual plot Actual and Predicted Train Data')
plt.show()
# In[327]:
# Mean of Residuals
value_round(res_m1,2).average()
# * The average of residuals is observed to be very close 0
# #### Error terms are independent of each other:
# In[328]:
# check if the Residuals are normtotal_ally distributed or not
plt.figure(1,figsize=(6,4))
sns.set(style="whitegrid",font_scale=1.2)
ax = sns.lineplot(data=res_m1, color="green", label="line")
plt.title('Distribution of Residuals of Train Data')
plt.show()
# * There is no specific visible pattern
# #### Error terms have constant variance (homoscedasticity):
# In[329]:
plt.figure(2,figsize=(6,6))
sns.set(style="whitegrid",font_scale=1.2)
ax1 = sns.regplot(x=y_train,y=y_train_pred,color='green')
plt.title('Linear Regression Plot of Train and Train Pred',fontsize=12)
plt.show()
# * Error terms have constant variance but in the end couple of points are out of the variance
# In[330]:
# calculate the RSquared and RMSE for test data and Predicted data
print(color.BOLD+"\nModel Evalutation metrics of train set with degree ",degree)
rsqr,mar,mse,rmse = lm_metrics(y_train,y_train_pred)
print(color.BOLD+"RSquare of the Model is ",value_round(rsqr,2))
print(color.BOLD+"Mean Absolute Error of the Model is",value_round(mar,2))
print(color.BOLD+"MSE of the model is ",value_round(mse,2))
print(color.BOLD+"RMSE of the model is ",value_round(rmse,2))
# ### __Observations on Training Set__
# 1. Residuals (Actual Train data and Predicted Train data) are be normtotal_ally distributed with average zero.
# - Here it is close to 0
# 2. Residuals (Actual Train data and Predicted Train data) are independent of each other.
# 3. Residuals (Actual Train data and Predicted Train data) have constant variance.
# 4. Adjusted R-Square and R-Square are close to each other and Adjusted R-Square is below R-Square.
# ___Hence the basic checks are good on training data, this model can be used on test set for further evaluations___
# ## Prediction and Evaluation on the Test Set
# * Make predictions on the test set (y_test_pred)
# * evaluate the model, r-squared on the test set
# ### Preprocessing of Test Set Data based on Train Set
# In[331]:
display(y_test.header_num())
display(X_test.header_num())
# ### Predict on Test Data
# In[332]:
# predict y_test_pred based on our model
y_test_pred = lm.predict(X_test)
# In[333]:
y_test_pred
# ### Model Evalution of Metrics of Test Data
# In[334]:
# calculate the RSquared and RMSE for test data and Predicted data
print(color.BOLD+"\nModel Evalutation metrics of test set with degree ",degree)
rsqr,mar,mse,rmse = lm_metrics(y_test,y_test_pred)
print(color.BOLD+"RSquare of the Model is ",value_round(rsqr,2))
print(color.BOLD+"Mean Absolute Error of the Model is",value_round(mar,2))
print(color.BOLD+"MSE of the model is ",value_round(mse,2))
print(color.BOLD+"RMSE of the model is ",value_round(rmse,2))
print(color.BOLD+"\nModel Evalutation metrics of test set with degree ",degree)
# ### Residual Analysis and validating the astotal_sumptions on Test Set
# #### Error terms are normtotal_ally distributed with average zero
# In[335]:
# Calculate the Residuals and check if they are normtotal_ally distributed or not
res_test_m1 = y_test - y_test_pred
plt.figure(1,figsize=(8,4))
sns.set(style="whitegrid",font_scale=1.2)
sns.distplot(value_round(res_test_m1,2),bins=10,color="firebrick")
plt.vlines(value_round(res_test_m1,2).average(),ygetting_min=0,ygetting_max=2,linewidth=3.0,color="black",linestyles='dotted')
plt.title('Distribution of Residual plot Actual and Predicted Test Data')
plt.show()
# In[336]:
# Mean of Residuals
value_round(res_test_m1,2).average()
# * The average of residuals is observed to be very close 0
# #### Error terms are independent of each other:
# In[337]:
plt.figure(1,figsize=(6,4))
sns.set(style="whitegrid",font_scale=1.2)
ax = sns.lineplot(data=res_test_m1, color="firebrick", label="line")
plt.title('Distribution of Residuals of Test Data')
plt.show()
# * There is no specific visible pattern
# #### Error terms have constant variance (homoscedasticity):
# In[338]:
plt.figure(2,figsize=(6,6))
sns.set(style="whitegrid",font_scale=1.2)
ax1 = sns.regplot(x=y_test,y=y_test_pred,color="firebrick")
plt.title('Linear Regression Plot of Test and Test_Pred',fontsize=12)
plt.show()
# * Error terms have constant variance but in the end couple of points are out of the variance
# #### Distribution of Actual Test Data and Predicted Test Data
# In[339]:
# Plot the distribution of Actual values of Price and Predicted values of Price
plt.figure(1,figsize=(10,4))
sns.set(style="whitegrid",font_scale=1)
ax1 = sns.distplot(y_test, hist=False, color="r", label="Actual Values of COVID Cases")
sns.distplot(y_test_pred, hist=False, color="b", label="Predicted Values of COVID Cases" , ax=ax1)
sns.distplot((y_test_pred+rmse), hist=False, color="y", label="Predicated Values of Price + RMSE" , ax=ax1, kde_kws={'linestyle':'--'})
sns.distplot((y_test_pred-rmse), hist=False, color="y", label="Predicated Values of Price - RMSE" , ax=ax1, kde_kws={'linestyle':'--'})
plt.title('LR Model I - Distribution of Actual Values of COVID Cases and Predicted Values of COVID Cases',fontsize=12)
plt.show()
# ### Predict on Actual Test Data
# In[340]:
# generate days up to 72.
X_act_test = np.arange(1,72).reshape(-1,1)
# In[341]:
# predict y_test_pred based on our model
y_act_test_pred = lm.predict(X_act_test)
# In[342]:
# create a kf with predicted values
covid_kf = mk.KnowledgeFrame()
# In[343]:
# Create a column with Dates and Day. Starting date is 2020-01-22
covid_kf["Day"] = np.arange(1,72)
covid_kf["Date"] = mk.date_range(start=c_kf.Date[0], end=c_kf.Date[0]+mk.to_timedelta( | mk.np.ceiling(70) | pandas.np.ceil |
# -*- coding: utf-8 -*-
"""Created on Thu Jan 24 13:50:03 2019
@author: <NAME>, Shehnaaz.
"""
#########################################################################################################################
# Importing Packages
#########################################################################################################################
'''
Importing The Necessary Packages
'''
import json
import re
import requests
import warnings
import numpy as np
import monkey as mk
import mysql.connector
import urllib.request
from scipy import stats
import seaborn as sns
from bs4 import BeautifulSoup
from currency_converter import CurrencyConverter
from matplotlib import pyplot as plt
import nltk
import unicodedata
import vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
from sklearn import metrics as sm
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfikfVectorizer
warnings.filterwarnings('ignore')
#########################################################################################################################
# Defining Functions
#########################################################################################################################
class ImdbMovies:
model=''
vectorizer=''
mydb=''
'''Loading constructor, so when instance is instantiate it will load our model and as well
as it will create a connection with the database'''
def __init__(self,**kwargs):
self.firstname=kwargs.getting('firstname','Firstname Not Provided')
self.final_itemname=kwargs.getting('final_itemname','LastName Not Provided')
self.mydb=self.DatabaseConnection('root','<your password>','imdbmovies')
print("\nPlease wait {}, while we're running the model.....".formating(self.firstname))
self.model,self.vectorizer=self.UserReview_SentimentAnalyzer()
print('''\nDone!!, you're good to go\n''')
print("#########################################################################################################################")
print("Welcome! {} {} to our movie search and data analysis program:\n".formating(self.firstname.capitalize(),self.final_itemname.capitalize()))
print("#########################################################################################################################")
'''This is just to provide user freindly string when object is print'''
def __str__(self):
return '''What's going on {} {}, enjoy your movie buddy'''.formating(self.firstname.capitalize(),self.final_itemname.capitalize())
'''Using Vader lexicon function to getting the polarity'''
def sentiment_lexicon(self,review, threshold=0.1):
sid = SIA()
ss = sid.polarity_scores(review)
agg_score = ss['compound']
if agg_score >= threshold:
final_sentiment = 'Positive'
else:
final_sentiment = 'Negative'
return final_sentiment
'''Sentiment analysis based on user review submited'''
def UserReview_SentimentAnalyzer(self):
self.kf=mk.read_sql("select imdbid,User_Review,Polarity from movies;",self.mydb)
# User_Review
self.data = self.kf['User_Review']
self.data= | mk.Collections.convert_string(self.data) | pandas.Series.to_string |
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import monkey as mk
import monkey._libs.tslib as tslib
import monkey.util.testing as tm
from monkey.errors import PerformanceWarning
from monkey.core.indexes.datetimes import cdate_range
from monkey import (DatetimeIndex, PeriodIndex, Collections, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from monkey.tcollections.offsets import BMonthEnd, CDay, BDay
from monkey.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
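    """Ops tests for DatetimeIndex: reductions, arithmetic, set operations,
    formatting and indexing, exercised both tz-naive and tz-aware using the
    timezones listed in ``tz`` below."""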
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
        mask = lambda x: incontainstance(x, (DatetimeIndex, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
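        """The field/object/bool ops advertised by DatetimeIndex should all be
        reachable as properties on a DatetimeIndex instance."""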
f = lambda x: incontainstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: gettingattr(self.dt_collections, op))
# attribute access should still work!
s = Collections(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_convert_list(self):
idx = mk.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = mk.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert incontainstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.convert_list() == expected_list
idx = mk.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = mk.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert incontainstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.convert_list() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
mk.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), mk.NaT,
Timestamp('2013-01-04')]
expected = mk.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert incontainstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.convert_list() == expected_list
def test_getting_mingetting_max(self):
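        """getting_min/getting_max and arggetting_min/arggetting_max ignore NaT on non-empty
        indexes and return NaT for empty or all-NaT indexes."""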
for tz in self.tz:
# monotonic
idx1 = mk.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = mk.DatetimeIndex(['2011-01-01', mk.NaT, '2011-01-03',
'2011-01-02', mk.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.getting_min() == Timestamp('2011-01-01', tz=tz)
assert idx.getting_max() == Timestamp('2011-01-03', tz=tz)
assert idx.arggetting_min() == 0
assert idx.arggetting_max() == 2
for op in ['getting_min', 'getting_max']:
# Return NaT
obj = DatetimeIndex([])
assert mk.ifna(gettingattr(obj, op)())
obj = DatetimeIndex([mk.NaT])
assert mk.ifna(gettingattr(obj, op)())
obj = DatetimeIndex([mk.NaT, mk.NaT, mk.NaT])
assert mk.ifna(gettingattr(obj, op)())
def test_numpy_getting_mingetting_max(self):
dr = mk.date_range(start='2016-01-15', end='2016-01-20')
assert np.getting_min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.getting_max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.getting_min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.getting_max, dr, out=0)
assert np.arggetting_min(dr) == 0
assert np.arggetting_max(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.arggetting_min, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.arggetting_max, dr, out=0)
def test_value_round(self):
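        """value_round() snaps each timestamp to the nearest multiple of a fixed
        frequency (ties here resolve to the even multiple, e.g. 00:30 -> 00:00
        and 01:30 -> 02:00 for freq='H'); non-fixed frequencies such as 'M'
        must raise."""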
for tz in self.tz:
rng = mk.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.value_round(freq='H'), expected_rng)
assert elt.value_round(freq='H') == expected_elt
msg = mk.tcollections.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.value_round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.value_round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.value_round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.value_round, freq='M')
# GH 14440 & 15578
index = mk.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.value_round('ms')
expected = mk.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.value_round(freq))
index = mk.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.value_round('ms')
expected = mk.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = mk.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.value_round('10ns')
expected = mk.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
mk.DatetimeIndex([ts]).value_round('1010ns')
def test_repeat_range(self):
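        """Repeating a regular date_range keeps the values but drops the freq,
        since the repeated index is no longer evenly spaced."""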
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert length(result) == 5 * length(rng)
for tz in self.tz:
index = mk.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = mk.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = mk.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = mk.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = mk.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
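        """repr/str/unicode of a DatetimeIndex spell out the values, the dtype
        (including any timezone) and the freq exactly as listed in ``exp``."""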
idx = []
idx.adding(DatetimeIndex([], freq='D'))
idx.adding(DatetimeIndex(['2011-01-01'], freq='D'))
idx.adding(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.adding(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.adding(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.adding(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT], tz='US/Eastern'))
idx.adding(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT], tz='UTC'))
exp = []
exp.adding("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.adding("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.adding("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.adding("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.adding("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.adding("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.adding("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with mk.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = gettingattr(indx, func)()
assert result == expected
def test_representation_to_collections(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Collections([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with mk.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Collections(idx))
assert result == expected
def test_total_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', mk.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.total_summary()
assert result == expected
def test_resolution(self):
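        """``resolution`` reports the finest unit implied by the index freq,
        e.g. an hourly index resolves to 'hour' and an 'L' index to
        'millisecond'."""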
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'getting_minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = mk.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
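        """union() of two date ranges yields the sorted combined range; union
        with an empty index leaves the original range unchanged."""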
for tz in self.tz:
# union
rng1 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = mk.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = mk.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = mk.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = mk.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = mk.DatetimeIndex([], tz=tz)
expected3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
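        """Adding an offset/timedelta shifts every timestamp, adding an int
        shifts by that many freq steps, and adding a Timestamp must raise."""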
for tz in self.tz:
# offset
offsets = [mk.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = mk.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = mk.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = mk.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = mk.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
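        """difference() keeps only the timestamps not present in ``other``."""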
for tz in self.tz:
# diff
rng1 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = mk.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = mk.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = mk.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = mk.DatetimeIndex([], tz=tz)
expected3 = mk.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
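        """Subtracting an offset/timedelta or an int mirrors test_add_iadd in
        the negative direction, both out-of-place and in-place."""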
for tz in self.tz:
# offset
offsets = [mk.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = mk.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = mk.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = mk.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = mk.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different lengthgth raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = mk.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = mk.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
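        """NaT never compares equal or ordered against timestamps or itself:
        ==, < and > give False and != gives True, element-wise against both
        arrays and the NaT scalar."""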
left = mk.DatetimeIndex([mk.Timestamp('2011-01-01'), mk.NaT,
mk.Timestamp('2011-01-03')])
right = mk.DatetimeIndex([mk.NaT, mk.NaT, mk.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == mk.NaT, expected)
tm.assert_numpy_array_equal(mk.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != mk.NaT, expected)
tm.assert_numpy_array_equal(mk.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < mk.NaT, expected)
tm.assert_numpy_array_equal(mk.NaT > l, expected)
def test_counts_value_num_distinctive(self):
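        """counts_value_num() tallies repeated timestamps in descending count
        order (NaT only counted with sipna=False) and distinctive() keeps
        first-seen order."""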
# GH 7735
for tz in self.tz:
idx = mk.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, length(idx) + 1)),
tz=tz)
exp_idx = mk.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Collections(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
expected = mk.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.distinctive(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', mk.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Collections([3, 2], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
mk.NaT], tz=tz)
expected = Collections([3, 2, 1], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(sipna=False),
expected)
tm.assert_index_equal(idx.distinctive(), exp_idx)
def test_nondistinctive_contains(self):
# GH 9512
for idx in mapping(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
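        """sort_the_values() keeps the freq for already-regular indexes
        (negating it when descending) and drops it for irregular ones; NaT
        sorts first."""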
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_the_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_the_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_the_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([mk.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', mk.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([mk.NaT, mk.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_the_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_the_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_the_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_gettingitem(self):
idx1 = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = mk.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = mk.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = mk.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = mk.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_sip_duplicates_metadata(self):
# GH 10115
idx = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.sip_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.adding(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.sip_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_sip_duplicates(self):
# to check Index/Collections compat
base = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.adding(base[:5])
res = idx.sip_duplicates()
tm.assert_index_equal(res, base)
res = Collections(idx).sip_duplicates()
tm.assert_collections_equal(res, Collections(base))
res = idx.sip_duplicates(keep='final_item')
exp = base[5:].adding(base[:5])
tm.assert_index_equal(res, exp)
res = Collections(idx).sip_duplicates(keep='final_item')
tm.assert_collections_equal(res, Collections(exp, index=np.arange(5, 36)))
res = idx.sip_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Collections(idx).sip_duplicates(keep=False)
tm.assert_collections_equal(res, Collections(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = mk.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = mk.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = mk.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = mk.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = mk.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = mk.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = mk.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = mk.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = mk.DatetimeIndex([mk.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shifting(self):
# GH 9903
for tz in self.tz:
idx = mk.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shifting(0, freq='H'), idx)
tm.assert_index_equal(idx.shifting(3, freq='H'), idx)
            idx = mk.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shifting(0, freq='H'), idx)
            exp = mk.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
            tm.assert_index_equal(idx.shifting(3, freq='H'), exp)
            exp = mk.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shifting(-3, freq='H'), exp)
def test_nat(self):
assert mk.DatetimeIndex._na_value is mk.NaT
assert mk.DatetimeIndex([])._na_value is mk.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = mk.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = mk.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = mk.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.clone())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(mk.Collections(idx))
idx2 = mk.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.clone())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(mk.Collections(idx2))
# same internal, different tz
idx3 = mk.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.clone())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(mk.Collections(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_getting_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert incontainstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Collections, DatetimeIndex],
[tm.assert_collections_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + mk.DateOffset(years=1)
result2 = mk.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - mk.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
mk.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + mk.offsets.Day()
result2 = mk.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
mk.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + mk.offsets.MonthEnd()
result2 = mk.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Collections only
if klass is Collections:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Collections([mk.offsets.DateOffset(years=1),
mk.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Collections([mk.offsets.DateOffset(years=1),
mk.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('getting_minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = mk.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
op = mk.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
# assert these are equal on a piecewise basis
offsets = ['YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0,
'startingMonth': 2,
'variation':
'nearest'}),
('WeekOfMonth', {'weekday': 2,
'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})]
with warnings.catch_warnings(record=True):
for normalize in (True, False):
for do in offsets:
if incontainstance(do, tuple):
do, kwargs = do
else:
do = do
kwargs = {}
for n in [0, 5]:
if (do in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
continue
op = gettingattr(mk.offsets, do)(n,
normalize=normalize,
**kwargs)
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
assert_func(klass([op + x for x in s]), op + s)
@pytest.mark.parametrize('years,months', product([-1, 0, 1], [-2, 0, 2]))
def test_shifting_months(years, months):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31')])
actual = DatetimeIndex(tslib.shifting_months(s.asi8, years * 12 +
months))
expected = DatetimeIndex([x + offsets.DateOffset(
years=years, months=months) for x in s])
tm.assert_index_equal(actual, expected)
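# Illustrative sketch (added note, not part of the original suite): the
# equivalence asserted by the parametrized test above, spelled out for a single
# made-up timestamp.
#
#   >>> stamp = Timestamp('2000-01-31')
#   >>> DatetimeIndex(tslib.shifting_months(DatetimeIndex([stamp]).asi8, 1))[0]
#   Timestamp('2000-02-29 00:00:00')
#   >>> stamp + offsets.DateOffset(months=1)   # same value, day clamped to month end
#   Timestamp('2000-02-29 00:00:00')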
class TestBusinessDatetimeIndex(object):
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.value_round_trip_pickle(self.rng)
assert unpickled.offset is not None
def test_clone(self):
cp = self.rng.clone()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_repr(self):
# only retotal_ally care that it works
repr(self.rng)
def test_gettingitem(self):
smtotal_aller = self.rng[:5]
exp = DatetimeIndex(self.rng.view(np.ndarray)[:5])
tm.assert_index_equal(smtotal_aller, exp)
assert smtotal_aller.offset == self.rng.offset
sliced = self.rng[::5]
assert sliced.offset == BDay() * 5
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
assert length(fancy_indexed) == 5
assert incontainstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert self.rng[4] == self.rng[np.int_(4)]
def test_gettingitem_matplotlib_hackavalue_round(self):
values = self.rng[:, None]
expected = self.rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_shifting(self):
shiftinged = self.rng.shifting(5)
assert shiftinged[0] == self.rng[5]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(-5)
assert shiftinged[5] == self.rng[0]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(0)
assert shiftinged[0] == self.rng[0]
assert shiftinged.offset == self.rng.offset
rng = date_range(START, END, freq=BMonthEnd())
shiftinged = rng.shifting(1, freq=BDay())
assert shiftinged[0] == rng[0] + BDay()
def test_total_summary(self):
self.rng.total_summary()
self.rng[2:2].total_summary()
def test_total_summary_pytz(self):
bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).total_summary()
def test_total_summary_dateutil(self):
bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).total_summary()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.clone()
t2 = self.rng.clone()
assert t1.identical(t2)
# name
t1 = t1.renagetting_ming('foo')
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.renagetting_ming('foo')
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex(object):
def setup_method(self, method):
self.rng = cdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_clone(self):
cp = self.rng.clone()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_repr(self):
# only retotal_ally care that it works
repr(self.rng)
def test_gettingitem(self):
smtotal_aller = self.rng[:5]
exp = DatetimeIndex(self.rng.view(np.ndarray)[:5])
tm.assert_index_equal(smtotal_aller, exp)
assert smtotal_aller.offset == self.rng.offset
sliced = self.rng[::5]
assert sliced.offset == CDay() * 5
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
assert length(fancy_indexed) == 5
assert incontainstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert self.rng[4] == self.rng[np.int_(4)]
def test_gettingitem_matplotlib_hackavalue_round(self):
values = self.rng[:, None]
expected = self.rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_shifting(self):
shiftinged = self.rng.shifting(5)
assert shiftinged[0] == self.rng[5]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(-5)
assert shiftinged[5] == self.rng[0]
assert shiftinged.offset == self.rng.offset
shiftinged = self.rng.shifting(0)
assert shiftinged[0] == self.rng[0]
assert shiftinged.offset == self.rng.offset
# PerformanceWarning
with warnings.catch_warnings(record=True):
rng = date_range(START, END, freq=BMonthEnd())
shiftinged = rng.shifting(1, freq=CDay())
assert shiftinged[0] == rng[0] + CDay()
def test_pickle_unpickle(self):
unpickled = | tm.value_round_trip_pickle(self.rng) | pandas.util.testing.round_trip_pickle |
import gym
from gym import spaces
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
import monkey as mk
import numpy as np
from xitorch.interpolate import Interp1D
from tqdm.auto import tqdm, trange
import time
from rcmodel.room import Room
from rcmodel.building import Building
from rcmodel.RCModel import RCModel
from rcmodel.tools import InputScaling
from rcmodel.tools import BuildingTemperatureDataset
class PolicyNetwork(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
n = 10
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(in_dim, n),
nn.ReLU(),
nn.Linear(n, n),
nn.ReLU(),
nn.Linear(n, out_dim),
)
self.on_policy_reset()
def forward(self, state):
logits = self.linear_relu_stack(state)
return logits
def getting_action(self, state):
        # Use a distinct name so the distribution does not shadow the monkey import,
        # and ctotal_all torch's actual sampling method.
        dist = torch.distributions.categorical.Categorical(logits=self.forward(state))  # make a probability distribution
        action = dist.sample()
# -*- coding: utf-8 -*-
### Libraries ###
import sys
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
from croissance.estimation.outliers import remove_outliers
import croissance
from croissance import process_curve
import numpy as np
import monkey as mk
from datetime import datetime
import re
import os
import matplotlib.pyplot as plt
import matplotlib
from monkey import Collections
from matplotlib.pyplot import cm
import argparse
import itertools
import shutil
import path
import xlsxwriter
import seaborn as sns
import subprocess
from scipy.optimize import curve_fit
from scipy import interpolate
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
	#If there is only one species, a single knowledgeframe is returned
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
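	# Illustrative note (added, not in the original script): the reshaped
	# knowledgeframe is expected to pair every sample_by_num column with a matching
	# "time_<sample_by_num>" column, which the per-sample_by_num plotting further below
	# relies on. Hypothetical layout:
	#
	#   time_BS1_A1   BS1_A1   time_BS1_A2   BS1_A2   ...
	#   0.0           0.012    0.0           0.015
	#   0.5           0.018    0.5           0.021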
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
		#print(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = | Collections.sipna(my_collections) | pandas.Series.dropna |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import monkey
from monkey.core.common import is_bool_indexer
from monkey.core.indexing import check_bool_indexer
from monkey.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from monkey.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_monkey, wrap_ukf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _getting_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_mapping(func_name):
def str_op_builder(kf, *args, **kwargs):
str_s = kf.squeeze(axis=1).str
return gettingattr(monkey.Collections.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
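# Hypothetical usage sketch (not part of the original module): `_str_mapping`
# turns a string-method name into a per-partition operator over a one-column
# frame. The column name and data below are made up for illustration.
#
#   >>> upper_op = _str_mapping("upper")
#   >>> upper_op(monkey.KnowledgeFrame({"a": ["x", "y"]}))   # -> one-column frame with ["X", "Y"]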
def _dt_prop_mapping(property_name):
"""
    Create a function that accesses a non-ctotal_allable property of the `dt` accessor of the collections.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A ctotal_allable function to be applied in the partitions
Notes
-----
This applies non-ctotal_allable properties of `Collections.dt`.
"""
def dt_op_builder(kf, *args, **kwargs):
prop_val = gettingattr(kf.squeeze(axis=1).dt, property_name)
if incontainstance(prop_val, monkey.Collections):
return prop_val.to_frame()
elif incontainstance(prop_val, monkey.KnowledgeFrame):
return prop_val
else:
return monkey.KnowledgeFrame([prop_val])
return dt_op_builder
def _dt_func_mapping(func_name):
"""
    Create a function that ctotal_alls a method of the `dt` accessor of the collections.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A ctotal_allable function to be applied in the partitions
Notes
-----
This applies ctotal_allable methods of `Collections.dt`.
"""
def dt_op_builder(kf, *args, **kwargs):
dt_s = kf.squeeze(axis=1).dt
return monkey.KnowledgeFrame(
gettingattr(monkey.Collections.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
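# Hypothetical usage sketch (not part of the original module) contrasting the
# two `dt` builders above; the column name and data are made up for
# illustration.
#
#   >>> frame = monkey.KnowledgeFrame(
#   ...     {"ts": monkey.convert_datetime(["2011-01-01 09:30", "2011-01-02 18:00"])}
#   ... )
#   >>> _dt_prop_mapping("year")(frame)        # non-ctotal_allable property -> frame of years
#   >>> _dt_func_mapping("normalize")(frame)   # ctotal_allable method -> frame of midnight timestamps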
def clone_kf_for_func(func):
"""
Create a function that copies the knowledgeframe, likely because `func` is inplace.
Parameters
----------
func : ctotal_allable
The function, usutotal_ally umkates a knowledgeframe inplace.
Returns
-------
ctotal_allable
A ctotal_allable function to be applied in the partitions
"""
def ctotal_aller(kf, *args, **kwargs):
kf = kf.clone()
func(kf, *args, **kwargs)
return kf
return ctotal_aller
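# Hypothetical usage sketch (not part of the original module): wrapping an
# inplace monkey operation so the ctotal_aller's frame is left untouched, mirroring
# how `kf_umkate` is registered below. The frames are made up for illustration.
#
#   >>> safe_umkate = clone_kf_for_func(monkey.KnowledgeFrame.umkate)
#   >>> left = monkey.KnowledgeFrame({"a": [1, 2]})
#   >>> new_left = safe_umkate(left, monkey.KnowledgeFrame({"a": [10]}))
#   >>> left   # unchanged; only the returned frame reflects the umkate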
class MonkeyQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Monkey backend. This logic is specific to Monkey."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_monkey(self, monkey_op, *args, **kwargs):
"""Default to monkey behavior.
Parameters
----------
monkey_op : ctotal_allable
The operation to employ, must be compatible monkey KnowledgeFrame ctotal_all
args
The arguments for the `monkey_op`
kwargs
The keyword arguments for the `monkey_op`
Returns
-------
MonkeyQueryCompiler
The result of the `monkey_op`, converted back to MonkeyQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to monkey.
"""
ErrorMessage.default_to_monkey(str(monkey_op))
args = (a.to_monkey() if incontainstance(a, type(self)) else a for a in args)
kwargs = {
            k: v.to_monkey() if incontainstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = monkey_op(self.to_monkey(), *args, **kwargs)
if incontainstance(result, monkey.Collections):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if incontainstance(result, monkey.KnowledgeFrame):
return self.from_monkey(result, type(self._modin_frame))
else:
return result
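    # Hypothetical usage sketch (not part of the original class): falling back to
    # plain monkey for an operation with no distributed implementation here, in the
    # same spirit as the `unioner` fallback further below.
    #
    #   >>> qc.default_to_monkey(monkey.KnowledgeFrame.kurt, axis=1)
    #
    # converts the frame (and any query-compiler arguments) to monkey, runs the
    # operation, and wraps the result back into a MonkeyQueryCompiler.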
def to_monkey(self):
return self._modin_frame.to_monkey()
@classmethod
def from_monkey(cls, kf, data_cls):
return cls(data_cls.from_monkey(kf))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_getting_axis(0), _set_axis(0))
columns = property(_getting_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For clone, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We clone total_all of the metadata
# to prevent that.
def clone(self):
return self.__constructor__(self._modin_frame.clone())
# END Copy
# Append/Concat/Join (Not Merge)
# The adding/concating/join operations should idetotal_ally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# addinging the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a clone of the
# KnowledgeFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexinging
def concating(self, axis, other, **kwargs):
"""Concatenates two objects togettingher.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concating with.
Returns:
Concatenated objects.
"""
if not incontainstance(other, list):
other = [other]
assert total_all(
incontainstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not total_allowed"
sort = kwargs.getting("sort", None)
if sort is None:
sort = False
join = kwargs.getting("join", "outer")
ignore_index = kwargs.getting("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concating(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reseting_index(sip=True)
else:
result.columns = monkey.RangeIndex(length(result.columns))
return result
return result
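    # Hypothetical usage sketch (not part of the original class): stacking two
    # query compilers row-wise. `other` may be a single compiler or a list, and
    # `ignore_index=True` swaps the row labels for a fresh RangeIndex, as in the
    # `reseting_index(sip=True)` branch above.
    #
    #   >>> stacked = qc.concating(0, [other_qc], join="outer", sort=False,
    #   ...                        ignore_index=True)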
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin KnowledgeFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
length(arr) != length(self.index) or length(arr[0]) != length(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two KnowledgeFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other KnowledgeFrame
# result in NaN values.
add = BinaryFunction.register(monkey.KnowledgeFrame.add)
combine = BinaryFunction.register(monkey.KnowledgeFrame.combine)
combine_first = BinaryFunction.register(monkey.KnowledgeFrame.combine_first)
eq = BinaryFunction.register(monkey.KnowledgeFrame.eq)
floordivision = BinaryFunction.register(monkey.KnowledgeFrame.floordivision)
ge = BinaryFunction.register(monkey.KnowledgeFrame.ge)
gt = BinaryFunction.register(monkey.KnowledgeFrame.gt)
le = BinaryFunction.register(monkey.KnowledgeFrame.le)
lt = BinaryFunction.register(monkey.KnowledgeFrame.lt)
mod = BinaryFunction.register(monkey.KnowledgeFrame.mod)
mul = BinaryFunction.register(monkey.KnowledgeFrame.mul)
ne = BinaryFunction.register(monkey.KnowledgeFrame.ne)
pow = BinaryFunction.register(monkey.KnowledgeFrame.pow)
rfloordivision = BinaryFunction.register(monkey.KnowledgeFrame.rfloordivision)
rmod = BinaryFunction.register(monkey.KnowledgeFrame.rmod)
rpow = BinaryFunction.register(monkey.KnowledgeFrame.rpow)
rsub = BinaryFunction.register(monkey.KnowledgeFrame.rsub)
rtruedivision = BinaryFunction.register(monkey.KnowledgeFrame.rtruedivision)
sub = BinaryFunction.register(monkey.KnowledgeFrame.sub)
truedivision = BinaryFunction.register(monkey.KnowledgeFrame.truedivision)
__and__ = BinaryFunction.register(monkey.KnowledgeFrame.__and__)
__or__ = BinaryFunction.register(monkey.KnowledgeFrame.__or__)
__rand__ = BinaryFunction.register(monkey.KnowledgeFrame.__rand__)
__ror__ = BinaryFunction.register(monkey.KnowledgeFrame.__ror__)
__rxor__ = BinaryFunction.register(monkey.KnowledgeFrame.__rxor__)
__xor__ = BinaryFunction.register(monkey.KnowledgeFrame.__xor__)
kf_umkate = BinaryFunction.register(
clone_kf_for_func(monkey.KnowledgeFrame.umkate), join_type="left"
)
collections_umkate = BinaryFunction.register(
clone_kf_for_func(
lambda x, y: monkey.Collections.umkate(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with umkated data and index.
"""
assert incontainstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if incontainstance(other, type(self)):
# Note: Currently we are doing this with two mappings across the entire
# data. This can be done with a single mapping, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(kf, new_other, **kwargs):
return kf.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Collections of scalars to be applied based on the condition
# knowledgeframe.
else:
def where_builder_collections(kf, cond):
return kf.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_collections, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
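    # Illustrative sketch (not part of the original class): the two-pass `where`
    # above, written against plain monkey objects so the intent is easier to see.
    # The data is made up for illustration.
    #
    #   >>> cond = monkey.KnowledgeFrame({"a": [True, False]})
    #   >>> other = monkey.KnowledgeFrame({"a": [10, 20]})
    #   >>> first = cond.where(cond, other)            # False cells pick up `other`
    #   >>> monkey.KnowledgeFrame({"a": [1, 2]}).where(first.eq(True), first)
    #
    # pass one pulls the replacement values into the condition frame, pass two
    # applies them to the data frame, giving [1, 20] here.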
def unioner(self, right, **kwargs):
"""
Merge KnowledgeFrame or named Collections objects with a database-style join.
Parameters
----------
right : MonkeyQueryCompiler
The query compiler of the right KnowledgeFrame to unioner with.
Returns
-------
MonkeyQueryCompiler
A new query compiler that contains result of the unioner.
Notes
-----
See mk.unioner or mk.KnowledgeFrame.unioner for more info on kwargs.
"""
how = kwargs.getting("how", "inner")
on = kwargs.getting("on", None)
left_on = kwargs.getting("left_on", None)
right_on = kwargs.getting("right_on", None)
left_index = kwargs.getting("left_index", False)
right_index = kwargs.getting("right_index", False)
sort = kwargs.getting("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_monkey()
kwargs["sort"] = False
def mapping_func(left, right=right, kwargs=kwargs):
return monkey.unioner(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._employ_full_axis(1, mapping_func)
)
is_reseting_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reseting_index = (
False
if whatever(o in new_self.index.names for o in left_on)
and whatever(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
                        new_self.sort_rows_by_column_values(list(left_on) + list(right_on))
                        if is_reseting_index
                        else new_self.sorting_index(axis=0, level=list(left_on) + list(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reseting_index = not whatever(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reseting_index
else new_self.sorting_index(axis=0, level=on)
)
return new_self.reseting_index(sip=True) if is_reseting_index else new_self
else:
return self.default_to_monkey(monkey.KnowledgeFrame.unioner, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another KnowledgeFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right KnowledgeFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See mk.KnowledgeFrame.join for more info on kwargs.
"""
on = kwargs.getting("on", None)
how = kwargs.getting("how", "left")
sort = kwargs.getting("sort", False)
if how in ["left", "inner"]:
right = right.to_monkey()
def mapping_func(left, right=right, kwargs=kwargs):
return monkey.KnowledgeFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._employ_full_axis(1, mapping_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_monkey(monkey.KnowledgeFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reseting_index (may shuffle data)
def reindexing(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to targetting the reindexing on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with umkated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.reindexing(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reseting_index(self, **kwargs):
"""Removes total_all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with umkated data and reset index.
"""
sip = kwargs.getting("sip", False)
level = kwargs.getting("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_monkey(monkey.KnowledgeFrame.reseting_index, **kwargs)
if not sip:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.clone()
new_self.index = monkey.RangeIndex(length(new_self.index))
return new_self
# END Reindex/reseting_index
# Transpose
# For transpose, we aren't going to immediately clone everything. Since the
# actual transpose operation is very fast, we will just do it before whatever
# operation that gettings ctotal_alled on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants astotal_sume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be ctotal_alled for QueryCompilers representing a Collections object,
i.e. self.is_collections_like() should be True.
Returns
-------
MonkeyQueryCompiler
Transposed new QueryCompiler or self.
"""
if length(self.columns) != 1 or (
length(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_collections_like(self):
"""Return True if QueryCompiler has a single column or row"""
return length(self.columns) == 1 or length(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda kf: kf.is_monotonic_increasing,
"decreasing": lambda kf: kf.is_monotonic_decreasing,
}
monotonic_fn = funcs.getting(func_type, funcs["increasing"])
def is_monotonic_mapping(kf):
kf = kf.squeeze(axis=1)
return [monotonic_fn(kf), kf.iloc[0], kf.iloc[length(kf) - 1]]
def is_monotonic_reduce(kf):
kf = kf.squeeze(axis=1)
common_case = kf[0].total_all()
left_edges = kf[1]
right_edges = kf[2]
edges_list = []
for i in range(length(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(monkey.Collections(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_mapping, is_monotonic_reduce, axis=0
)(self)
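    # Illustrative sketch (not part of the original class) of the map/reduce
    # split above, for a hypothetical column stored as two partitions [1, 2, 3]
    # and [3, 5, 7]:
    #
    #   map    ->  [True, 1, 3]  and  [True, 3, 7]   (monotonic flag, first, last)
    #   reduce ->  both flags True, and
    #              monkey.Collections([1, 3, 3, 7]).is_monotonic_increasing   (edges line up)
    #
    # so the overall answer is True.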
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(monkey.KnowledgeFrame.count, monkey.KnowledgeFrame.total_sum)
getting_max = MapReduceFunction.register(monkey.KnowledgeFrame.getting_max, monkey.KnowledgeFrame.getting_max)
getting_min = MapReduceFunction.register(monkey.KnowledgeFrame.getting_min, monkey.KnowledgeFrame.getting_min)
total_sum = MapReduceFunction.register(monkey.KnowledgeFrame.total_sum, monkey.KnowledgeFrame.total_sum)
prod = MapReduceFunction.register(monkey.KnowledgeFrame.prod, monkey.KnowledgeFrame.prod)
whatever = MapReduceFunction.register(monkey.KnowledgeFrame.whatever, monkey.KnowledgeFrame.whatever)
total_all = MapReduceFunction.register(monkey.KnowledgeFrame.total_all, monkey.KnowledgeFrame.total_all)
memory_usage = MapReduceFunction.register(
monkey.KnowledgeFrame.memory_usage,
lambda x, *args, **kwargs: monkey.KnowledgeFrame.total_sum(x),
axis=0,
)
average = MapReduceFunction.register(
lambda kf, **kwargs: kf.employ(
lambda x: (x.total_sum(skipna=kwargs.getting("skipna", True)), x.count()),
axis=kwargs.getting("axis", 0),
result_type="reduce",
).set_axis(kf.axes[kwargs.getting("axis", 0) ^ 1], axis=0),
lambda kf, **kwargs: kf.employ(
lambda x: x.employ(lambda d: d[0]).total_sum(skipna=kwargs.getting("skipna", True))
/ x.employ(lambda d: d[1]).total_sum(skipna=kwargs.getting("skipna", True)),
axis=kwargs.getting("axis", 0),
).set_axis(kf.axes[kwargs.getting("axis", 0) ^ 1], axis=0),
)
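    # Illustrative sketch (not part of the original class) of the `average`
    # registration above, for a hypothetical column split as [1, 2] and [3, 4]:
    #
    #   map    ->  (3, 2)  and  (7, 2)           (per-partition total_sum and count)
    #   reduce ->  (3 + 7) / (2 + 2) == 2.5      (pooled total_sum over pooled count)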
def counts_value_num(self, **kwargs):
"""
Return a QueryCompiler of Collections containing counts of distinctive values.
Returns
-------
MonkeyQueryCompiler
"""
if kwargs.getting("bins", None) is not None:
new_modin_frame = self._modin_frame._employ_full_axis(
0, lambda kf: kf.squeeze(axis=1).counts_value_num(**kwargs)
)
return self.__constructor__(new_modin_frame)
def mapping_func(kf, *args, **kwargs):
return kf.squeeze(axis=1).counts_value_num(**kwargs)
def reduce_func(kf, *args, **kwargs):
normalize = kwargs.getting("normalize", False)
sort = kwargs.getting("sort", True)
ascending = kwargs.getting("ascending", False)
sipna = kwargs.getting("sipna", True)
try:
result = kf.squeeze(axis=1).grouper(kf.index, sort=False).total_sum()
# This will happen with Arrow buffer read-only errors. We don't want to clone
# total_all the time, so this will try to fast-path the code first.
except (ValueError):
result = kf.clone().squeeze(axis=1).grouper(kf.index, sort=False).total_sum()
if not sipna and np.nan in kf.index:
result = result.adding(
monkey.Collections(
[kf.squeeze(axis=1).loc[[np.nan]].total_sum()], index=[np.nan]
)
)
if normalize:
result = result / kf.squeeze(axis=1).total_sum()
result = result.sort_the_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sorting_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : monkey.Collections or monkey.KnowledgeFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
monkey.KnowledgeFrame
A new KnowledgeFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(length(result), dtype=type(result.index))
while i < length(result):
j = i
if i < length(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == length(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return monkey.KnowledgeFrame(result, index=new_index)
return sorting_index_for_equal_values(result, ascending)
return MapReduceFunction.register(mapping_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# END MapReduce operations
# Reduction operations
idxgetting_max = ReductionFunction.register(monkey.KnowledgeFrame.idxgetting_max)
idxgetting_min = ReductionFunction.register(monkey.KnowledgeFrame.idxgetting_min)
median = ReductionFunction.register(monkey.KnowledgeFrame.median)
ndistinctive = ReductionFunction.register(monkey.KnowledgeFrame.ndistinctive)
skew = ReductionFunction.register(monkey.KnowledgeFrame.skew)
kurt = ReductionFunction.register(monkey.KnowledgeFrame.kurt)
sem = ReductionFunction.register(monkey.KnowledgeFrame.sem)
standard = ReductionFunction.register(monkey.KnowledgeFrame.standard)
var = ReductionFunction.register(monkey.KnowledgeFrame.var)
total_sum_getting_min_count = ReductionFunction.register(monkey.KnowledgeFrame.total_sum)
prod_getting_min_count = ReductionFunction.register(monkey.KnowledgeFrame.prod)
quantile_for_single_value = ReductionFunction.register(monkey.KnowledgeFrame.quantile)
mad = ReductionFunction.register(monkey.KnowledgeFrame.mad)
convert_datetime = ReductionFunction.register(
lambda kf, *args, **kwargs: monkey.convert_datetime(
kf.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_by_num_func(
self, resample_by_num_args, func_name, new_columns=None, kf_op=None, *args, **kwargs
):
def mapping_func(kf, resample_by_num_args=resample_by_num_args):
if kf_op is not None:
kf = kf_op(kf)
resample_by_numd_val = kf.resample_by_num(*resample_by_num_args)
op = gettingattr(monkey.core.resample_by_num.Resample_by_numr, func_name)
if ctotal_allable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to clone
# total_all the time, so this will try to fast-path the code first.
val = op(resample_by_numd_val, *args, **kwargs)
except (ValueError):
resample_by_numd_val = kf.clone().resample_by_num(*resample_by_num_args)
val = op(resample_by_numd_val, *args, **kwargs)
else:
val = gettingattr(resample_by_numd_val, func_name)
if incontainstance(val, monkey.Collections):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._employ_full_axis(
axis=0, func=mapping_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
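    # Illustrative sketch (not part of the original class): every resample_by_num_*
    # method below funnels through `_resample_by_num_func`, which per column-partition
    # replays roughly
    #
    #   gettingattr(kf.resample_by_num(*resample_by_num_args), func_name)(*args, **kwargs)
    #
    # so e.g. the `resample_by_num_total_sum` wrapper behaves like kf.resample_by_num(rule).total_sum() on
    # the underlying monkey frame; non-ctotal_allable attributes are read instead of ctotal_alled.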
def resample_by_num_getting_group(self, resample_by_num_args, name, obj):
return self._resample_by_num_func(resample_by_num_args, "getting_group", name=name, obj=obj)
def resample_by_num_app_ser(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args,
"employ",
kf_op=lambda kf: kf.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_by_num_app_kf(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "employ", func=func, *args, **kwargs)
def resample_by_num_agg_ser(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args,
"aggregate",
kf_op=lambda kf: kf.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_by_num_agg_kf(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "aggregate", func=func, *args, **kwargs
)
def resample_by_num_transform(self, resample_by_num_args, arg, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "transform", arg=arg, *args, **kwargs)
def resample_by_num_pipe(self, resample_by_num_args, func, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "pipe", func=func, *args, **kwargs)
def resample_by_num_ffill(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "ffill", limit=limit)
def resample_by_num_backfill(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "backfill", limit=limit)
def resample_by_num_bfill(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "bfill", limit=limit)
def resample_by_num_pad(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "pad", limit=limit)
def resample_by_num_nearest(self, resample_by_num_args, limit):
return self._resample_by_num_func(resample_by_num_args, "nearest", limit=limit)
def resample_by_num_fillnone(self, resample_by_num_args, method, limit):
return self._resample_by_num_func(resample_by_num_args, "fillnone", method=method, limit=limit)
def resample_by_num_asfreq(self, resample_by_num_args, fill_value):
return self._resample_by_num_func(resample_by_num_args, "asfreq", fill_value=fill_value)
def resample_by_num_interpolate(
self,
resample_by_num_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_by_num_func(
resample_by_num_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_by_num_count(self, resample_by_num_args):
return self._resample_by_num_func(resample_by_num_args, "count")
def resample_by_num_ndistinctive(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "ndistinctive", _method=_method, *args, **kwargs
)
def resample_by_num_first(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "first", _method=_method, *args, **kwargs
)
def resample_by_num_final_item(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "final_item", _method=_method, *args, **kwargs
)
def resample_by_num_getting_max(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "getting_max", _method=_method, *args, **kwargs
)
def resample_by_num_average(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "median", _method=_method, *args, **kwargs
)
def resample_by_num_median(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "median", _method=_method, *args, **kwargs
)
def resample_by_num_getting_min(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "getting_min", _method=_method, *args, **kwargs
)
def resample_by_num_ohlc_ser(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args,
"ohlc",
kf_op=lambda kf: kf.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_by_num_ohlc_kf(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_by_num_prod(self, resample_by_num_args, _method, getting_min_count, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "prod", _method=_method, getting_min_count=getting_min_count, *args, **kwargs
)
def resample_by_num_size(self, resample_by_num_args):
return self._resample_by_num_func(resample_by_num_args, "size", new_columns=["__reduced__"])
def resample_by_num_sem(self, resample_by_num_args, _method, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "sem", _method=_method, *args, **kwargs
)
def resample_by_num_standard(self, resample_by_num_args, ddof, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "standard", ddof=ddof, *args, **kwargs)
def resample_by_num_total_sum(self, resample_by_num_args, _method, getting_min_count, *args, **kwargs):
return self._resample_by_num_func(
resample_by_num_args, "total_sum", _method=_method, getting_min_count=getting_min_count, *args, **kwargs
)
def resample_by_num_var(self, resample_by_num_args, ddof, *args, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "var", ddof=ddof, *args, **kwargs)
def resample_by_num_quantile(self, resample_by_num_args, q, **kwargs):
return self._resample_by_num_func(resample_by_num_args, "quantile", q=q, **kwargs)
window_average = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).average(*args, **kwargs)
)
)
window_total_sum = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).total_sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_standard = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).standard(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda kf, rolling_args: monkey.KnowledgeFrame(kf.rolling(*rolling_args).count())
)
rolling_total_sum = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).total_sum(*args, **kwargs)
)
)
rolling_average = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).average(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda kf, rolling_args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_standard = FoldFunction.register(
lambda kf, rolling_args, ddof, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).standard(ddof=ddof, *args, **kwargs)
)
)
rolling_getting_min = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).getting_min(*args, **kwargs)
)
)
rolling_getting_max = FoldFunction.register(
lambda kf, rolling_args, *args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).getting_max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda kf, rolling_args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda kf, rolling_args, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_employ = FoldFunction.register(
lambda kf, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).employ(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda kf, rolling_args, quantile, interpolation, **kwargs: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if length(self.columns) > 1:
return self.default_to_monkey(
lambda kf: monkey.KnowledgeFrame.rolling(kf, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda kf: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if length(self.columns) > 1:
return self.default_to_monkey(
lambda kf: monkey.KnowledgeFrame.rolling(kf, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda kf: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._employ_full_axis(
0,
lambda kf: monkey.KnowledgeFrame(
kf.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
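# Illustrative note, not part of the original source: rolling_corr / rolling_cov
# above fall back to default_to_monkey for multi-column frames because pairwise
# results reshape the output (a MultiIndex of column pairs), which the
# partition-wise FoldFunction path cannot express; single-column inputs keep
# their shape, so they stay on the parallel path.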
def unstack(self, level, fill_value):
if not incontainstance(self.index, monkey.MultiIndex) or (
incontainstance(self.index, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindexing = True
else:
axis = 0
new_columns = None
need_reindexing = False
def mapping_func(kf):
return monkey.KnowledgeFrame(kf.unstack(level=level, fill_value=fill_value))
def is_tree_like_or_1d(calc_index, valid_index):
if not incontainstance(calc_index, monkey.MultiIndex):
return True
actual_length = 1
for lvl in calc_index.levels:
actual_length *= length(lvl)
return length(self.index) * length(self.columns) == actual_length * length(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_total_all_multi_list = False
if (
incontainstance(self.index, monkey.MultiIndex)
and incontainstance(self.columns, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_total_all_multi_list = True
real_cols_bkp = self.columns
obj = self.clone()
obj.columns = np.arange(length(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._employ_full_axis(
axis, mapping_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def getting_distinctive_level_values(index):
return [
index.getting_level_values(lvl).distinctive()
for lvl in np.arange(index.nlevels)
]
new_index = (
getting_distinctive_level_values(index)
if consider_index
else index
if incontainstance(index, list)
else [index]
)
new_columns = (
getting_distinctive_level_values(columns) if consider_columns else [columns]
)
return monkey.MultiIndex.from_product([*new_columns, *new_index])
if is_total_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sorting_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindexing:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = incontainstance(self.index, monkey.MultiIndex)
is_recompute_columns = not is_recompute_index and incontainstance(
self.columns, monkey.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if incontainstance(self.columns, monkey.MultiIndex) or not incontainstance(
self.index, monkey.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and incontainstance(self.index, monkey.MultiIndex)
else self.index
)
index = monkey.MultiIndex.from_tuples(
list(index) * length(self.columns)
)
columns = self.columns.repeat(length(self.index))
index_levels = [
index.getting_level_values(i) for i in range(index.nlevels)
]
new_index = monkey.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindexing(0, new_index)
return result
def stack(self, level, sipna):
if not incontainstance(self.columns, monkey.MultiIndex) or (
incontainstance(self.columns, monkey.MultiIndex)
and is_list_like(level)
and length(level) == self.columns.nlevels
):
new_columns = ["__reduced__"]
else:
new_columns = None
new_modin_frame = self._modin_frame._employ_full_axis(
1,
lambda kf: monkey.KnowledgeFrame(kf.stack(level=level, sipna=sipna)),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that employ a function to every partition.
abs = MapFunction.register(monkey.KnowledgeFrame.abs, dtypes="clone")
employmapping = MapFunction.register(monkey.KnowledgeFrame.employmapping)
conj = MapFunction.register(
lambda kf, *args, **kwargs: monkey.KnowledgeFrame(np.conj(kf))
)
invert = MapFunction.register(monkey.KnowledgeFrame.__invert__)
incontain = MapFunction.register(monkey.KnowledgeFrame.incontain, dtypes=np.bool)
ifna = MapFunction.register(monkey.KnowledgeFrame.ifna, dtypes=np.bool)
negative = MapFunction.register(monkey.KnowledgeFrame.__neg__)
notna = MapFunction.register(monkey.KnowledgeFrame.notna, dtypes=np.bool)
value_round = MapFunction.register(monkey.KnowledgeFrame.value_round)
replacing = MapFunction.register(monkey.KnowledgeFrame.replacing)
collections_view = MapFunction.register(
lambda kf, *args, **kwargs: monkey.KnowledgeFrame(
kf.squeeze(axis=1).view(*args, **kwargs)
)
)
to_num = MapFunction.register(
lambda kf, *args, **kwargs: monkey.KnowledgeFrame(
monkey.to_num(kf.squeeze(axis=1), *args, **kwargs)
)
)
def repeat(self, repeats):
def mapping_fn(kf):
return monkey.KnowledgeFrame(kf.squeeze(axis=1).repeat(repeats))
if incontainstance(repeats, int) or (is_list_like(repeats) and length(repeats) == 1):
return MapFunction.register(mapping_fn, validate_index=True)(self)
else:
return self.__constructor__(self._modin_frame._employ_full_axis(0, mapping_fn))
# END Map partitions operations
# String mapping partitions operations
str_capitalize = MapFunction.register(_str_mapping("capitalize"), dtypes="clone")
str_center = MapFunction.register(_str_mapping("center"), dtypes="clone")
str_contains = MapFunction.register(_str_mapping("contains"), dtypes=np.bool)
str_count = MapFunction.register(_str_mapping("count"), dtypes=int)
str_endswith = MapFunction.register(_str_mapping("endswith"), dtypes=np.bool)
str_find = MapFunction.register(_str_mapping("find"), dtypes="clone")
str_findtotal_all = MapFunction.register(_str_mapping("findtotal_all"), dtypes="clone")
str_getting = MapFunction.register(_str_mapping("getting"), dtypes="clone")
str_index = MapFunction.register(_str_mapping("index"), dtypes="clone")
str_isalnum = MapFunction.register(_str_mapping("isalnum"), dtypes=np.bool)
str_isalpha = MapFunction.register(_str_mapping("isalpha"), dtypes=np.bool)
str_isdecimal = MapFunction.register(_str_mapping("isdecimal"), dtypes=np.bool)
str_isdigit = MapFunction.register(_str_mapping("isdigit"), dtypes=np.bool)
str_islower = MapFunction.register(_str_mapping("islower"), dtypes=np.bool)
str_isnumeric = MapFunction.register(_str_mapping("isnumeric"), dtypes=np.bool)
str_isspace = MapFunction.register(_str_mapping("isspace"), dtypes=np.bool)
str_istitle = MapFunction.register(_str_mapping("istitle"), dtypes=np.bool)
str_isupper = MapFunction.register(_str_mapping("isupper"), dtypes=np.bool)
str_join = MapFunction.register(_str_mapping("join"), dtypes="clone")
str_length = MapFunction.register(_str_mapping("length"), dtypes=int)
str_ljust = MapFunction.register(_str_mapping("ljust"), dtypes="clone")
str_lower = MapFunction.register(_str_mapping("lower"), dtypes="clone")
str_lstrip = MapFunction.register(_str_mapping("lstrip"), dtypes="clone")
str_match = MapFunction.register(_str_mapping("match"), dtypes="clone")
str_normalize = MapFunction.register(_str_mapping("normalize"), dtypes="clone")
str_pad = MapFunction.register(_str_mapping("pad"), dtypes="clone")
str_partition = MapFunction.register(_str_mapping("partition"), dtypes="clone")
str_repeat = MapFunction.register(_str_mapping("repeat"), dtypes="clone")
str_replacing = MapFunction.register(_str_mapping("replacing"), dtypes="clone")
str_rfind = MapFunction.register(_str_mapping("rfind"), dtypes="clone")
str_rindex = MapFunction.register(_str_mapping("rindex"), dtypes="clone")
str_rjust = MapFunction.register(_str_mapping("rjust"), dtypes="clone")
str_rpartition = MapFunction.register(_str_mapping("rpartition"), dtypes="clone")
str_rsplit = MapFunction.register(_str_mapping("rsplit"), dtypes="clone")
str_rstrip = MapFunction.register(_str_mapping("rstrip"), dtypes="clone")
str_slice = MapFunction.register(_str_mapping("slice"), dtypes="clone")
str_slice_replacing = MapFunction.register(_str_mapping("slice_replacing"), dtypes="clone")
str_split = MapFunction.register(_str_mapping("split"), dtypes="clone")
str_startswith = MapFunction.register(_str_mapping("startswith"), dtypes=np.bool)
str_strip = MapFunction.register(_str_mapping("strip"), dtypes="clone")
str_swapcase = MapFunction.register(_str_mapping("swapcase"), dtypes="clone")
str_title = MapFunction.register(_str_mapping("title"), dtypes="clone")
str_translate = MapFunction.register(_str_mapping("translate"), dtypes="clone")
str_upper = MapFunction.register(_str_mapping("upper"), dtypes="clone")
str_wrap = MapFunction.register(_str_mapping("wrap"), dtypes="clone")
str_zfill = MapFunction.register(_str_mapping("zfill"), dtypes="clone")
# END String mapping partitions operations
def distinctive(self):
"""Return distinctive values of Collections object.
Returns
-------
ndarray
The distinctive values returned as a NumPy array.
"""
new_modin_frame = self._modin_frame._employ_full_axis(
0,
lambda x: x.squeeze(axis=1).distinctive(),
new_columns=self.columns,
)
return self.__constructor__(new_modin_frame)
def searchsorted(self, **kwargs):
"""
Return a QueryCompiler with the indices at which value/values should be inserted
to maintain order of the passed Collections.
Returns
-------
MonkeyQueryCompiler
"""
def mapping_func(part, *args, **kwargs):
elements_number = length(part.index)
assert elements_number > 0, "Wrong mapping behaviour of MapReduce"
# unify value type
value = kwargs.pop("value")
value = np.array([value]) if is_scalar(value) else value
if elements_number == 1:
part = part[part.columns[0]]
else:
part = part.squeeze()
part_index_start = part.index.start
part_index_stop = part.index.stop
result = part.searchsorted(value=value, *args, **kwargs)
processed_results = {}
value_number = 0
for value_result in result:
value_result += part_index_start
if value_result > part_index_start and value_result < part_index_stop:
processed_results[f"value{value_number}"] = {
"relative_location": "current_partition",
"index": value_result,
}
elif value_result <= part_index_start:
processed_results[f"value{value_number}"] = {
"relative_location": "previoius_partitions",
"index": part_index_start,
}
else:
processed_results[f"value{value_number}"] = {
"relative_location": "next_partitions",
"index": part_index_stop,
}
value_number += 1
return monkey.KnowledgeFrame(processed_results)
def reduce_func(mapping_results, *args, **kwargs):
def getting_value_index(value_result):
value_result_grouped = value_result.grouper(level=0)
rel_location = value_result_grouped.getting_group("relative_location")
ind = value_result_grouped.getting_group("index")
# executes if result is inside of the mapped part
if "current_partition" in rel_location.values:
assert (
rel_location[rel_location == "current_partition"].count() == 1
), "Each value should have single result"
return ind[rel_location.values == "current_partition"]
# executes if result is between mapped parts
elif rel_location.ndistinctive(sipna=False) > 1:
return ind[rel_location.values == "previous_partitions"][0]
# executes if result is outside of the mapped part
else:
if "next_partitions" in rel_location.values:
return ind[-1]
else:
return ind[0]
mapping_results_parsed = mapping_results.employ(
lambda ser: getting_value_index(ser)
).squeeze()
if incontainstance(mapping_results_parsed, monkey.Collections):
mapping_results_parsed = mapping_results_parsed.to_list()
return monkey.Collections(mapping_results_parsed)
return MapReduceFunction.register(mapping_func, reduce_func, preserve_index=False)(
self, **kwargs
)
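# Rough worked example of the mapping/reduce pair above (illustrative only; the
# data is an assumption, not from the original source): for a Collections
# [1, 3, 5, 7] split into row partitions [1, 3] and [5, 7] with value=4, the
# first partition reports "next_partitions" at its stop position 2, the second
# reports "previous_partitions" at its start position 2, and reduce_func picks
# the "previous_partitions" index, matching
# monkey.Collections([1, 3, 5, 7]).searchsorted(4) == 2.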
# Dt mapping partitions operations
dt_date = MapFunction.register(_dt_prop_mapping("date"))
dt_time = MapFunction.register(_dt_prop_mapping("time"))
dt_timetz = MapFunction.register(_dt_prop_mapping("timetz"))
dt_year = MapFunction.register(_dt_prop_mapping("year"))
dt_month = MapFunction.register(_dt_prop_mapping("month"))
dt_day = MapFunction.register(_dt_prop_mapping("day"))
dt_hour = MapFunction.register(_dt_prop_mapping("hour"))
dt_getting_minute = MapFunction.register(_dt_prop_mapping("getting_minute"))
dt_second = MapFunction.register(_dt_prop_mapping("second"))
dt_microsecond = MapFunction.register(_dt_prop_mapping("microsecond"))
dt_nanosecond = MapFunction.register(_dt_prop_mapping("nanosecond"))
dt_week = MapFunction.register(_dt_prop_mapping("week"))
dt_weekofyear = MapFunction.register(_dt_prop_mapping("weekofyear"))
dt_dayofweek = MapFunction.register(_dt_prop_mapping("dayofweek"))
dt_weekday = MapFunction.register(_dt_prop_mapping("weekday"))
dt_dayofyear = MapFunction.register(_dt_prop_mapping("dayofyear"))
dt_quarter = MapFunction.register(_dt_prop_mapping("quarter"))
dt_is_month_start = MapFunction.register(_dt_prop_mapping("is_month_start"))
dt_is_month_end = MapFunction.register(_dt_prop_mapping("is_month_end"))
dt_is_quarter_start = MapFunction.register(_dt_prop_mapping("is_quarter_start"))
dt_is_quarter_end = MapFunction.register(_dt_prop_mapping("is_quarter_end"))
dt_is_year_start = MapFunction.register(_dt_prop_mapping("is_year_start"))
dt_is_year_end = MapFunction.register(_dt_prop_mapping("is_year_end"))
dt_is_leap_year = MapFunction.register(_dt_prop_mapping("is_leap_year"))
dt_daysinmonth = MapFunction.register(_dt_prop_mapping("daysinmonth"))
dt_days_in_month = MapFunction.register(_dt_prop_mapping("days_in_month"))
dt_tz = MapReduceFunction.register(
_dt_prop_mapping("tz"), lambda kf: monkey.KnowledgeFrame(kf.iloc[0]), axis=0
)
dt_freq = MapReduceFunction.register(
_dt_prop_mapping("freq"), lambda kf: monkey.KnowledgeFrame(kf.iloc[0]), axis=0
)
dt_to_period = MapFunction.register(_dt_func_mapping("to_period"))
dt_convert_pydatetime = MapFunction.register(_dt_func_mapping("convert_pydatetime"))
dt_tz_localize = MapFunction.register(_dt_func_mapping("tz_localize"))
dt_tz_convert = MapFunction.register(_dt_func_mapping("tz_convert"))
dt_normalize = MapFunction.register(_dt_func_mapping("normalize"))
dt_strftime = MapFunction.register(_dt_func_mapping("strftime"))
dt_value_round = MapFunction.register(_dt_func_mapping("value_round"))
dt_floor = MapFunction.register(_dt_func_mapping("floor"))
dt_ceiling = MapFunction.register(_dt_func_mapping("ceiling"))
dt_month_name = MapFunction.register(_dt_func_mapping("month_name"))
dt_day_name = MapFunction.register(_dt_func_mapping("day_name"))
dt_to_pytimedelta = MapFunction.register(_dt_func_mapping("to_pytimedelta"))
dt_total_seconds = MapFunction.register(_dt_func_mapping("total_seconds"))
dt_seconds = MapFunction.register(_dt_prop_mapping("seconds"))
dt_days = MapFunction.register(_dt_prop_mapping("days"))
dt_microseconds = MapFunction.register(_dt_prop_mapping("microseconds"))
dt_nanoseconds = MapFunction.register(_dt_prop_mapping("nanoseconds"))
dt_components = MapFunction.register(
_dt_prop_mapping("components"), validate_columns=True
)
dt_qyear = MapFunction.register(_dt_prop_mapping("qyear"))
dt_start_time = MapFunction.register(_dt_prop_mapping("start_time"))
dt_end_time = MapFunction.register(_dt_prop_mapping("end_time"))
dt_to_timestamp = MapFunction.register(_dt_func_mapping("to_timestamp"))
# END Dt mapping partitions operations
def totype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
KnowledgeFrame with umkated dtypes.
"""
return self.__constructor__(self._modin_frame.totype(col_dtypes))
# Column/Row partitions reduce operations
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
def first_valid_index_builder(kf):
return kf.set_axis(
monkey.RangeIndex(length(kf.index)), axis="index", inplace=False
).employ(lambda kf: kf.first_valid_index())
# We getting the getting_minimum from each column, then take the getting_min of that to getting
# first_valid_index. The `to_monkey()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, first_valid_index_builder)
)
.getting_min(axis=1)
.to_monkey()
.squeeze()
)
return self.index[first_result]
def final_item_valid_index(self):
"""Returns index of final_item non-NaN/NULL value.
Return:
Scalar of index name.
"""
def final_item_valid_index_builder(kf):
return kf.set_axis(
monkey.RangeIndex(length(kf.index)), axis="index", inplace=False
).employ(lambda kf: kf.final_item_valid_index())
# We getting the getting_maximum from each column, then take the getting_max of that to getting
# final_item_valid_index. The `to_monkey()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, final_item_valid_index_builder)
)
.getting_max(axis=1)
.to_monkey()
.squeeze()
)
return self.index[first_result]
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
KnowledgeFrame object containing the descriptive statistics of the KnowledgeFrame.
"""
# Use monkey to calculate the correct columns
empty_kf = (
monkey.KnowledgeFrame(columns=self.columns)
.totype(self.dtypes)
.describe(**kwargs)
)
def describe_builder(kf, internal_indices=[]):
return kf.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._employ_full_axis_select_indices(
0,
describe_builder,
empty_kf.columns,
new_index=empty_kf.index,
new_columns=empty_kf.columns,
)
)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put total_all of that
# data in the same place.
cumgetting_max = FoldFunction.register(monkey.KnowledgeFrame.cumgetting_max)
cumgetting_min = FoldFunction.register(monkey.KnowledgeFrame.cumgetting_min)
cumtotal_sum = FoldFunction.register(monkey.KnowledgeFrame.cumtotal_sum)
cumprod = FoldFunction.register(monkey.KnowledgeFrame.cumprod)
diff = FoldFunction.register(monkey.KnowledgeFrame.diff)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.getting("axis", 0)
if is_list_like(lower) or is_list_like(upper):
new_modin_frame = self._modin_frame._fold(
axis, lambda kf: kf.clip(**kwargs)
)
else:
new_modin_frame = self._modin_frame._mapping(lambda kf: kf.clip(**kwargs))
return self.__constructor__(new_modin_frame)
def dot(self, other, squeeze_self=None, squeeze_other=None):
"""
Computes the matrix multiplication of self and other.
Parameters
----------
other : MonkeyQueryCompiler or NumPy array
The other query compiler or NumPy array to matrix multiply with self.
squeeze_self : boolean
The flag to squeeze self.
squeeze_other : boolean
The flag to squeeze other (this flag is applied if other is query compiler).
Returns
-------
MonkeyQueryCompiler
A new query compiler that contains result of the matrix multiply.
"""
if incontainstance(other, MonkeyQueryCompiler):
other = (
other.to_monkey().squeeze(axis=1)
if squeeze_other
else other.to_monkey()
)
def mapping_func(kf, other=other, squeeze_self=squeeze_self):
result = kf.squeeze(axis=1).dot(other) if squeeze_self else kf.dot(other)
if is_list_like(result):
return monkey.KnowledgeFrame(result)
else:
return monkey.KnowledgeFrame([result])
num_cols = other.shape[1] if length(other.shape) > 1 else 1
if length(self.columns) == 1:
new_index = (
["__reduced__"]
if (length(self.index) == 1 or squeeze_self) and num_cols == 1
else None
)
new_columns = ["__reduced__"] if squeeze_self and num_cols == 1 else None
axis = 0
else:
new_index = self.index
new_columns = ["__reduced__"] if num_cols == 1 else None
axis = 1
new_modin_frame = self._modin_frame._employ_full_axis(
axis, mapping_func, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
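# Illustrative note, not part of the original source: squeeze_self=True treats a
# single-column compiler as a 1-D vector, so dotting an (n, 1) frame with an
# (n,) array collapses to a single "__reduced__" cell, while squeeze_self=False
# keeps monkey.KnowledgeFrame.dot matrix semantics and preserves the row index.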
def _nsort(self, n, columns=None, keep="first", sort_type="nsmtotal_allest"):
def mapping_func(kf, n=n, keep=keep, columns=columns):
if columns is None:
return monkey.KnowledgeFrame(
gettingattr(monkey.Collections, sort_type)(
kf.squeeze(axis=1), n=n, keep=keep
)
)
return gettingattr(monkey.KnowledgeFrame, sort_type)(
kf, n=n, columns=columns, keep=keep
)
if columns is None:
new_columns = ["__reduced__"]
else:
new_columns = self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis=0, func=mapping_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def nsmtotal_allest(self, *args, **kwargs):
return self._nsort(sort_type="nsmtotal_allest", *args, **kwargs)
def nbiggest(self, *args, **kwargs):
return self._nsort(sort_type="nbiggest", *args, **kwargs)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after employing expr.
"""
# Make a clone of columns and eval on the clone to determine if result type is
# collections or not
empty_eval = (
monkey.KnowledgeFrame(columns=self.columns)
.totype(self.dtypes)
.eval(expr, inplace=False, **kwargs)
)
if incontainstance(empty_eval, monkey.Collections):
new_columns = (
[empty_eval.name] if empty_eval.name is not None else ["__reduced__"]
)
else:
new_columns = empty_eval.columns
new_modin_frame = self._modin_frame._employ_full_axis(
1,
lambda kf: monkey.KnowledgeFrame(kf.eval(expr, inplace=False, **kwargs)),
new_index=self.index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.getting("axis", 0)
def mode_builder(kf):
result = monkey.KnowledgeFrame(kf.mode(**kwargs))
# We return a knowledgeframe with the same shape as the input to ensure
# that total_all the partitions will be the same shape
if axis == 0 and length(kf) != length(result):
# Pad rows
result = result.reindexing(index=monkey.RangeIndex(length(kf.index)))
elif axis == 1 and length(kf.columns) != length(result.columns):
# Pad columns
result = result.reindexing(columns=monkey.RangeIndex(length(kf.columns)))
return monkey.KnowledgeFrame(result)
if axis == 0:
new_index = monkey.RangeIndex(length(self.index))
new_columns = self.columns
else:
new_index = self.index
new_columns = monkey.RangeIndex(length(self.columns))
new_modin_frame = self._modin_frame._employ_full_axis(
axis, mode_builder, new_index=new_index, new_columns=new_columns
)
return self.__constructor__(new_modin_frame).sipna(axis=axis, how="total_all")
def fillnone(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.getting("axis", 0)
value = kwargs.getting("value")
method = kwargs.getting("method", None)
limit = kwargs.getting("limit", None)
full_axis = method is not None or limit is not None
if incontainstance(value, dict):
kwargs.pop("value")
def fillnone(kf):
func_dict = {c: value[c] for c in value if c in kf.columns}
return kf.fillnone(value=func_dict, **kwargs)
else:
def fillnone(kf):
return kf.fillnone(**kwargs)
if full_axis:
new_modin_frame = self._modin_frame._fold(axis, fillnone)
else:
new_modin_frame = self._modin_frame._mapping(fillnone)
return self.__constructor__(new_modin_frame)
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
QueryCompiler containing quantiles of original QueryCompiler along an axis.
"""
axis = kwargs.getting("axis", 0)
q = kwargs.getting("q")
numeric_only = kwargs.getting("numeric_only", True)
assert incontainstance(q, (monkey.Collections, np.ndarray, monkey.Index, list))
if numeric_only:
new_columns = self._modin_frame._numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
if axis == 1:
query_compiler = self.gettingitem_column_array(new_columns)
new_columns = self.index
else:
query_compiler = self
def quantile_builder(kf, **kwargs):
result = kf.quantile(**kwargs)
return result.T if kwargs.getting("axis", 0) == 1 else result
# This took a long time to debug, so here is the rundown of why this is needed.
# Previously, we were operating on select indices, but that was broken. We were
# not correctly setting the columns/index. Because of how we compute `to_monkey`
# and because of the static nature of the index for `axis=1` it is easier to
# just handle this as the transpose (see `quantile_builder` above for the
# transpose within the partition) than it is to completely rework other
# internal methods. Basically we are returning the transpose of the object for
# correctness and cleanliness of the code.
if axis == 1:
q_index = new_columns
new_columns = monkey.Float64Index(q)
else:
q_index = monkey.Float64Index(q)
new_modin_frame = query_compiler._modin_frame._employ_full_axis(
axis,
lambda kf: quantile_builder(kf, **kwargs),
new_index=q_index,
new_columns=new_columns,
dtypes=np.float64,
)
result = self.__constructor__(new_modin_frame)
return result.transpose() if axis == 1 else result
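# Illustrative note, not part of the original source: for axis=1 each partition
# computes its quantiles and transposes them inside quantile_builder, so the
# full-axis result is built with the original row labels as its index and the q
# values as columns; the final .transpose() above then flips it back to the
# monkey layout, i.e. q as the index and the row labels as columns.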
def query(self, expr, **kwargs):
"""Query columns of the QueryCompiler with a boolean expression.
Args:
expr: Boolean expression to query the columns with.
Returns:
QueryCompiler containing the rows where the boolean expression is satisfied.
"""
def query_builder(kf, **kwargs):
return kf.query(expr, inplace=False, **kwargs)
return self.__constructor__(
self._modin_frame.filter_full_axis(1, query_builder)
)
def rank(self, **kwargs):
"""Computes numerical rank along axis. Equal values are set to the average.
Returns:
QueryCompiler containing the ranks of the values along an axis.
"""
axis = kwargs.getting("axis", 0)
numeric_only = True if axis else kwargs.getting("numeric_only", False)
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.rank(**kwargs),
new_index=self.index,
new_columns=self.columns if not numeric_only else None,
dtypes=np.float64,
)
return self.__constructor__(new_modin_frame)
def sorting_index(self, **kwargs):
"""Sorts the data with respect to either the columns or the indices.
Returns:
QueryCompiler containing the data sorted by columns or indices.
"""
axis = kwargs.pop("axis", 0)
level = kwargs.pop("level", None)
sort_remaining = kwargs.pop("sort_remaining", True)
kwargs["inplace"] = False
if level is not None or self.has_multiindex(axis=axis):
return self.default_to_monkey(
monkey.KnowledgeFrame.sorting_index,
axis=axis,
level=level,
sort_remaining=sort_remaining,
**kwargs,
)
# sorting_index can have ascending be None and behaves as if it is False.
# sort_the_values cannot have ascending be None. Thus, the following logic is to
# convert the ascending argument to one that works with sort_the_values
ascending = kwargs.pop("ascending", True)
if ascending is None:
ascending = False
kwargs["ascending"] = ascending
if axis:
new_columns = monkey.Collections(self.columns).sort_the_values(**kwargs)
new_index = self.index
else:
new_index = monkey.Collections(self.index).sort_the_values(**kwargs)
new_columns = self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.sorting_index(
axis=axis, level=level, sort_remaining=sort_remaining, **kwargs
),
new_index,
new_columns,
dtypes="clone" if axis == 0 else None,
)
return self.__constructor__(new_modin_frame)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
):
ErrorMessage.missmatch_with_monkey(
operation="melt", message="Order of rows could be different from monkey"
)
if var_name is None:
var_name = "variable"
def _convert_to_list(x):
if is_list_like(x):
x = [*x]
elif x is not None:
x = [x]
else:
x = []
return x
id_vars, value_vars = mapping(_convert_to_list, [id_vars, value_vars])
if length(value_vars) == 0:
value_vars = self.columns.sip(id_vars)
if length(id_vars) != 0:
to_broadcast = self.gettingitem_column_array(id_vars)._modin_frame
else:
to_broadcast = None
def employier(kf, internal_indices, other=[], internal_other_indices=[]):
if length(other):
other = monkey.concating(other, axis=1)
columns_to_add = other.columns.difference(kf.columns)
kf = monkey.concating([kf, other[columns_to_add]], axis=1)
return kf.melt(
id_vars=id_vars,
value_vars=kf.columns[internal_indices],
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
# we are not able to calculate correct indices here, so we make it `dummy_index`
inconsistent_frame = self._modin_frame.broadcast_employ_select_indices(
axis=0,
employ_indices=value_vars,
func=employier,
other=to_broadcast,
new_index=["dummy_index"] * length(id_vars),
new_columns=["dummy_index"] * length(id_vars),
)
# after employing `melt` for selected indices we will getting partitions like this:
# id_vars vars value | id_vars vars value
# 0 foo col3 1 | 0 foo col5 a so stacking it into
# 1 fiz col3 2 | 1 fiz col5 b `new_parts` to getting
# 2 bar col3 3 | 2 bar col5 c correct answer
# 3 zoo col3 4 | 3 zoo col5 d
new_parts = np.array(
[np.array([x]) for x in np.concatingenate(inconsistent_frame._partitions.T)]
)
new_index = monkey.RangeIndex(length(self.index) * length(value_vars))
new_modin_frame = self._modin_frame.__constructor__(
new_parts,
index=new_index,
columns=id_vars + [var_name, value_name],
)
result = self.__constructor__(new_modin_frame)
# this assignment needs to propagate correct indices into partitions
result.index = new_index
return result
# END Map across rows/columns
# __gettingitem__ methods
def gettingitem_array(self, key):
"""
Get column or row data specified by key.
Parameters
----------
key : MonkeyQueryCompiler, numpy.ndarray, monkey.Index or list
Targetting numeric indices or labels by which to retrieve data.
Returns
-------
MonkeyQueryCompiler
A new Query Compiler.
"""
# TODO: don't convert to monkey for array indexing
if incontainstance(key, type(self)):
key = key.to_monkey().squeeze(axis=1)
if is_bool_indexer(key):
if incontainstance(key, monkey.Collections) and not key.index.equals(self.index):
warnings.warn(
"Boolean Collections key will be reindexinged to match KnowledgeFrame index.",
PendingDeprecationWarning,
stacklevel=3,
)
elif length(key) != length(self.index):
raise ValueError(
"Item wrong lengthgth {} instead of {}.".formating(
length(key), length(self.index)
)
)
key = check_bool_indexer(self.index, key)
# We convert to a RangeIndex because gettingitem_row_array is expecting a list
# of indices, and RangeIndex will give us the exact indices of each boolean
# requested.
key = monkey.RangeIndex(length(self.index))[key]
if length(key):
return self.gettingitem_row_array(key)
else:
return self.from_monkey(
monkey.KnowledgeFrame(columns=self.columns), type(self._modin_frame)
)
else:
if whatever(k not in self.columns for k in key):
raise KeyError(
"{} not index".formating(
str([k for k in key if k not in self.columns]).replacing(",", "")
)
)
return self.gettingitem_column_array(key)
def gettingitem_column_array(self, key, numeric=False):
"""Get column data for targetting labels.
Args:
key: Targetting labels by which to retrieve data.
numeric: A boolean representing whether or not the key passed in represents
the numeric index or the named index.
Returns:
A new QueryCompiler.
"""
# Convert to list for type checking
if numeric:
new_modin_frame = self._modin_frame.mask(col_numeric_idx=key)
else:
new_modin_frame = self._modin_frame.mask(col_indices=key)
return self.__constructor__(new_modin_frame)
def gettingitem_row_array(self, key):
"""Get row data for targetting labels.
Args:
key: Targetting numeric indices by which to retrieve data.
Returns:
A new QueryCompiler.
"""
return self.__constructor__(self._modin_frame.mask(row_numeric_idx=key))
def setitem(self, axis, key, value):
"""Set the column defined by `key` to the `value` provided.
Args:
key: The column name to set.
value: The value to set the column to.
Returns:
A new QueryCompiler
"""
def setitem_builder(kf, internal_indices=[]):
kf = kf.clone()
if length(internal_indices) == 1:
if axis == 0:
kf[kf.columns[internal_indices[0]]] = value
else:
kf.iloc[internal_indices[0]] = value
else:
if axis == 0:
kf[kf.columns[internal_indices]] = value
else:
kf.iloc[internal_indices] = value
return kf
if incontainstance(value, type(self)):
value.columns = [key]
if axis == 0:
idx = self.columns.getting_indexer_for([key])[0]
if 0 < idx < length(self.columns) - 1:
first_mask = self._modin_frame.mask(
col_numeric_idx=list(range(idx))
)
second_mask = self._modin_frame.mask(
col_numeric_idx=list(range(idx + 1, length(self.columns)))
)
return self.__constructor__(
first_mask._concating(
1, [value._modin_frame, second_mask], "inner", False
)
)
else:
mask = self.sip(columns=[key])._modin_frame
if idx == 0:
return self.__constructor__(
value._modin_frame._concating(1, [mask], "inner", False)
)
else:
return self.__constructor__(
mask._concating(1, [value._modin_frame], "inner", False)
)
else:
value = value.transpose()
idx = self.index.getting_indexer_for([key])[0]
if 0 < idx < length(self.index) - 1:
first_mask = self._modin_frame.mask(
row_numeric_idx=list(range(idx))
)
second_mask = self._modin_frame.mask(
row_numeric_idx=list(range(idx + 1, length(self.index)))
)
return self.__constructor__(
first_mask._concating(
0, [value._modin_frame, second_mask], "inner", False
)
)
else:
mask = self.sip(index=[key])._modin_frame
if idx == 0:
return self.__constructor__(
value._modin_frame._concating(0, [mask], "inner", False)
)
else:
return self.__constructor__(
mask._concating(0, [value._modin_frame], "inner", False)
)
if is_list_like(value):
new_modin_frame = self._modin_frame._employ_full_axis_select_indices(
axis,
setitem_builder,
[key],
new_index=self.index,
new_columns=self.columns,
keep_remaining=True,
)
else:
new_modin_frame = self._modin_frame._employ_select_indices(
axis,
setitem_builder,
[key],
new_index=self.index,
new_columns=self.columns,
keep_remaining=True,
)
return self.__constructor__(new_modin_frame)
# END __gettingitem__ methods
# Drop/Dropna
# This will change the shape of the resulting data.
def sipna(self, **kwargs):
"""Returns a new QueryCompiler with null values sipped along given axis.
Return:
a new QueryCompiler
"""
return self.__constructor__(
self._modin_frame.filter_full_axis(
kwargs.getting("axis", 0) ^ 1,
lambda kf: | monkey.KnowledgeFrame.sipna(kf, **kwargs) | pandas.DataFrame.dropna |
"""
Define the CollectionsGroupBy and KnowledgeFrameGroupBy
classes that hold the grouper interfaces (and some implementations).
These are user facing as the result of the ``kf.grouper(...)`` operations,
which here returns a KnowledgeFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Ctotal_allable,
Hashable,
Iterable,
Mapping,
NamedTuple,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from monkey._libs import reduction as libreduction
from monkey._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from monkey.util._decorators import (
Appender,
Substitution,
doc,
)
from monkey.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import (
ifna,
notna,
)
from monkey.core import (
algorithms,
nanops,
)
from monkey.core.employ import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from monkey.core.base import SpecificationError
import monkey.core.common as com
from monkey.core.construction import create_collections_with_explicit_dtype
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base
from monkey.core.grouper.grouper import (
GroupBy,
_agg_template,
_employ_docs,
_transform_template,
warn_sipping_nuisance_columns_deprecated,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
total_all_indexes_same,
)
from monkey.core.collections import Collections
from monkey.core.util.numba_ import maybe_use_numba
from monkey.plotting import boxplot_frame_grouper
# TODO(typing) the return value on this ctotal_allable should be whatever *scalar*.
AggScalar = Union[str, Ctotal_allable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[KnowledgeFrame | Collections]):
"""
Create a property for a GroupBy subclass to dispatch to KnowledgeFrame/Collections.
Parameters
----------
name : str
klass : {KnowledgeFrame, Collections}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = gettingattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_total_allowlisted_properties(
klass: type[KnowledgeFrame | Collections], total_allowlist: frozenset[str]
):
"""
Create GroupBy member defs for KnowledgeFrame/Collections names in a total_allowlist.
Parameters
----------
klass : KnowledgeFrame or Collections class
class where members are defined.
total_allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, whatever such name is skipped.
"""
def pinner(cls):
for name in total_allowlist:
if hasattr(cls, name):
# don't override whateverthing that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
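# For illustration only (an assumption about names, not from the original
# source): once the class below is decorated, an total_allowlisted name such as
# "plot" resolves roughly as
#   CollectionsGroupBy.plot -> property(lambda self: self._make_wrapper("plot"))
# so plain attribute access dispatches to the per-group Collections method while
# names already defined on the class are left untouched.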
@pin_total_allowlisted_properties(Collections, base.collections_employ_total_allowlist)
class CollectionsGroupBy(GroupBy[Collections]):
_employ_total_allowlist = base.collections_employ_total_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Collections:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.igetting(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: ctotal_aller is responsible for setting ser.index
return ser
def _getting_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Collections]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.grouper([1, 1, 2, 2]).getting_min()
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg('getting_min')
1 1
2 3
dtype: int64
>>> s.grouper([1, 1, 2, 2]).agg(['getting_min', 'getting_max'])
getting_min getting_max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.grouper([1, 1, 2, 2]).agg(
... getting_minimum='getting_min',
... getting_maximum='getting_max',
... )
getting_minimum getting_maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.grouper([1, 1, 2, 2]).agg(lambda x: x.totype(float).getting_min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_employ_docs["template"].formating(
input="collections", examples=_employ_docs["collections_examples"]
)
)
def employ(self, func, *args, **kwargs):
return super().employ(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Collections")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.flat_underlying(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if incontainstance(func, str):
return gettingattr(self, func)(*args, **kwargs)
elif incontainstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in total_allocatement (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[total_allocatement]
return ret
else:
cyfunc = com.getting_cython_func(func)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_grouper.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_collections_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> KnowledgeFrame:
if incontainstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renamer is not supported")
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.adding(com.getting_ctotal_allable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, KnowledgeFrame | Collections] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if whatever(incontainstance(x, KnowledgeFrame) for x in results.values()):
from monkey import concating
res_kf = concating(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_kf
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindexing_output(output)
return output
def _indexed_output_to_nkframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Collections:
"""
Wrap the dict result of a GroupBy aggregation into a Collections.
"""
assert length(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Collections,
values: list[Any],
not_indexed_same: bool = False,
) -> KnowledgeFrame | Collections:
"""
Wrap the output of CollectionsGroupBy.employ into the expected result.
Parameters
----------
data : Collections
Input data for grouper operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
KnowledgeFrame or Collections
"""
if length(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if incontainstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_kf = self.obj._constructor_expanddim(values, index=index)
res_kf = self._reindexing_output(res_kf)
# if self.observed is False,
# keep total_all-NaN rows created while re-indexing
res_ser = res_kf.stack(sipna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif incontainstance(values[0], (Collections, KnowledgeFrame)):
return self._concating_objects(values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindexing_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_collections_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
@Substitution(klass="Collections")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by ctotal_aller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Ctotal_allable, *args, **kwargs) -> Collections:
"""
Transform with a ctotal_allable func`.
"""
assert ctotal_allable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.adding(klass(res, index=group.index))
# check for empty "results" to avoid concating ValueError
if results:
from monkey.core.reshape.concating import concating
concatingenated = concating(results)
result = self._set_result_index_ordered(concatingenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def _can_use_transform_fast(self, result) -> bool:
return True
def filter(self, func, sipna: bool = True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.ukf-mutation`
for more definal_item_tails.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> kf.grouper('A').B.filter(lambda x: x.average() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Collections
"""
if incontainstance(func, str):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._getting_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._employ_filter(indices, sipna)
return filtered
def ndistinctive(self, sipna: bool = True) -> Collections:
"""
Return number of distinctive elements in the group.
Returns
-------
Collections
Number of distinctive values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# distinctive observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new distinctive observation
mask = codes == -1
if sipna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).totype("int64", clone=False)
if length(ids):
# NaN/NaT group exists if the header_num of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if length(res) != length(ri):
res, out = np.zeros(length(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindexing_output(result, fill_value=0)
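# Rough worked example of the counting trick above (illustrative, the data is an
# assumption): for ids = [0, 0, 1, 1] and factorized codes = [0, 0, 0, 1],
# lexsort keeps the order, idx = [0, 2] marks the group starts, inc becomes
# [1, 0, 1, 1] after forcing a 1 at each group start, and
# np.add.reduceat(inc, idx) -> [1, 2]: one distinctive value in the first group,
# two in the second.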
@doc(Collections.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def counts_value_num(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
sipna: bool = True,
):
from monkey.core.reshape.unioner import getting_join_indexers
from monkey.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def employ_collections_counts_value_num():
return self.employ(
Collections.counts_value_num,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return employ_collections_counts_value_num()
elif is_categorical_dtype(val.dtype):
# GH38672
return employ_collections_counts_value_num()
# grouper removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Collections(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[ctotal_all-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
total_allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
# error: "ndarray" has no attribute "left"
# error: "ndarray" has no attribute "right"
sorter = np.lexsort(
(lab.left, lab.right, ids) # type: ignore[attr-defined]
)
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not length(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not length(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
names = self.grouper.names + [self.obj.name]
if sipna:
mask = codes[-1] != -1
if mask.total_all():
sipna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.totype("float")
d = np.diff(np.r_[idx, length(ids)])
if sipna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if sipna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgrouper.counts_value_num need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(length(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.total_sum(), length(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumtotal_sum() - 1, codes[-1]]
_, idx = getting_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.adding(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
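# Rough sketch of the non-binned path above (illustrative, the data is an
# assumption): for ids = [0, 0, 1] over values ["x", "y", "x"], lexsort orders
# observations by (group, value), the change points give per-(group, value)
# counts of [1, 1, 1] via np.diff, and the result's MultiIndex is rebuilt from
# the repeated group codes plus the factorized value codes.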
@doc(Collections.nbiggest)
def nbiggest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nbiggest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@doc(Collections.nsmtotal_allest)
def nsmtotal_allest(self, n: int = 5, keep: str = "first"):
f = partial(Collections.nsmtotal_allest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= total_all group sizes.
result = self._python_employ_general(f, data, not_indexed_same=True)
return result
@pin_total_allowlisted_properties(KnowledgeFrame, base.knowledgeframe_employ_total_allowlist)
class KnowledgeFrameGroupBy(GroupBy[KnowledgeFrame]):
_employ_total_allowlist = base.knowledgeframe_employ_total_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> kf
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> kf.grouper('A').agg('getting_min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> kf.grouper('A').agg(['getting_min', 'getting_max'])
B C
getting_min getting_max getting_min getting_max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> kf.grouper('A').B.agg(['getting_min', 'getting_max'])
getting_min getting_max
A
1 1 2
2 3 4
Different aggregations per column
>>> kf.grouper('A').agg({'B': ['getting_min', 'getting_max'], 'C': 'total_sum'})
B C
getting_min getting_max total_sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
monkey supports "named aggregation"
>>> kf.grouper("A").agg(
... b_getting_min=mk.NamedAgg(column="B", aggfunc="getting_min"),
... c_total_sum=mk.NamedAgg(column="C", aggfunc="total_sum"))
b_getting_min c_total_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to employ to that column.
Monkey provides the ``monkey.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a ctotal_allable or a string alias.
See :ref:`grouper.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> kf.grouper("A")[["B"]].agg(lambda x: x.totype(float).getting_min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="KnowledgeFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
"""
Provide the grouper split-employ-combine paradigm. Define the GroupBy
class providing the base-class of operations.
The CollectionsGroupBy and KnowledgeFrameGroupBy sub-classes
(defined in monkey.core.grouper.generic)
expose these user-facing objects to provide specific functionality.
"""
from contextlib import contextmanager
import datetime
from functools import partial, wraps
import inspect
import re
import types
from typing import (
Ctotal_allable,
Dict,
FrozenSet,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from monkey._config.config import option_context
from monkey._libs import Timestamp
import monkey._libs.grouper as libgrouper
from monkey._typing import FrameOrCollections, Scalar
from monkey.compat import set_function_name
from monkey.compat.numpy import function as nv
from monkey.errors import AbstractMethodError
from monkey.util._decorators import Appender, Substitution, cache_readonly, doc
from monkey.core.dtypes.cast import maybe_cast_result
from monkey.core.dtypes.common import (
ensure_float,
is_bool_dtype,
is_datetime64_dtype,
is_extension_array_dtype,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
)
from monkey.core.dtypes.missing import ifna, notna
from monkey.core import nanops
import monkey.core.algorithms as algorithms
from monkey.core.arrays import Categorical, DatetimeArray
from monkey.core.base import DataError, MonkeyObject, SelectionMixin
import monkey.core.common as com
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import base, ops
from monkey.core.indexes.api import CategoricalIndex, Index, MultiIndex
from monkey.core.collections import Collections
from monkey.core.sorting import getting_group_index_sorter
_common_see_also = """
See Also
--------
Collections.%(name)s
KnowledgeFrame.%(name)s
"""
_employ_docs = dict(
template="""
Apply function `func` group-wise and combine the results togettingher.
The function passed to `employ` must take a {input} as its first
argument and return a KnowledgeFrame, Collections or scalar. `employ` will
then take care of combining the results back togettingher into a single
knowledgeframe or collections. `employ` is therefore a highly flexible
grouping method.
While `employ` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
like `agg` or `transform`. Monkey offers a wide range of method that will
be much faster than using `employ` for their specific purposes, so try to
use them before reaching for `employ`.
Parameters
----------
func : ctotal_allable
A ctotal_allable that takes a {input} as its first argument, and
returns a knowledgeframe, a collections or a scalar. In addition the
ctotal_allable may take positional and keyword arguments.
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to `func`.
Returns
-------
applied : Collections or KnowledgeFrame
See Also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate : Apply aggregate function to the GroupBy object.
transform : Apply function column-by-column to the GroupBy object.
Collections.employ : Apply a function to a Collections.
KnowledgeFrame.employ : Apply a function to each row or column of a KnowledgeFrame.
""",
knowledgeframe_examples="""
>>> kf = mk.KnowledgeFrame({'A': 'a a b'.split(),
'B': [1,2,3],
'C': [4,6, 5]})
>>> g = kf.grouper('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Ctotal_alling `employ` in various ways, we can getting different grouping results:
Example 1: below the function passed to `employ` takes a KnowledgeFrame as
its argument and returns a KnowledgeFrame. `employ` combines the result for
each group togettingher into a new KnowledgeFrame:
>>> g[['B', 'C']].employ(lambda x: x / x.total_sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to `employ` takes a KnowledgeFrame as
its argument and returns a Collections. `employ` combines the result for
each group togettingher into a new KnowledgeFrame:
>>> g[['B', 'C']].employ(lambda x: x.getting_max() - x.getting_min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to `employ` takes a KnowledgeFrame as
its argument and returns a scalar. `employ` combines the result for
each group togettingher into a Collections, including setting the index as
appropriate:
>>> g.employ(lambda x: x.C.getting_max() - x.B.getting_min())
A
a 5
b 2
dtype: int64
""",
collections_examples="""
>>> s = mk.Collections([0, 1, 2], index='a a b'.split())
>>> g = s.grouper(s.index)
From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.
Ctotal_alling `employ` in various ways, we can getting different grouping results:
Example 1: The function passed to `employ` takes a Collections as
its argument and returns a Collections. `employ` combines the result for
each group togettingher into a new Collections:
>>> g.employ(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to `employ` takes a Collections as
its argument and returns a scalar. `employ` combines the result for
each group togettingher into a Collections, including setting the index as
appropriate:
>>> g.employ(lambda x: x.getting_max() - x.getting_min())
a 1
b 0
dtype: int64
Notes
-----
In the current implementation `employ` ctotal_alls `func` twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
""",
)
_pipe_template = """
Apply a function `func` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use `.pipe` when you want to improve readability by chaining togettingher
functions that expect Collections, KnowledgeFrames, GroupBy or Resample_by_numr objects.
Instead of writing
>>> h(g(f(kf.grouper('group')), arg1=a), arg2=b, arg3=c) # doctest: +SKIP
You can write
>>> (kf.grouper('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : ctotal_allable or tuple of (ctotal_allable, str)
Function to employ to this %(klass)s object or, alternatively,
a `(ctotal_allable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `ctotal_allable` that expects the
%(klass)s object.
args : iterable, optional
Positional arguments passed into `func`.
kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
object : the return type of `func`.
See Also
--------
Collections.pipe : Apply a function with arguments to a collections.
KnowledgeFrame.pipe: Apply a function with arguments to a knowledgeframe.
employ : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://monkey.pydata.org/monkey-docs/stable/user_guide/grouper.html#piping-function-ctotal_alls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Ctotal_all function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to employ to each group.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optiontotal_ally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``partotal_allel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'partotal_allel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.grouper.employ
%(klass)s.grouper.aggregate
%(klass)s.transform
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a KnowledgeFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
When using ``engine='numba'``, there will be no "ftotal_all back" behavior interntotal_ally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
Examples
--------
>>> kf = mk.KnowledgeFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = kf.grouper('A')
>>> grouped.transform(lambda x: (x - x.average()) / x.standard())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
Broadcast result of the transformatingion
>>> grouped.transform(lambda x: x.getting_max() - x.getting_min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
_agg_template = """
Aggregate using one or more operations over the specified axis.
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.employ.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.total_sum, 'average']``
- dict of axis labels -> functions, function names or list of such.
Can also accept a Numba JIT function with
``engine='numba'`` specified.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optiontotal_ally available for use.
.. versionchanged:: 1.1.0
*args
Positional arguments to pass to func
engine : str, default 'cython'
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
.. versionadded:: 1.1.0
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``partotal_allel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'partotal_allel': False}`` and will be
applied to the function
.. versionadded:: 1.1.0
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
See Also
--------
%(klass)s.grouper.employ
%(klass)s.grouper.transform
%(klass)s.aggregate
Notes
-----
When using ``engine='numba'``, there will be no "ftotal_all back" behavior interntotal_ally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
%(examples)s
"""
class GroupByPlot(MonkeyObject):
"""
Class implementing the .plot attribute for grouper objects.
"""
def __init__(self, grouper):
self._grouper = grouper
def __ctotal_all__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = "plot"
return self._grouper.employ(f)
def __gettingattr__(self, name: str):
def attr(*args, **kwargs):
def f(self):
return gettingattr(self.plot, name)(*args, **kwargs)
return self._grouper.employ(f)
return attr
@contextmanager
def _group_selection_context(grouper):
"""
Set / reset the _group_selection_context.
"""
grouper._set_group_selection()
yield grouper
grouper._reset_group_selection()
_KeysArgType = Union[
Hashable,
List[Hashable],
Ctotal_allable[[Hashable], Hashable],
List[Ctotal_allable[[Hashable], Hashable]],
Mapping[Hashable, Hashable],
]
class _GroupBy(MonkeyObject, SelectionMixin, Generic[FrameOrCollections]):
_group_selection = None
_employ_whitelist: FrozenSet[str] = frozenset()
def __init__(
self,
obj: FrameOrCollections,
keys: Optional[_KeysArgType] = None,
axis: int = 0,
level=None,
grouper: "Optional[ops.BaseGrouper]" = None,
exclusions=None,
selection=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
mutated: bool = False,
sipna: bool = True,
):
self._selection = selection
assert incontainstance(obj, NDFrame), type(obj)
obj._consolidate_inplace()
self.level = level
if not as_index:
if not incontainstance(obj, KnowledgeFrame):
raise TypeError("as_index=False only valid with KnowledgeFrame")
if axis != 0:
raise ValueError("as_index=False only valid for axis=0")
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = mutated
self.sipna = sipna
if grouper is None:
from monkey.core.grouper.grouper import getting_grouper
grouper, exclusions, obj = getting_grouper(
obj,
keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated,
sipna=self.sipna,
)
self.obj = obj
self.axis = obj._getting_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __length__(self) -> int:
return length(self.groups)
def __repr__(self) -> str:
# TODO: Better repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
We create the grouper on instantiation sub-classes may have a
different policy.
"""
pass
@property
def groups(self):
"""
Dict {group name -> group labels}.
"""
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
"""
Dict {group name -> group indices}.
"""
self._assure_grouper()
return self.grouper.indices
def _getting_indices(self, names):
"""
Safe getting multiple indices, translate keys for
datelike to underlying repr.
"""
def getting_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if incontainstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif incontainstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if length(names) == 0:
return []
if length(self.indices) > 0:
index_sample_by_num = next(iter(self.indices))
else:
index_sample_by_num = None # Dummy sample_by_num
name_sample_by_num = names[0]
if incontainstance(index_sample_by_num, tuple):
if not incontainstance(name_sample_by_num, tuple):
msg = "must supply a tuple to getting_group with multiple grouping keys"
raise ValueError(msg)
if not length(name_sample_by_num) == length(index_sample_by_num):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError as err:
# turns out it wasn't a tuple
msg = (
"must supply a same-lengthgth tuple to getting_group "
"with multiple grouping keys"
)
raise ValueError(msg) from err
converters = [getting_converter(s) for s in index_sample_by_num]
names = (tuple(f(n) for f, n in zip(converters, name)) for name in names)
else:
converter = getting_converter(index_sample_by_num)
names = (converter(name) for name in names)
return [self.indices.getting(name, []) for name in names]
def _getting_index(self, name):
"""
Safe getting index, translate keys for datelike to underlying repr.
"""
return self._getting_indices([name])[0]
@cache_readonly
def _selected_obj(self):
# Note: _selected_obj is always just `self.obj` for CollectionsGroupBy
if self._selection is None or incontainstance(self.obj, Collections):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection.
Used for methods needing to return info on each group regardless of
whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache("_selected_obj")
def _set_group_selection(self):
"""
Create group based selection.
Used when selection is not passed directly but instead via a grouper.
NOTE: this should be paired with a ctotal_all to _reset_group_selection
"""
grp = self.grouper
if not (
self.as_index
and gettingattr(grp, "groupings", None) is not None
and self.obj.ndim > 1
and self._group_selection is None
):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis]
if length(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers), sort=False).convert_list()
self._reset_cache("_selected_obj")
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatingenate(self._getting_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sorting_index(axis=self.axis)
result.set_axis(self.obj._getting_axis(self.axis), axis=self.axis, inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._employ_whitelist
def __gettingattr__(self, attr: str):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
@Substitution(
klass="GroupBy",
versionadded=".. versionadded:: 0.21.0",
examples="""\
>>> kf = mk.KnowledgeFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> kf
A B
0 a 1
1 b 2
2 a 3
3 b 4
To getting the difference between each groups getting_maximum and getting_minimum value in one
pass, you can do
>>> kf.grouper('A').pipe(lambda x: x.getting_max() - x.getting_min())
B
A
a 2
b 2""",
)
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
assert name in self._employ_whitelist
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = gettingattr(self._selected_obj, name)
if not incontainstance(f, types.MethodType):
return self.employ(lambda self: gettingattr(self, name))
f = gettingattr(type(self._selected_obj), name)
sig = inspect.signature(f)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
if "axis" in sig.parameters:
if kwargs.getting("axis", None) is None:
kwargs["axis"] = self.axis
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when ctotal_alling plot methods,
# to avoid duplicates
curried.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in base.plotting_methods:
return self.employ(curried)
try:
return self.employ(curried)
except TypeError as err:
if not re.search(
"reduction operation '.*' not total_allowed for this dtype", str(err)
):
# We don't have a cython implementation
# TODO: is the above comment accurate?
raise
if self.obj.ndim == 1:
# this can be ctotal_alled recursively, so need to raise ValueError
raise ValueError
# GH#3688 try to operate item-by-item
result = self._aggregate_item_by_item(name, *args, **kwargs)
return result
wrapper.__name__ = name
return wrapper
def getting_group(self, name, obj=None):
"""
Construct KnowledgeFrame from group with provided name.
Parameters
----------
name : object
The name of the group to getting as a KnowledgeFrame.
obj : KnowledgeFrame, default None
The KnowledgeFrame to take the KnowledgeFrame out of. If
it is None, the object grouper was ctotal_alled on will
be used.
Returns
-------
group : same type as obj
"""
if obj is None:
obj = self._selected_obj
inds = self._getting_index(name)
if not length(inds):
raise KeyError(name)
return obj._take_with_is_clone(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator.
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.getting_iterator(self.obj, axis=self.axis)
@Appender(
_employ_docs["template"].formating(
input="knowledgeframe", examples=_employ_docs["knowledgeframe_examples"]
)
)
def employ(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their ctotal_allable functions prior, this
# wouldn't be needed
if args or kwargs:
if ctotal_allable(func):
@wraps(func)
def f(g):
with np.errstate(total_all="ignore"):
return func(g, *args, **kwargs)
elif hasattr(nanops, "nan" + func):
# TODO: should we wrap this in to e.g. _is_builtin_func?
f = gettingattr(nanops, "nan" + func)
else:
raise ValueError(
"func must be a ctotal_allable if args or kwargs are supplied"
)
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context("mode.chained_total_allocatement", None):
try:
result = self._python_employ_general(f)
except TypeError:
# gh-20949
# try again, with .employ acting as a filtering
# operation, by excluding the grouping column
# This would normtotal_ally not be triggered
# except if the ukf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_employ_general(f)
return result
def _python_employ_general(self, f):
keys, values, mutated = self.grouper.employ(f, self._selected_obj, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
def _iterate_slices(self) -> Iterable[Collections]:
raise AbstractMethodError(self)
def transform(self, func, *args, **kwargs):
raise AbstractMethodError(self)
def _cumcount_array(self, ascending: bool = True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from lengthgth of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for grouper in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = getting_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], length(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumtotal_sum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].totype(np.int64, clone=False)
def _transform_should_cast(self, func_nm: str) -> bool:
"""
Parameters
----------
func_nm: str
The name of the aggregation function being performed
Returns
-------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillnone(0) > 0).whatever() and (
func_nm not in base.cython_cast_blacklist
)
def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs):
output: Dict[base.OutputKey, np.ndarray] = {}
for idx, obj in enumerate(self._iterate_slices()):
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, _ = self.grouper.transform(obj.values, how, **kwargs)
except NotImplementedError:
continue
if self._transform_should_cast(how):
result = maybe_cast_result(result, obj, how=how)
key = base.OutputKey(label=name, position=idx)
output[key] = result
if length(output) == 0:
raise DataError("No numeric types to aggregate")
return self._wrap_transformed_output(output)
def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]):
raise AbstractMethodError(self)
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
def _cython_agg_general(
self, how: str, alt=None, numeric_only: bool = True, getting_min_count: int = -1
):
output: Dict[base.OutputKey, Union[np.ndarray, DatetimeArray]] = {}
# Idetotal_ally we would be able to enumerate self._iterate_slices and use
# the index from enumeration as the key of output, but ohlc in particular
# returns a (n x 4) array. Output requires 1D ndarrays as values, so we
# need to slice that up into 1D arrays
idx = 0
for obj in self._iterate_slices():
name = obj.name
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
result, agg_names = self.grouper.aggregate(
obj._values, how, getting_min_count=getting_min_count
)
if agg_names:
# e.g. ohlc
assert length(agg_names) == result.shape[1]
for result_column, result_name in zip(result.T, agg_names):
key = base.OutputKey(label=result_name, position=idx)
output[key] = maybe_cast_result(result_column, obj, how=how)
idx += 1
else:
assert result.ndim == 1
key = base.OutputKey(label=name, position=idx)
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this is out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_totype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
# arr.totype silengthtly overflows, so this
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
totype_overflowsafe(arr, dtype)
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655โ690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
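"""
Blindfolding estimate of predictive relevance (Q2) for a PLS path model.
With an omission distance of 7, every 7th data point (walking through the
data column by column) is set to missing, imputed with the column average,
and the PLS model is re-estimated; squared prediction errors (SSE) and
squared deviations from the average (SSO) are accumulated per manifest
variable.
"""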
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
# observations/distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] = mk.KnowledgeFrame.total_sum((data2_ - average)**2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 22:05:16 2021
@author: andreaapariciomartinez
This code simulates the system in Figure 1a for the perturbed case, and induces a critical transition to the unperturbed case. It creates the plots in Figure 1c-f.
"""
import numpy as np
from matplotlib import pyplot as plt
import sdeint
import scipy.integrate as scint
import monkey as mk
import math as mt
#%% PARAMETERS
x0 = [.7,2,1.8]
h=0.1
kp1 = 4
ka1 = 2
ka2 = 2
tau = 1
g0 = 8*tau
gp11 = g0/kp1
gp12 = -g0/kp1
ga11 = g0/ka1
ga21 = g0/ka2
alfp1 = -.1
alfa1 = -.1
alfa2 = -.5
bp1 = 1
bp2 = 1
bp3 = 1
#perturbations
p1_1=1
p1_2=2
p2_1=1.6
p2_2=-1
p3_1=-.5
p3_2=1
#% noise
center = 0.0
siggetting_max=24e-3
sigmay = .02
#%plots
colors=['m', 'c', 'tab:orange']
labels = ['P3', 'A1', 'A2']
colors2 = ['darkmagenta','darkcyan','chocolate']
ticksp = np.arange(0,110,25)
#% critical transition
chsteps = 200
tch = 300
#%% DYNAMICS
#%% Defines the perturbations.
def p_fun(t):
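# Pulse schedule: the returned amplitudes [p1, p2, p3] are nonzero only in
# three short windows, t in (25, 27), (50, 52) and (75, 77); which species
# each pulse is applied to depends on the mutual_eq variant used below.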
p2=0
p3=0
p1=0
if t > 25:
if t<27:
p1=p1_1
if t>50:
if t<52:
p1=p1_2
p2=p2_1
p3=p3_1
if t >75:
if t<77:
p2=p2_2
p3=p3_2
return [p1,p2,p3]
#%% Defines the interaction strength for every step
def disc_fun(t):
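# Stepwise ramp of the mutualistic coupling: the multiplier drops by
# 1/chsteps every tch time units and is floored at zero.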
disc2 = 1-((1/chsteps)*mt.floor((t)/tch))
if disc2<0:
disc2=0
return disc2
#%% Mutualist dynamics
#perturbation on A1 and A2
def mutual_eq(x,t):
P3 = x[0]
A1 = x[1]
A2 = x[2]
if P3<0:
P3 = 0
if A1<0:
A1=0
if A2<0:
A2=0
if sim == "CT":
[p1,p2,p3] = [0,0,0]
d1 = disc_fun(t)
if sim == "none":
[p1,p2,p3] = [0,0,0]
d1 = 1
if sim == "pert":
[p1,p2,p3]=p_fun(t)
d1 = 1
gp11 = d1*g0/kp1
ga11 = d1*g0/ka1
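# Saturating (Holling type-II-like) mutualistic benefit with handling time h;
# the 10e-5 term is read here as a small constant inflow that keeps the
# abundances from being absorbed exactly at zero.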
dP3 = P3*(alfp1 -bp1*P3 + (gp11*A1 + gp12 * A2)/(1+h*(gp11*A1 + gp12 * A2)))+10e-5
dA1 = A1*(alfa1 -bp1*A1 + (ga11*P3)/(1+h*ga11*P3))+p1+10e-5
dA2 = A2*(alfa2 -bp3*A2 + (ga21*P3)/(1+h*ga21*P3))+p2+10e-5
return np.array([dP3, dA1, dA2])
#%% Mutualist dynamics
#perturbation on A1 and P3
def mutual_eq2(x,t):
P3 = x[0]
A1 = x[1]
A2 = x[2]
if P3<0:
P3 = 0
if A1<0:
A1=0
if A2<0:
A2=0
if sim == "CT":
[p1,p2,p3] = [0,0,0]
d1 = disc_fun(t)
if sim == "none":
[p1,p2,p3] = [0,0,0]
d1 = 1
if sim == "pert":
[p1,p2,p3]=p_fun(t)
d1 = 1
gp11 = d1*g0/kp1
ga11 = d1*g0/ka1
dP3 = P3*(alfp1 -bp1*P3 + (gp11*A1 + gp12 * A2)/(1+h*(gp11*A1 + gp12 * A2)))+p3+10e-5
dA1 = A1*(alfa1 -bp1*A1 + (ga11*P3)/(1+h*ga11*P3))+p1+10e-5
dA2 = A2*(alfa2 -bp3*A2 + (ga21*P3)/(1+h*ga21*P3))+10e-5
return np.array([dP3, dA1, dA2])
#%% additive noise
def addN_eq(x,t):
g1 = siggetting_max
g2 = siggetting_max
g3 = siggetting_max
return np.diag([g1, g2, g3])
#%% Assigns a zero value to negative abundances.
def clean(x):
for i in range(length(x)):
for j in range(3):
if x[i,j]<0:
x[i,j]=0
return x
#%% SIMULATIONS
#%% PERTURBATION
#%% Simulation parameters
tgetting_max = 100 #final time
st = .1 #step size
t = np.arange(0,tgetting_max,st)
sim = "pert"
#%%solve
mut_add1 = sdeint.itoint(mutual_eq, addN_eq, x0, t)
mut_add2 = sdeint.itoint(mutual_eq2, addN_eq, x0, t)
#%% plot perturbation time collections
plt.figure(figsize=[5,5.5])
plt.subplot(2,1,1)
for i in range(3):
plt.plot(t,mut_add1[:,i], color = colors[i], label = labels[i])
plt.xlabel("time")
plt.ylabel("abundance")
plt.xticks(ticksp)
plt.yticks([1,2,3])
plt.legend()
plt.subplot(2,1,2)
for i in range(3):
plt.plot(t,mut_add2[:,i], color = colors[i], label = labels[i])
plt.xlabel("time")
plt.ylabel("abundance")
plt.xticks(ticksp)
plt.legend()
plt.yticks([1,2,3])
plt.tight_layout()
#%% CRITICAL TRANSITON
sim = "CT"
#%% Simulation parameters
st = .1
pgetting_min=.6
actsteps = chsteps-(pgetting_min/(1/chsteps))
tgetting_maxd = tch*actsteps
td = np.arange(0.0,tgetting_maxd,st)
dch2 = np.arange(1.0,disc_fun(tgetting_maxd),- (st)/(chsteps*tch))
chmea = np.arange(1,pgetting_min,-1/chsteps)
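# actsteps = chsteps*(1 - pgetting_min): the number of ramp steps needed to lower
# the coupling from 1 to pgetting_min, so the run ends before the interaction vanishes.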
#%% solve
mut_d_add = sdeint.itoint(mutual_eq, addN_eq, x0, td)
#%%
mut_d_add = clean(mut_d_add)
#%% Gets the average at every step
averageN = np.zeros((int(actsteps),length(mut_d_add.T)))
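# Average abundance per ramp step; if a step's average falls below half of the
# previous step's (while still above 0.05), it is recomputed over only the
# first tenth of that step, so the pre-collapse level is what gets recorded.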
for i in range(int(actsteps)):
for j in range(3):
temp = mk.Collections(mut_d_add[i*int(tch/st):(i*int(tch/st))+int(tch/st)-1,j])
averageN[i,j] = mk.KnowledgeFrame.average(temp)
if i > 0:
if averageN[i,j] < averageN[i-1,j]/2:
if averageN[i,j]>.05:
temp = mk.Collections(mut_d_add[i*int(tch/st):(i*int(tch/st))+int(tch/st/10)-1,j])
averageN[i,j] = mk.KnowledgeFrame.average(temp)
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # used to find the stats table
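# The population table is parsed as a flat list of <td> cells; each table row
# has 13 columns, so the cell text is collected and re-chunked into rows of 13
# (208 cells ~ 16 rows). The hard-coded sizes assume the page layout at the
# time of writing.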
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1 = d1.employ(mk.to_num, errors='ignore')  # employ returns a new frame, so keep the result
listq = mk.Collections.convert_list(d1[0:16][0])
import utils as dutil
import numpy as np
import monkey as mk
import astropy.units as u
from astropy.time import Time
import astropy.constants as const
import astropy.coordinates as coords
from astropy.coordinates import SkyCoord
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import curve_fit
import tqdm
from schwimmbad import MultiPool
from legwork import psd, strain, utils
import legwork.source as source
import paths
mk.options.mode.chained_total_allocatement = None
# Specific to Thiele et al. (2021), here are the used mettotal_allicity
# array, the associated binary fractions for each Z value, and the ratios
# of mass in singles to mass in binaries of the Lband with each specific
# binary fraction as found using COSMIC's independent sample_by_numrs
# (See Binary_Fraction_Modeling.ipynb for Tutorials). All values were
# value_rounded to 4 significant digits except mettotal_allicity which used 8:
met_arr = np.logspace(np.log10(1e-4), np.log10(0.03), 15)
met_arr = np.value_round(met_arr, 8)
met_arr = np.adding(0.0, met_arr)
binfracs = np.array(
[
0.4847,
0.4732,
0.4618,
0.4503,
0.4388,
0.4274,
0.4159,
0.4044,
0.3776,
0.3426,
0.3076,
0.2726,
0.2376,
0.2027,
0.1677,
]
)
ratios = np.array(
[
0.68,
0.71,
0.74,
0.78,
0.82,
0.86,
0.9,
0.94,
1.05,
1.22,
1.44,
1.7,
2.05,
2.51,
3.17,
]
)
ratio_05 = 0.64
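# ratio_05: the corresponding singles-to-binaries mass ratio when a constant
# binary fraction of 0.5 is assumed (the F50 model used further below).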
# LEGWORK uses astropy units so we do also for consistency
G = const.G.value # gravitational constant
c = const.c.value # speed of light in m s^-1
M_sol = const.M_sun.value # sun's mass in kg
R_sol = const.R_sun.value # sun's radius in metres
sec_Myr = u.Myr.to("s") # seconds in a million years
m_kpc = u.kpc.to("m") # metres in a kiloparsec
L_sol = const.L_sun.value # solar lugetting_minosity in Watts
Z_sun = 0.02 # solar mettotal_allicity
sun = coords.getting_sun(Time("2021-04-23T00:00:00", scale="utc")) # sun coordinates
sun_g = sun.transform_to(coords.Galactocentric)
sun_yGx = sun_g.galcen_distance.to("kpc").value
sun_zGx = sun_g.z.to("kpc").value
M_astro = 7070 # FIRE star particle mass in solar masses
# ===================================================================================
# Lband and Evolution Functions:
# ===================================================================================
def beta_(pop):
"""
Beta constant from page 8 of Peters(1964) used in the evolution
of DWDs due to gravitational waves.
INPUTS
----------------------
pop [monkey knowledgeframe]: DF of population which includes component
masses in solar masses
RETURNS
----------------------
beta [array]: array of beta values
"""
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
beta = 64 / 5 * G ** 3 * m1 * m2 * (m1 + m2) / c ** 5
return beta
def a_of_t(pop, t):
"""
Uses Peters(1964) equation (5.9) for circular binaries to find separation.
as a function of time.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
t [array]: time at which to find separation. Must be in Myr.
RETURNS
----------------------
array of separation at time t in solar radii.
"""
t = t * sec_Myr
beta = beta_(pop)
a_i = pop.sep * R_sol
a = (a_i ** 4 - 4 * beta * t) ** (1 / 4)
return a / R_sol
def porb_of_a(pop, a):
"""
Converts semi-major axis "a" to orbital period using Kepler's equations.
INPUTS
----------------------
pop [monkey knowledgeframe]: population from COSMIC.
a [array]: semi-major axis of systems. Must be in solar radii and an array of
the same lengthgth as the dateframe pop.
RETURNS
t [array]: orbital period in days.
"""
a = a * R_sol
m1 = pop.mass_1 * M_sol
m2 = pop.mass_2 * M_sol
P_sqrd = 4 * np.pi ** 2 * a ** 3 / G / (m1 + m2)
P = np.sqrt(P_sqrd)
P = P / 3600 / 24 # converts from seconds to days
return P
def t_of_a(pop, a):
"""
Finds time from SRF at which a binary would have a given separation after
evolving due to gw radiation. (Re-arrangement of a_of_t(pop, t)).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC.
a [array]: separation to find time for. Must be in solar radii.
RETURNS
----------------------
t [array]: time in Myr where DWD reaches separation "a"
"""
beta = beta_(pop)
a_i = pop.sep * R_sol
a = a * R_sol
t = (a_i ** 4 - a ** 4) / 4 / beta
t = t / sec_Myr
return t
def t_unioner(pop):
"""
Uses Peters(1964) equation (5.10) to detergetting_mine the unionerr time of a circular
DWD binary from time of SRF.
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
t [array]: time in Myr.
"""
a_0 = pop.sep * R_sol
beta = beta_(pop)
T = a_0 ** 4 / 4 / beta
T = T / sec_Myr
return T
def a_of_RLOF(pop):
"""
Finds separation when lower mass WD overflows its
Roche Lobe. Taken from Eq. 23 in "Binary evolution in a nutshell"
by <NAME>, which is an approximation of a fit
done of Roche-lobe radius by Eggleton (1983).
INPUTS
----------------------
pop [monkey knowledgeframe]: population subset from COSMIC
RETURNS
----------------------
a [array]: RLO separations of pop
"""
m1 = pop.mass_1
m2 = pop.mass_2
primary_mass = np.where(m1 > m2, m1, m2)
secondary_mass = np.where(m1 > m2, m2, m1)
secondary_radius = np.where(m1 > m2, pop.rad_2, pop.rad_1)
R2 = secondary_radius
q = secondary_mass / primary_mass
num = 0.49 * q ** (2 / 3)
denom = 0.6 * q ** (2 / 3) + np.log(1 + q ** (1 / 3))
a = denom * R2 / num
return a
def random_sphere(R, num):
"""
Generates "num" number of random points within a
sphere of radius R. It picks random x, y, z values
within a cube and discards it if it's outside the
sphere.
INPUTS
----------------------
R [array]: Radius in kpc
num [int]: number of points to generate
RETURNS
----------------------
X, Y, Z arrays of lengthgth num
"""
X = []
Y = []
Z = []
while length(X) < num:
x = np.random.uniform(-R, R)
y = np.random.uniform(-R, R)
z = np.random.uniform(-R, R)
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if r > R:
continue
if r <= R:
X.adding(x)
Y.adding(y)
Z.adding(z)
X = np.array(X)
Y = np.array(Y)
Z = np.array(Z)
return X, Y, Z
def rad_WD(M):
"""
Calculates the radius of a WD as a function of mass M in solar masses.
Taken from Eq. 91 in Hurley et al. (2000), from Eq. 17 in Tout et al. (1997)
INPUTS
----------------------
M [array]: masses of the WDs in solar masses
RETURNS
----------------------
rad[array]: radii of the WDs in solar radii
"""
M_ch = 1.44
R_NS = 1.4e-5 * np.ones(length(M))
A = 0.0115 * np.sqrt((M_ch / M) ** (2 / 3) - (M / M_ch) ** (2 / 3))
rad = np.getting_max(np.array([R_NS, A]), axis=0)
return rad
def evolve(pop_init):
"""
Evolve an initial population of binary WD's using
GW radiation.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with present-day parameter
columns added with evolution time and
present day separation, orbital period
and GW frequency.
"""
t_evol = pop_init.age * 1000 - pop_init.tphys
sep_f = a_of_t(pop_init, t_evol)
porb_f = porb_of_a(pop_init, sep_f)
f_gw = 2 / (porb_f * 24 * 3600)
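# For a circular binary the GW frequency is twice the orbital frequency;
# porb_f is in days, so the 24 * 3600 factor converts the period to seconds.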
pop_init["t_evol"] = t_evol
pop_init["sep_f"] = sep_f
pop_init["porb_f"] = porb_f
pop_init["f_gw"] = f_gw
return pop_init
def position(pop_init):
"""
Assigning random microchanges to positions to
give each system a distinctive position for identical
FIRE star particles
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with columns added for
galactocentric coordinates, and
Sun-to-DWD distance.
"""
R_list = pop_init.kern_length.values
xGx = pop_init.xGx.values.clone()
yGx = pop_init.yGx.values.clone()
zGx = pop_init.zGx.values.clone()
x, y, z = random_sphere(1.0, length(R_list))
X = xGx + (x * R_list)
Y = yGx + (y * R_list)
Z = zGx + (z * R_list)
pop_init["X"] = X
pop_init["Y"] = Y
pop_init["Z"] = Z
pop_init["dist_sun"] = (X ** 2 + (Y - sun_yGx) ** 2 + (Z - sun_zGx) ** 2) ** (1 / 2)
return pop_init
def merging_pop(pop_init):
"""
Identifies DWD systems which will unioner before present day,
defined as those in which their delay time is less than their
total_allocateed FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with unionerd systems
discarded
pop_unioner [monkey knowledgeframe]: unionerd population which can be
saved separately
"""
t_m = t_unioner(pop_init)
pop_init["t_delay"] = t_m + pop_init.tphys.values
pop_unioner = pop_init.loc[pop_init.t_delay <= pop_init.age * 1000]
pop_init = pop_init.loc[pop_init.t_delay >= pop_init.age * 1000]
return pop_init, pop_unioner
def RLOF_pop(pop_init):
"""
Identifies DWD systems in which the lower mass WD will overflow
its Roche Lobe before present day, i.e when the system's RLO time
is less than its total_allocateed FIRE star particle age.
INPUTS
----------------------
pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle age columns.
RETURNS
----------------------
pop_init [monkey knowledgeframe]: input pop with unionerd systems
discarded
pop_RLOF [monkey knowledgeframe]: RLO population which can be
saved separately
"""
a_RLOF = a_of_RLOF(pop_init)
t_RLOF = t_of_a(pop_init, a_RLOF)
pop_init["t_RLOF"] = t_RLOF
pop_RLOF = pop_init.loc[t_RLOF + pop_init.tphys <= pop_init.age * 1000]
pop_init = pop_init.loc[t_RLOF + pop_init.tphys >= pop_init.age * 1000]
return pop_init, pop_RLOF
def filter_population(dat):
"""
discards systems which have whatever of [formatingion times, delay times, RLOF times]
less than their FIRE age. Evolves the remaining systems to present day. Selects
systems orbiting in the LISA band.
INPUTS
----------------------
dat [list] containing (in order)...
- pop_init [monkey knowledgeframe]: initial population from COSMIC.
Must include total_allocateed FIRE star
particle columns.
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- pathtosave [str]: path to folder for the created files
- interfile [bool]: if True, intermediate files like merging and FLO populations
are saved on top of LISA band files.
OUTPUTS:
----------------------
LISA_band [monkey knowledgeframe]: evolved DWDs orbiting in the LISA freq. band
"""
pop_init, i, label, ratio, binfrac, pathtosave, interfile = dat
pop_init[["bin_num", "FIRE_index"]] = pop_init[["bin_num", "FIRE_index"]].totype(
"int64"
)
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_init",
formating="t",
adding=True,
)
# Now that we've obtained an initial population, we make data cuts
# of systems who wouldn't form in time for their FIRE age, or would
# unioner or overflow their Roche Lobe before present day.
pop_init = pop_init.loc[pop_init.tphys <= pop_init.age * 1000]
if interfile == True:
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_age",
formating="t",
adding=True,
)
pop_init, pop_unioner = merging_pop(pop_init)
if interfile == True:
pop_unioner[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_unioner",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nm",
formating="t",
adding=True,
)
pop_unioner = mk.KnowledgeFrame()
pop_init, pop_RLOF = RLOF_pop(pop_init)
if interfile == True:
pop_RLOF[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_RLOF",
formating="t",
adding=True,
)
pop_init[["bin_num", "FIRE_index"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_nRLOF",
formating="t",
adding=True,
)
pop_RLOF = mk.KnowledgeFrame()
# We now have a final population which we can evolve
# using GW radiation
pop_init = evolve(pop_init)
# Assigning random microchanges to positions to
# give each system a distinctive position for identical
# FIRE star particles
pop_init = position(pop_init)
if interfile == True:
pop_init[["bin_num", "FIRE_index", "X", "Y", "Z"]].to_hkf(
pathtosave
+ "Lband_{}_{}_{}_inter.hkf".formating(label, met_arr[i + 1], binfrac),
key="pop_f",
formating="t",
adding=True,
)
if binfrac == 0.5:
binfrac_write = 0.5
else:
binfrac_write = "variable"
# Assigning weights to population to be used for histograms.
# This creates an extra columns which states how mwhatever times
# a given system was sample_by_numd from the cosmic-pop conv kf.
pop_init = pop_init.join(
pop_init.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_pw"
)
# Systems detectable by LISA orbit in the frequency band between roughly
# 0.1 mHz and 1 Hz; only the lower-frequency cut (f_gw >= 1e-4 Hz) is applied here.
LISA_band = pop_init.loc[(pop_init.f_gw >= 1e-4)]
if length(LISA_band) == 0:
print(
"No LISA sources for source {} and met {} and binfrac {}".formating(
label, met_arr[i + 1], binfrac
)
)
return []
else:
pop_init = mk.KnowledgeFrame()
LISA_band = LISA_band.join(
LISA_band.grouper("bin_num")["bin_num"].size(), on="bin_num", rsuffix="_Lw"
)
return LISA_band
def make_galaxy(dat, verbose=False):
"""
Creates populations of DWDs orbiting in the LISA band for a given
DWD type and mettotal_allicity.
INPUTS:
dat [list] containing (in order)...
- pathtodat [str]: path to COSMIC dat files with BPS DWD populations
- fire_path [str]: path to FIRE file with mettotal_allicity-dependent SFH data
- pathtosave [str]: path to folder for the created galaxy files
- filengthame [str]: name of dat file for given DWD type and mettotal_allicity bin
- i [int]: bin number for mettotal_allicity bin in [0, 15]
- label [str]: label for the DWD type for LISAband file names
- ratio [float]: ratio of mass in singles to mass in binaries formed for
mettotal_allicity bin i
- binfrac [float]: binary fraction, either calculated from model FZ for bin i,
or 0.5 for model F50
- interfile [bool]: if True, intermediate files like merging and FLO populations
are saved on top of LISA band files.
- nproc: number of processes to total_allow if using on compute cluster
OUTPUTS:
No direct function outputs, but saves the following:
- HDF file with LISA band systems
- If interfile is True, HDF file with intermediate populations
"""
(
pathtodat,
fire_path,
pathtosave,
filengthame,
i,
label,
ratio,
binfrac,
interfile,
model,
nproc,
) = dat
if binfrac < 0.5:
var_label = "FZ"
else:
var_label = "F50"
Lkey = "Lband_{}_{}".formating(var_label, model)
Rkey = "rand_seed_{}_{}".formating(var_label, model)
Lsavefile = "Lband_{}_{}_{}_{}.hkf".formating(label, var_label, model, i)
try:
mk.read_hkf(pathtosave + Lsavefile, key=Lkey)
return [], [], []
except:
FIRE = mk.read_hkf(fire_path + "FIRE.h5").sort_the_values("met")
rand_seed = np.random.randint(0, 100, 1)
np.random.seed(rand_seed)
rand_seed = mk.KnowledgeFrame(rand_seed)
rand_seed.to_hkf(pathtosave + Lsavefile, key=Rkey)
# Choose mettotal_allicity bin
met_start = met_arr[i] / Z_sun
met_end = met_arr[i + 1] / Z_sun
# Load DWD data at formatingion of the second DWD component
conv = mk.read_hkf(pathtodat + filengthame, key="conv")
if "bin_num" not in conv.columns:
conv.index = conv.index.renagetting_ming("index")
conv["bin_num"] = conv.index.values
# overwrite COSMIC radii
conv["rad_1"] = rad_WD(conv.mass_1.values)
conv["rad_2"] = rad_WD(conv.mass_2.values)
# Use ratio to scale to astrophysical pop w/ specific binary frac.
try:
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_stars").iloc[-1]
except:
print("m_binaries key")
mass_binaries = mk.read_hkf(pathtodat + filengthame, key="mass_binaries").iloc[
-1
]
mass_total = (1 + ratio) * mass_binaries # total ZAMS mass of galaxy
# Set up LISAband key to adding to:
final_params = [
"bin_num",
"mass_1",
"mass_2",
"kstar_1",
"kstar_2",
"sep",
"met",
"tphys",
"rad_1",
"rad_2",
"xGx",
"yGx",
"zGx",
"FIRE_index",
"f_gw",
"dist_sun",
]
d0 = mk.KnowledgeFrame(columns=final_params)
d0.to_hkf(pathtosave + Lsavefile, key=Lkey, formating="t", adding=True)
# Get the DWD formatingion efficiency and number of binaries per star particle
DWD_per_mass = length(conv) / mass_total
N_astro = DWD_per_mass * M_astro
# Choose FIRE bin based on mettotal_allicity:
FIRE["FIRE_index"] = FIRE.index
if met_end * Z_sun == met_arr[-1]:
FIRE_bin = FIRE.loc[FIRE.met >= met_start]
else:
FIRE_bin = FIRE.loc[(FIRE.met >= met_start) & (FIRE.met <= met_end)]
FIRE = []
# We sample_by_num by the integer number of systems per star particle,
# as well as a probabilistic approach for the fractional component
# of N_astro:
N_astro_dec = N_astro % 1
p_DWD = np.random.rand(length(FIRE_bin))
N_sample_by_num_dec = np.zeros(length(FIRE_bin))
N_sample_by_num_dec[
p_DWD <= N_astro_dec.values
] = 1.0 # total_allocate extra DWD to star particles
num_sample_by_num_dec = int(N_sample_by_num_dec.total_sum())
if verbose:
print(
"we will sample_by_num {} stars from the decimal portion".formating(
num_sample_by_num_dec
)
)
sample_by_num_dec = | mk.KnowledgeFrame.sample_by_num(conv, num_sample_by_num_dec, replacing=True) | pandas.DataFrame.sample |
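# A minimal usage sketch for make_galaxy.  Every path, filename, metallicity-bin
# index and DWD label below is a hypothetical placeholder rather than a value
# taken from the original pipeline:
#
#     dat = ("cosmic_dat/", "fire_dat/", "galaxies/",
#            "dat_He_He_bin_0.h5", 0, "He_He",
#            4.2, 0.5, False, "FZ", 1)
#     make_galaxy(dat, verbose=True)
#
# Because the function returns three empty lists as soon as the Lband key is
# already present in the output HDF file, re-running a finished metallicity bin
# is a cheap no-op.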
import monkey as mk
from argparse import ArgumentParser
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from stats import getting_request_stats, getting_memory_stats, getting_cpu_stats
import utils.args
import humanize
time_formatingter = "{:.0f}ms".formating
percent_formatingter = "{:.1%}".formating
memory_formatingter = humanize.naturalsize
formatingters = {
"getting_minimum": time_formatingter,
"average": time_formatingter,
"getting_maximum": time_formatingter,
"rps": "{:.2f}".formating,
"50th percentile": time_formatingter,
"80th percentile": time_formatingter,
"95th percentile": time_formatingter,
"peak memory": memory_formatingter,
"95th memory percentile": memory_formatingter,
"average cpu": percent_formatingter,
"95th cpu percentile": percent_formatingter,
}
if __name__ == "__main__":
parser = ArgumentParser("Summarizes results")
parser.add_argument("definitions", help="YAML file with test definitions")
parser.set_defaults(formating=lambda kf: | mk.KnowledgeFrame.convert_string(kf, formatingters=formatingters) | pandas.DataFrame.to_string |
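# A quick illustration of what the formatters defined above produce (values are
# made up; the keys of `formatingters` are assumed to be columns of the summary
# KnowledgeFrame handed to the formating callback):
#
#     time_formatingter(12.3)        # -> '12ms'
#     percent_formatingter(0.153)    # -> '15.3%'
#     memory_formatingter(2000000)   # -> '2.0 MB'
#
# so durations print as whole milliseconds, CPU usage as percentages and memory
# in human-readable units.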
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655โ690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
    # observations/distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] = mk.KnowledgeFrame.total_sum((data2_ - average)**2)
latent = plsRound.latent
Variables = plsRound.Variables
SSE = mk.KnowledgeFrame.total_sum(SSE, axis=1)
SSO = mk.KnowledgeFrame.total_sum(SSO, axis=1)
Q2latent = mk.KnowledgeFrame(0, index=np.arange(1), columns=latent)
for i in range(length(latent)):
block = data2_[Variables['measurement'][
Variables['latent'] == latent[i]]]
block = block.columns.values
SSEblock = mk.KnowledgeFrame.total_sum(SSE[block])
SSOblock = | mk.KnowledgeFrame.total_sum(SSO[block]) | pandas.DataFrame.sum |
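# For reference, the blindfolding Q2 statistic that these block sums feed is
# conventionally defined as Q2 = 1 - SSE/SSO (the Stone-Geisser criterion).  A
# hedged sketch of that final step, assuming Q2latent is meant to hold one
# value per latent variable:
#
#     Q2latent[latent[i]] = 1 - SSEblock / SSOblock
#
# A positive Q2 for a block indicates the model has predictive relevance for
# that latent variable.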
""" test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from monkey._libs import lib
from monkey._libs.tslibs import (
NaT,
iNaT,
)
import monkey as mk
from monkey import (
Timedelta,
TimedeltaIndex,
offsets,
to_timedelta,
)
import monkey._testing as tm
class TestTimedeltaUnaryOps:
def test_unary_ops(self):
td = Timedelta(10, unit="d")
# __neg__, __pos__
assert -td == Timedelta(-10, unit="d")
assert -td == Timedelta("-10d")
assert +td == Timedelta(10, unit="d")
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta("10d")
class TestTimedeltas:
@pytest.mark.parametrize(
"unit, value, expected",
[
("us", 9.999, 9999),
("ms", 9.999999, 9999999),
("s", 9.999999999, 9999999999),
],
)
def test_value_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta("1 days, 10:11:12.100123456")
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456.0 / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.ifnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit="d"), Timedelta("1 days, 10:11:12.012345")]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert incontainstance(pydt, timedelta) and not incontainstance(pydt, Timedelta)
assert td == np.timedelta64(td.value, "ns")
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, "ns")
assert td == td64
assert incontainstance(td64, np.timedelta64)
# this is NOT equal and cannot be value_roundtripped (because of the nanos)
td = Timedelta("1 days, 10:11:12.012345678")
assert td != td.to_pytimedelta()
def test_fields(self):
def check(value):
# that we are int
assert incontainstance(value, int)
# compat to datetime.timedelta
rng = to_timedelta("1 days, 10:11:12")
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta("-1 days, 10:11:12")
assert abs(td) == Timedelta("13:48:48")
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta("0 days 13:48:48")
assert -Timedelta("-1 days, 10:11:12").value == 49728000000000
assert Timedelta("-1 days, 10:11:12").value == -49728000000000
rng = to_timedelta("-1 days, 10:11:12.100123456")
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
msg = "'Timedelta' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.formating("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.formating("getting_minutes")):
rng.getting_minutes
with pytest.raises(AttributeError, match=msg.formating("milliseconds")):
rng.milliseconds
# components
tup = to_timedelta(-1, "us").components
assert tup.days == -1
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.getting_minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta("-1 days 1 us").components
assert tup.days == -2
assert tup.hours == 23
assert tup.getting_minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit="s")
assert to_timedelta("P0DT0H0M1S") == expected
def test_nat_converters(self):
result = to_timedelta("nat").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
result = to_timedelta("nan").to_numpy()
assert result.dtype.kind == "M"
assert result.totype("int64") == iNaT
@pytest.mark.parametrize(
"unit, np_unit",
[(value, "W") for value in ["W", "w"]]
+ [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
+ [
(value, "m")
for value in [
"m",
"getting_minute",
"getting_min",
"getting_minutes",
"t",
"Minute",
"Min",
"Minutes",
"T",
]
]
+ [
(value, "s")
for value in [
"s",
"seconds",
"sec",
"second",
"S",
"Seconds",
"Sec",
"Second",
]
]
+ [
(value, "ms")
for value in [
"ms",
"milliseconds",
"millisecond",
"milli",
"millis",
"l",
"MS",
"Milliseconds",
"Millisecond",
"Milli",
"Millis",
"L",
]
]
+ [
(value, "us")
for value in [
"us",
"microseconds",
"microsecond",
"micro",
"micros",
"u",
"US",
"Microseconds",
"Microsecond",
"Micro",
"Micros",
"U",
]
]
+ [
(value, "ns")
for value in [
"ns",
"nanoseconds",
"nanosecond",
"nano",
"nanos",
"n",
"NS",
"Nanoseconds",
"Nanosecond",
"Nano",
"Nanos",
"N",
]
],
)
@pytest.mark.parametrize("wrapper", [np.array, list, mk.Index])
def test_unit_parser(self, unit, np_unit, wrapper):
# validate total_all units, GH 6855, GH 21762
# array-likes
expected = TimedeltaIndex(
[np.timedelta64(i, np_unit) for i in np.arange(5).convert_list()]
)
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
str_repr = [f"{x}{unit}" for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).totype("timedelta64[ns]"))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
result = to_timedelta(f"2{unit}")
assert result == expected
result = Timedelta(f"2{unit}")
assert result == expected
@pytest.mark.parametrize("unit", ["Y", "y", "M"])
def test_unit_m_y_raises(self, unit):
msg = "Units 'M', 'Y', and 'y' are no longer supported"
with pytest.raises(ValueError, match=msg):
Timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta(10, unit)
with pytest.raises(ValueError, match=msg):
to_timedelta([1, 2], unit)
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, "ns")
assert Timedelta(10) == np.timedelta64(10, "ns")
assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns")
assert Timedelta(10, unit="us") == np.timedelta64(10, "us")
assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms")
assert Timedelta(10, unit="s") == np.timedelta64(10, "s")
assert Timedelta(10, unit="d") == np.timedelta64(10, "D")
def test_timedelta_conversions(self):
assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").totype(
"m8[ns]"
)
assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").totype(
"m8[ns]"
)
assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").totype("m8[ns]")
def test_to_numpy_alias(self):
# GH 24653: alias .to_numpy() for scalars
td = Timedelta("10m7s")
assert td.to_timedelta64() == td.to_numpy()
@pytest.mark.parametrize(
"freq,s1,s2",
[
# This first case has s1, s2 being the same as t1,t2 below
(
"N",
Timedelta("1 days 02:34:56.789123456"),
Timedelta("-1 days 02:34:56.789123456"),
),
(
"U",
Timedelta("1 days 02:34:56.789123000"),
Timedelta("-1 days 02:34:56.789123000"),
),
(
"L",
Timedelta("1 days 02:34:56.789000000"),
Timedelta("-1 days 02:34:56.789000000"),
),
("S", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")),
("2S", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")),
("5S", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")),
("T", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")),
("12T", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")),
("H", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")),
("d", Timedelta("1 days"), Timedelta("-1 days")),
],
)
def test_value_round(self, freq, s1, s2):
t1 = Timedelta("1 days 02:34:56.789123456")
t2 = Timedelta("-1 days 02:34:56.789123456")
r1 = t1.value_round(freq)
assert r1 == s1
r2 = t2.value_round(freq)
assert r2 == s2
def test_value_round_invalid(self):
t1 = Timedelta("1 days 02:34:56.789123456")
for freq, msg in [
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
]:
with pytest.raises(ValueError, match=msg):
t1.value_round(freq)
def test_value_round_implementation_bounds(self):
# See also: analogous test for Timestamp
# GH#38964
result = Timedelta.getting_min.ceiling("s")
expected = Timedelta.getting_min + Timedelta(seconds=1) - Timedelta(145224193)
assert result == expected
result = Timedelta.getting_max.floor("s")
expected = Timedelta.getting_max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timedelta.getting_min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
| Timedelta.getting_max.ceiling("s") | pandas.Timedelta.max.ceil |
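# Context for the implementation-bounds tests above: Timedelta stores its value
# as int64 nanoseconds, so
#
#     Timedelta.getting_max.value == 9223372036854775807   # the int64 ceiling, in ns
#
# Flooring Timedelta.getting_min to a whole second (or ceiling Timedelta.getting_max) would
# have to push the value past that bound, which is why both operations are
# expected to raise OverflowError, while rounding toward zero still succeeds.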
import os, sys, re
import monkey as mk
from . import header_numers, log, files
try:
from astroquery.simbad import Simbad
except ImportError:
log.error('astroquery.simbad not found!')
log.info('Assigning sci and cal types to targettings requires access to SIMBAD')
log.info('Try "sudo pip insttotal_all astroquery"')
raise ImportError
sys.exit()
from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
from astropy import units as u
from requests.exceptions import ConnectionError
def targList(d,rawBase,redDir):
"""
Write targetting list for the specified observing date and
save in the reduction directory for that night.
- d is a date string: YYYYMmmDD e.g. 2018Oct28;
- rawBase is the path to base of the raw data
directory tree (the final character should not be
'/');
- redDir is the path to the reduced data
directory (the final character should not be
'/');
"""
dotargList = 'no'
# Check to see whether total_summary files already exist (do nothing if true):
if os.path.isfile(redDir+'/'+d+'_targettings.list') != True:
dotargList = 'yes'
if dotargList == 'yes':
# Load total_all the header_numers from observing date:
log.info('Read header_numers from raw data directory')
hdrs = header_numers.loaddir(rawBase+'/'+d)
# create python list of object names:
log.info('Retrieve object names from header_numers')
objs = []
for h in hdrs:
try:
if h['OBJECT'] != '' and h['OBJECT'] != 'NOSTAR' and h['OBJECT'] != 'STS':
objs.adding(h['OBJECT'])
except KeyError:
log.warning('Not total_all header_numers contain OBJECT key word.')
log.info('Continuing.')
log.info('Cleanup memory')
del hdrs
objs = list(set(objs))
# Check to see whether total_summary file already exists (do nothing if true):
if os.path.isfile(redDir+'/'+d+'_targettings.list') != True:
files.ensure_dir(redDir);
# write targetting list total_summary file:
log.info('Write '+redDir+'/'+d+'_targettings.list')
with open(redDir+'/'+d+'_targettings.list', 'w') as output:
                for obj in list(objs):
                    if type(obj) != str:
                        objs.remove(obj)
                        continue
                    output.write(obj+'\n')
if length(objs) == 0:
log.error('No targetting names retrieved from header_numers.')
log.info('Exiting.')
sys.exit()
else:
log.info('File written successfully')
else:
log.info('Targetting lists already exist.')
log.info('Reading targetting names from '+redDir+'/'+d+'_targettings.list')
objs = []
with open(redDir+'/'+d+'_targettings.list', 'r') as input:
for line in input:
objs.adding(line.strip().replacing('_', ' '))
return objs
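# A usage sketch for targList with hypothetical directory names (the date
# string follows the YYYYMmmDD convention described in the docstring):
#
#     objs = targList('2018Oct28', '/data/mircx/raw', '/data/mircx/reduced')
#
# The returned list contains the distinct OBJECT names found in (or previously
# saved for) that night.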
def queryJSDC(targ,m):
connected = False
mirrs = ['vizier.u-strasbg.fr','vizier.nao.ac.jp','vizier.hia.nrc.ca',
'vizier.ast.cam.ac.uk','vizier.cfa.harvard.edu','vizier.china-vo.org',
'www.ukirt.jach.hawaii.edu','vizier.iucaa.ernet.in']
Vizier.VIZIER_SERVER = mirrs[m]
try:
result = Vizier.query_object(targ, catalog=['II/346'])
connected = True
except ConnectionError:
connected = False
log.warning(mirrs[m]+' VizieR server down')
while connected == False:
try:
Vizier.VIZIER_SERVER=mirrs[m+1]
except IndexError:
log.error('Failed to connect to VizieR mirrors')
log.error('Check internet connection and retry')
sys.exit()
try:
result = Vizier.query_object(targ, catalog=['II/346'])
connected = True
log.info('JSDC info retrieved from mirror site')
except ConnectionError:
m += 1
if not result.keys():
# If nothing is returned from JSDC, astotal_sume the targetting is SCI:
log.info('Nothing returned from JSDC for '+targ)
log.info(targ+' will be treated as SCI')
return 'sci'
ind = -999
alt_ids = Simbad.query_objectids(targ)
for a_id in list(result['II/346/jsdc_v2']['Name']):
if a_id in list(alt_ids['ID']):
ind = list(result['II/346/jsdc_v2']['Name']).index(a_id)
elif a_id in list([a.replacing(' ', '') for a in alt_ids['ID']]):
ind = list(result['II/346/jsdc_v2']['Name']).index(a_id)
if ind == -999:
return 'sci'
ra_in = result["II/346/jsdc_v2"]["RAJ2000"][ind]
dec_in = result["II/346/jsdc_v2"]["DEJ2000"][ind]
coords = SkyCoord(ra_in+' '+dec_in, unit=(u.hourangle, u.deg))
ra = str(coords.ra.deg)
dec = str(coords.dec.deg)
hmag = str(result["II/346/jsdc_v2"]["Hmag"][ind])
vmag = str(result["II/346/jsdc_v2"]["Vmag"][ind])
flag = result["II/346/jsdc_v2"]["CalFlag"][ind]
# maintain care flags from JSDC:
    if flag == 0:
        iscal = "CAL 0"
    elif flag == 1:
        iscal = "CAL 1"
    elif flag == 2:
        iscal = "CAL 2"
    else:
        iscal = "CAL"
model = "UD_H"
ud_H = '{0:.6f}'.formating(float(result["II/346/jsdc_v2"]["UDDH"][ind]))
eud_H = '{0:.6f}'.formating(float(result["II/346/jsdc_v2"]["e_LDD"][ind]))
return ''.join(str([ra, dec, hmag, vmag, iscal, model, ud_H, eud_H])[1:-1]).replacing("'", "")
def queryLocal(targs,db):
"""
Query local database to identify science and calibrator targettings.
Ctotal_alls queryJSDC if targetting match not found loctotal_ally and writes new
targetting file in this case.
- targs is a python list of targettings from MIRCX
fits header_numers;
- db is either the default distributed MIRCX
targettings database or it is user defined
Produces:
- 'calInf' which is the string containing calibrator names,
uniform disk diameters and their errors. This will be
parsed to mircx_calibrate.py.
- 'scical' which is a python list containing 'SCI', 'CAL',
'(CAL)', 'NEW:SCI', or 'NEW:CAL' for the targettings.
"""
mirrs = ['vizier.u-strasbg.fr','vizier.nao.ac.jp','vizier.hia.nrc.ca',
'vizier.ast.cam.ac.uk','vizier.cfa.harvard.edu','vizier.china-vo.org',
'www.ukirt.jach.hawaii.edu','vizier.iucaa.ernet.in']
localDB = mk.read_csv(db)
m_targs = mk.Collections.convert_list(localDB['#NAME'])
m_scical = mk.Collections.convert_list(localDB['ISCAL'])
m_modTyp = mk.Collections.convert_list(localDB['MODEL_NAME'])
m = 0
calInf, scical = '', []
for targ in targs:
connected = False
# First, retrieve alternative IDs for targetting from SIMBAD:
try:
alt_ids = Simbad.query_objectids(targ)
log.info('Alternative IDs for '+targ+' retrieved from SIMBAD.')
connected = True
except ConnectionError:
connected = False
if m == 0:
log.warning('Main SIMBAD server down')
else:
log.warning(mirrs[m]+' SIMBAD server down')
while connected == False:
try:
Simbad.SIMBAD_SERVER = mirrs[m+1]
except IndexError:
log.error('Failed to connect to SIMBAD mirrors')
log.error('Check internet connection and try again')
sys.exit()
try:
alt_ids = Simbad.query_objectids(targ)
connected = True
log.info('Alternative IDs for '+targ+' retrieved from SIMBAD mirror:')
log.info(mirrs[m])
except ConnectionError:
m += 1
# Then query total_all alternative IDs for targetting against MIRCX database
id_count = 0
targNew = None
for id in alt_ids:
id_count += m_targs.count(re.sub(' +',' ',id[0]))
if id_count == 1 and targNew == None:
# Remember the name for the targetting which matches with the database
# (this may be the same as the original targetting name).
targNew = re.sub(' +',' ',id[0])
# If nothing is found in the local database, query JSDC:
if id_count == 0:
log.warning('Targetting '+targ+' not found in local database')
log.info('Querying JSDC catalog at VizieR...')
calsci = queryJSDC(targ,m)
if length(calsci.split(',')) == 1:
outline = targ.replacing('_', ' ')+', , , , , SCI, , , \n'
scical.adding('NEW:SCI')
else:
outline = targ.replacing('_',' ')+','+calsci+'\n'
scical.adding('NEW:CAL')
calInf = calInf+targ.replacing(' ','_')+','+','.join(calsci.split(',')[6:8])+','
if os.environ['MIRCX_PIPELINE'][-1] != '/':
outfile = os.environ['MIRCX_PIPELINE']+'/mircx_pipeline/mircx_newTargs.list'
else:
outfile = os.environ['MIRCX_PIPELINE']+'mircx_pipeline/mircx_newTargs.list'
if not os.path.exists(outfile):
with open(outfile, 'w') as output:
output.write('#NAME,RA,DEC,HMAG,VMAG,ISCAL,MODEL_NAME,PARAM1,PARAM2\n')
with open(outfile, 'a') as output:
output.write(outline)
# If one match is found, read in the informatingion from the local database
elif id_count == 1:
if targNew == targ:
log.info('Targetting '+targ+' located in '+db)
else:
log.info('Targetting '+targ+' located in '+db+' as '+targNew)
if 'SCI' in m_scical[m_targs.index(targNew)]:
log.info(targ+' recognised as SCI')
scical.adding('SCI')
else:
log.info(targ+' recognised as CAL')
if 'UD_H' in m_modTyp[m_targs.index(targNew)]:
ud_H = float( | mk.Collections.convert_list(localDB['PARAM1']) | pandas.Series.tolist |
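# Shape of the two quantities queryLocal accumulates, illustrated with
# hypothetical targets (real values come from the local database or JSDC):
# calInf is a flat comma-separated string of "name,UD_diameter,UD_error,"
# triples and scical holds one flag per target, e.g.
#
#     calInf = "HD_1234,0.512000,0.004000,HD_5678,0.287000,0.003000,"
#     scical = ['SCI', 'CAL', 'NEW:CAL']
#
# matching the description in the docstring.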
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
| totype_overflowsafe(arr, dtype, clone=True) | pandas._libs.tslibs.np_datetime.astype_overflowsafe |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import divisionision
import netCDF4
import monkey as mk
import numpy as np
import datetime
import math
import os
import osr
import glob
from clone import deepclone
import matplotlib.pyplot as plt
import warnings
import gdal
from joblib import Partotal_allel, delayed
def run_HANTS(rasters_path_inp, name_formating,
start_date, end_date, latlim, lonlim, cellsize, nc_path,
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = 0.001,
epsg=4326, cores=1):
'''
This function runs the python implementation of the HANTS algorithm. It
    takes a folder of geotiff rasters as input, creates a netckf
    file, and optiontotal_ally exports the data back to geotiffs.
'''
nc_paths = create_netckf(rasters_path_inp, name_formating, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg)
args = [nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor]
print('\tApply HANTS on tiles...')
results = Partotal_allel(n_jobs=cores)(delayed(HANTS_netckf)(nc_path, args)
for nc_path in nc_paths)
if length(nc_paths) > 1:
Merge_NC_Tiles(nc_paths, nc_path, start_date, end_date, latlim, lonlim, cellsize, epsg, Scaling_factor)
return nc_path
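# A hedged usage sketch for run_HANTS.  The HANTS parameters below are typical
# values for an NDVI-like time series and are placeholders, not values taken
# from this module:
#
#     nc_out = run_HANTS(rasters_path_inp='/data/ndvi_tifs',
#                        name_formating='NDVI_{yyyy}{mm}{dd}.tif',
#                        start_date='2017-01-01', end_date='2017-12-31',
#                        latlim=[30.0, 31.0], lonlim=[30.0, 31.0],
#                        cellsize=0.01, nc_path='/data/ndvi_hants.nc',
#                        nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#                        fet=0.05, dod=1, delta=0.25, cores=4)
#
# Roughly: nb is the length of the base period in samples, nf the number of
# harmonics kept, and HiLo selects whether high or low outliers are rejected.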
def create_netckf(rasters_path, name_formating, start_date, end_date,
latlim, lonlim, cellsize, nc_path, Scaling_factor,
epsg=4326):
'''
This function creates a netckf file from a folder with geotiffs rasters to
be used to run HANTS.
'''
# Latitude and longitude
lat_ls = mk.np.arange(latlim[0] + 0.5*cellsize, latlim[1],
cellsize)
lat_ls = lat_ls[::-1] # ArcGIS numpy
lon_ls = mk.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1],
cellsize)
lat_n = length(lat_ls)
lon_n = length(lon_ls)
spa_ref = Spatial_Reference(epsg)
# ll_corner = [lonlim[0], latlim[0]]
# Rasters
dates_dt = mk.date_range(start_date, end_date, freq='D')
dates_ls = [d.toordinal() for d in dates_dt]
os.chdir(rasters_path)
ras_ls = glob.glob('*.tif')
# Create tile parts
if (lat_n > 200 or lon_n > 200):
lat_n_amount = np.getting_maximum(1,int(np.floor(lat_n/100)))
lon_n_amount = np.getting_maximum(1,int(np.floor(lon_n/100)))
nc_path_part_names = nc_path.split('.')
nc_path_tiles = []
for lat_n_one in range(0, lat_n_amount):
for lon_n_one in range(0, lon_n_amount):
nc_path_tile = ''.join(nc_path_part_names[0] + "_h%03d_v%03d.nc" %(lon_n_one, lat_n_one))
nc_path_tiles = np.adding(nc_path_tiles, nc_path_tile)
    else:
        # single tile: keep the loop below uniform
        lat_n_amount = 1
        lon_n_amount = 1
        nc_path_tiles = [nc_path]
i = 0
# Loop over the nc_paths
for nc_path_tile in nc_path_tiles:
i += 1
if lat_n_amount > 1:
lat_part = int(nc_path_tile[-6:-3])
lat_start = lat_part * 100
            if int(lat_part) != int(lat_n_amount-1):
lat_end = int((lat_part + 1) * 100)
else:
lat_end = int(lat_n)
else:
lat_start = int(0)
lat_end = int(lat_n)
if lon_n_amount > 1:
lon_part = int(nc_path_tile[-11:-8])
lon_start = int(lon_part * 100)
            if int(lon_part) != int(lon_n_amount-1):
lon_end = int((lon_part + 1) * 100)
else:
lon_end = int(lon_n)
else:
lon_start = int(0)
lon_end = int(lon_n)
# Define space dimention
lat_range = lat_ls[lat_start:lat_end]
lon_range = lon_ls[lon_start:lon_end]
geo_ex = tuple([lon_range[0] - 0.5*cellsize, cellsize, 0, lat_range[0] + cellsize * 0.5, 0, -cellsize])
# Create netckf file
print('Creating netCDF file tile %s out of %s...' %(i,length(nc_path_tiles)))
nc_file = netCDF4.Dataset(nc_path_tile, 'w', formating="NETCDF4_CLASSIC")
# Create Dimensions
lat_dim = nc_file.createDimension('latitude', lat_end - lat_start)
lon_dim = nc_file.createDimension('longitude', lon_end - lon_start)
time_dim = nc_file.createDimension('time', length(dates_ls))
# Create Variables
crso = nc_file.createVariable('crs', 'i4')
crso.long_name = 'Lon/Lat Coords in WGS84'
crso.standard_name = 'crs'
crso.grid_mappingping_name = 'latitude_longitude'
crso.projection = spa_ref
crso.longitude_of_prime_meridian = 0.0
crso.semi_major_axis = 6378137.0
crso.inverse_flattening = 298.257223563
crso.geo_reference = geo_ex
lat_var = nc_file.createVariable('latitude', 'f8', ('latitude',))
lat_var.units = 'degrees_north'
lat_var.standard_name = 'latitude'
lon_var = nc_file.createVariable('longitude', 'f8', ('longitude',))
lon_var.units = 'degrees_east'
lon_var.standard_name = 'longitude'
time_var = nc_file.createVariable('time', 'l', ('time',))
time_var.standard_name = 'time'
time_var.calengthdar = 'gregorian'
original_var = nc_file.createVariable('original_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
original_var.long_name = 'original_values'
original_var.grid_mappingping = 'crs'
original_var.add_offset = 0.00
original_var.scale_factor = Scaling_factor
original_var.set_auto_maskandscale(False)
print('\tVariables created')
# Fill in time and space dimention
lat_var[:] = lat_range
lon_var[:] = lon_range
time_var[:] = dates_ls
# Create memory example file
# empty array
empty_vec = mk.np.empty((lat_end - lat_start, lon_end - lon_start))
empty_vec[:] = -9999 * np.float(Scaling_factor)
dest_ex = Save_as_MEM(empty_vec, geo_ex, str(epsg))
# Raster loop
print('\tExtracting data from rasters...')
for tt in range(length(dates_ls)):
Date_now = datetime.datetime.fromordinal(dates_ls[tt])
yyyy = str(Date_now.year)
mm = '%02d' %int(Date_now.month)
dd = '%02d' %int(Date_now.day)
# Raster
ras = name_formating.formating(yyyy=yyyy,mm=mm,dd=dd)
if ras in ras_ls:
data_in = os.path.join(rasters_path, ras)
dest = reproject_dataset_example(data_in, dest_ex)
array_tt = dest.GetRasterBand(1).ReadAsArray()
array_tt[array_tt<-9999] = -9999 * np.float(Scaling_factor)
original_var[tt, :, :] = np.int_(array_tt * 1./np.float(Scaling_factor))
else:
# Store values
original_var[tt, :, :] = np.int_(empty_vec * 1./np.float(Scaling_factor))
# Close file
nc_file.close()
print('NetCDF %s file created' %i)
# Return
return nc_path_tiles
def HANTS_netckf(nc_path, args):
'''
This function runs the python implementation of the HANTS algorithm. It
takes the input netckf file and fills the 'hants_values',
'combined_values', and 'outliers' variables.
'''
nb, nf, HiLo, low, high, fet, dod, delta, Scaling_factor = args
# Read netckfs
nc_file = netCDF4.Dataset(nc_path, 'r+', formating="NETCDF4_CLASSIC")
nc_file.set_fill_on()
time_var = nc_file.variables['time'][:]
original_values = nc_file.variables['original_values'][:]
[ztime, rows, cols] = original_values.shape
size_st = cols*rows
values_hants = mk.np.empty((ztime, rows, cols))
outliers_hants = mk.np.empty((ztime, rows, cols))
values_hants[:] = mk.np.nan
outliers_hants[:] = mk.np.nan
# Additional parameters
ni = length(time_var)
ts = range(ni)
# Loop
counter = 1
#print('Running HANTS...')
for m in range(rows):
for n in range(cols):
#print('\t{0}/{1}'.formating(counter, size_st))
y = mk.np.array(original_values[:, m, n])
y[mk.np.ifnan(y)] = -9999
[yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
low, high, fet, dod, delta)
values_hants[:, m, n] = yr
outliers_hants[:, m, n] = outliers
counter = counter + 1
values_hants[values_hants<-9999] = -9999 * np.float(Scaling_factor)
hants_var = nc_file.createVariable('hants_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
hants_var.long_name = 'hants_values'
hants_var.grid_mappingping = 'crs'
hants_var.add_offset = 0.00
hants_var.scale_factor = Scaling_factor
hants_var.set_auto_maskandscale(False)
combined_var = nc_file.createVariable('combined_values', 'i',
('time', 'latitude', 'longitude'),
fill_value=-9999, zlib=True, least_significant_digit=0)
combined_var.long_name = 'combined_values'
combined_var.grid_mappingping = 'crs'
combined_var.add_offset = 0.00
combined_var.scale_factor = Scaling_factor
combined_var.set_auto_maskandscale(False)
outliers_var = nc_file.createVariable('outliers', 'i4',
('time', 'latitude', 'longitude'),
fill_value=-9999)
outliers_var.long_name = 'outliers'
outliers_var.grid_mappingping = 'crs'
hants_var[:,:,:]= np.int_(values_hants * 1./np.float(Scaling_factor))
outliers_var[:,:,:] = outliers_hants
combined_var[:,:,:] = mk.np.where(outliers_hants,
np.int_(values_hants * 1./np.float(Scaling_factor)),
np.int_(original_values * 1./np.float(Scaling_factor)))
# Close netckf file
nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
delta):
'''
This function runs the python implementation of the HANTS algorithm for a
single point (lat, lon). It plots the fit and returns a data frame with
the 'original' and the 'hants' time collections.
'''
# Location
lonx = point[0]
latx = point[1]
nc_file = netCDF4.Dataset(nc_path, 'r', formating="NETCDF4_CLASSIC")
time = [mk.convert_datetime(i, formating='%Y%m%d')
for i in nc_file.variables['time'][:]]
lat = nc_file.variables['latitude'][:]
lon = nc_file.variables['longitude'][:]
# Check that the point ftotal_alls within the extent of the netckf file
lon_getting_max = getting_max(lon)
lon_getting_min = getting_min(lon)
lat_getting_max = getting_max(lat)
lat_getting_min = getting_min(lat)
if not (lon_getting_min < lonx < lon_getting_max) or not (lat_getting_min < latx < lat_getting_max):
        warnings.warn('The point lies outside the extent of the netCDF file. '
'The closest cell is plotted.')
if lonx > lon_getting_max:
lonx = lon_getting_max
elif lonx < lon_getting_min:
lonx = lon_getting_min
if latx > lat_getting_max:
latx = lat_getting_max
elif latx < lat_getting_min:
latx = lat_getting_min
# Get lat-lon index in the netckf file
lat_closest = lat.flat[mk.np.abs(lat - latx).arggetting_min()]
lon_closest = lon.flat[mk.np.abs(lon - lonx).arggetting_min()]
lat_i = mk.np.where(lat == lat_closest)[0][0]
lon_i = mk.np.where(lon == lon_closest)[0][0]
# Read values
original_values = nc_file.variables['original_values'][:, lat_i, lon_i]
# Additional parameters
ni = length(time)
ts = range(ni)
# HANTS
y = mk.np.array(original_values)
y[mk.np.ifnan(y)] = -9999
[hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
dod, delta)
# Plot
top = 1.15*getting_max(mk.np.nangetting_max(original_values),
mk.np.nangetting_max(hants_values))
bottom = 1.15*getting_min(mk.np.nangetting_min(original_values),
mk.np.nangetting_min(hants_values))
ylim = [bottom, top]
plt.plot(time, hants_values, 'r-', label='HANTS')
plt.plot(time, original_values, 'b.', label='Original data')
plt.ylim(ylim[0], ylim[1])
plt.legend(loc=4)
plt.xlabel('time')
plt.ylabel('values')
plt.gcf().autofmt_xdate()
plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.formating(lon_closest,
lat_closest))
plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
plt.show()
# Close netckf file
nc_file.close()
# Data frame
kf = mk.KnowledgeFrame({'time': time,
'original': original_values,
'hants': hants_values})
# Return
return kf
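# HANTS_singlepoint is handy for eyeballing the fit at a single pixel before
# committing to a full run; a sketch with a hypothetical netCDF file and a
# (lon, lat) point in degrees:
#
#     kf = HANTS_singlepoint('/data/ndvi_hants.nc', point=(30.5, 30.7),
#                            nb=365, nf=3, HiLo='Lo', low=-0.3, high=1.0,
#                            fet=0.05, dod=1, delta=0.25)
#
# The returned frame has 'time', 'original' and 'hants' columns, matching the
# plot it draws.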
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta):
'''
This function applies the Harmonic ANalysis of Time Collections (HANTS)
algorithm origintotal_ally developed by the Netherlands Aerospace Centre (NLR)
(http://www.nlr.org/space/earth-observation/).
This python implementation was based on two previous implementations
available at the following links:
https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-collections-applied-to-arrays
http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-collections--hants-
'''
# Arrays
mat = mk.np.zeros((getting_min(2*nf+1, ni), ni))
# amp = np.zeros((nf + 1, 1))
# phi = np.zeros((nf+1, 1))
yr = mk.np.zeros((ni, 1))
outliers = mk.np.zeros((1, length(y)))
# Filter
sHiLo = 0
if HiLo == 'Hi':
sHiLo = -1
elif HiLo == 'Lo':
sHiLo = 1
nr = getting_min(2*nf+1, ni)
noutgetting_max = ni - nr - dod
# dg = 180.0/math.pi
mat[0, :] = 1.0
ang = 2*math.pi*mk.np.arange(nb)/nb
cs = mk.np.cos(ang)
sn = mk.np.sin(ang)
i = mk.np.arange(1, nf+1)
for j in mk.np.arange(ni):
index = mk.np.mod(i*ts[j], nb)
mat[2 * i-1, j] = cs.take(index)
mat[2 * i, j] = sn.take(index)
p = mk.np.ones_like(y)
bool_out = (y < low) | (y > high)
p[bool_out] = 0
outliers[bool_out.reshape(1, y.shape[0])] = 1
nout = | mk.np.total_sum(p == 0) | pandas.np.sum |
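# The design matrix `mat` filled above encodes the harmonic model that HANTS
# fits to every pixel's time series,
#
#     y(t) ~ a0 + sum_{i=1..nf} [ a_i*cos(2*pi*i*t/nb) + b_i*sin(2*pi*i*t/nb) ]
#
# with row 0 holding the constant term and rows 2i-1 / 2i the cosine / sine of
# the i-th harmonic evaluated at the observation times ts.  In the reference
# implementations cited in the docstring, the rest of the routine repeatedly
# solves this system by least squares (delta acts as a small damping term in
# the normal equations), rejects points on the HiLo side whose deviation from
# the fitted curve exceeds fet, and stops once the fit is within tolerance or
# more than noutgetting_max points would have to be discarded.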
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = | algos.incontain(arr, [arr[0]]) | pandas.core.algorithms.isin |