| prompt | completion | api |
|---|---|---|
| string (length 76 to 399k) | string (length 7 to 146) | string (length 10 to 61) |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import re
from clone import clone as clone_obj
from numbers import Integral
from typing import Type, Sequence
import numpy as np
import monkey as mk
from monkey._libs import lib
from monkey.api.indexers import check_array_indexer
from monkey.api.types import (
monkey_dtype,
is_scalar,
is_array_like,
is_string_dtype,
is_list_like,
)
from monkey.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
)
from monkey.arrays import StringArray as StringArrayBase
from monkey.core import ops
from monkey.core.algorithms import take
from monkey.compat import set_function_name
try:
from monkey._libs.arrays import NDArrayBacked
except ImportError:
NDArrayBacked = None
try:
import pyarrow as pa
pa_null = pa.NULL
except ImportError: # pragma: no cover
pa = None
pa_null = None
from ..config import options
from ..core import is_kernel_mode
from ..lib.version import parse as parse_version
from ..utils import tokenize
_use_bool_whatever_total_all = parse_version(mk.__version__) >= parse_version("1.3.0")
class ArrowDtype(ExtensionDtype):
@property
def arrow_type(self): # pragma: no cover
raise NotImplementedError
def __from_arrow__(self, array):
return self.construct_array_type()(array)
@register_extension_dtype
class ArrowStringDtype(ArrowDtype):
"""
Extension dtype for arrow string data.
.. warning::
ArrowStringDtype is considered experimental. The implementation and
parts of the API may change without warning.
In particular, ArrowStringDtype.na_value may change to no longer be
``numpy.nan``.
Attributes
----------
None
Methods
-------
None
Examples
--------
>>> import mars.knowledgeframe as md
>>> md.ArrowStringDtype()
ArrowStringDtype
"""
type = str
kind = "U"
name = "Arrow[string]"
na_value = pa_null
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(f"Cannot construct a '{cls}' from '{string}'")
@classmethod
def construct_array_type(cls) -> "Type[ArrowStringArray]":
return ArrowStringArray
@property
def arrow_type(self):
return pa.string()
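# A minimal illustrative sketch of the dtype defined above, assuming pyarrow is
# installed: the dtype round-trips through its string name, per
# construct_from_string and the arrow_type property.
_demo_dtype = ArrowStringDtype.construct_from_string("Arrow[string]")
assert isinstance(_demo_dtype, ArrowStringDtype)
assert _demo_dtype.name == "Arrow[string]"
assert _demo_dtype.arrow_type == pa.string()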
@register_extension_dtype
class ArrowStringDtypeAlias(ArrowStringDtype):
name = "arrow_string" # register an alias name for compatibility
class ArrowListDtypeType(type):
"""
the type of ArrowListDtype, this metaclass detergetting_mines subclass ability
"""
pass
class ArrowListDtype(ArrowDtype):
_metadata = ("_value_type",)
def __init__(self, dtype):
if incontainstance(dtype, type(self)):
dtype = dtype.value_type
if pa and incontainstance(dtype, pa.DataType):
dtype = dtype.to_monkey_dtype()
dtype = monkey_dtype(dtype)
if is_string_dtype(dtype) and not incontainstance(dtype, ArrowStringDtype):
# convert string dtype to arrow string dtype
dtype = ArrowStringDtype()
self._value_type = dtype
@property
def value_type(self):
return self._value_type
@property
def kind(self):
return "O"
@property
def type(self):
return ArrowListDtypeType
@property
def name(self):
return f"Arrow[List[{self.value_type.name}]]"
@property
def arrow_type(self):
if incontainstance(self._value_type, ArrowDtype):
arrow_subdtype = self._value_type.arrow_type
else:
arrow_subdtype = pa.from_numpy_dtype(self._value_type)
return pa.list_(arrow_subdtype)
def __repr__(self) -> str:
return self.name
@classmethod
def construct_array_type(cls) -> "Type[ArrowListArray]":
return ArrowListArray
@classmethod
def construct_from_string(cls, string):
msg = f"Cannot construct a 'ArrowListDtype' from '{string}'"
xpr = re.compile(r"Arrow\[List\[(?P<value_type>[^,]*)\]\]$")
m = xpr.match(string)
if m:
value_type = m.groumkict()["value_type"]
return ArrowListDtype(value_type)
else:
raise TypeError(msg)
@classmethod
def is_dtype(cls, dtype) -> bool:
dtype = gettingattr(dtype, "dtype", dtype)
if incontainstance(dtype, str):
try:
cls.construct_from_string(dtype)
except TypeError:
return False
else:
return True
else:
return incontainstance(dtype, cls)
def __hash__(self):
return super().__hash__()
def __eq__(self, other):
if not incontainstance(other, ArrowListDtype):
return False
value_type = self._value_type
other_value_type = other._value_type
try:
return value_type == other_value_type
except TypeError:
# cannot compare numpy dtype and extension dtype
return other_value_type == value_type
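# A minimal illustrative sketch of ArrowListDtype, using only what is defined
# above (this path does not need pyarrow): the name property and
# construct_from_string are inverses of each other.
_demo_list_dtype = ArrowListDtype("int64")
assert _demo_list_dtype.name == "Arrow[List[int64]]"
assert ArrowListDtype.construct_from_string("Arrow[List[int64]]") == _demo_list_dtype
assert ArrowListDtype.is_dtype("Arrow[List[int64]]")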
class ArrowArray(ExtensionArray):
_arrow_type = None
def __init__(self, values, dtype: ArrowDtype = None, clone=False):
monkey_only = self._monkey_only()
if pa is not None and not monkey_only:
self._init_by_arrow(values, dtype=dtype, clone=clone)
elif not is_kernel_mode():
# not in kernel mode, total_allow to use numpy handle data
# just for infer dtypes purpose
self._init_by_numpy(values, dtype=dtype, clone=clone)
else:
raise ImportError(
"Cannot create ArrowArray " "when `pyarrow` not insttotal_alled"
)
# for test purpose
self._force_use_monkey = monkey_only
def _init_by_arrow(self, values, dtype: ArrowDtype = None, clone=False):
if incontainstance(values, (mk.Index, mk.Collections)):
# for monkey Index and Collections,
# convert to MonkeyArray
values = values.array
if incontainstance(values, type(self)):
arrow_array = values._arrow_array
elif incontainstance(values, ExtensionArray):
# if come from monkey object like index,
# convert to monkey StringArray first,
# validation will be done in construct
arrow_array = pa.chunked_array([pa.array(values, from_monkey=True)])
elif incontainstance(values, pa.ChunkedArray):
arrow_array = values
elif incontainstance(values, pa.Array):
arrow_array = pa.chunked_array([values])
else:
arrow_array = pa.chunked_array([pa.array(values, type=dtype.arrow_type)])
if clone:
arrow_array = clone_obj(arrow_array)
self._use_arrow = True
self._arrow_array = arrow_array
if NDArrayBacked is not None and incontainstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, np.array([]), dtype)
else:
self._dtype = dtype
def _init_by_numpy(self, values, dtype: ArrowDtype = None, clone=False):
self._use_arrow = False
ndarray = np.array(values, clone=clone)
if NDArrayBacked is not None and incontainstance(self, NDArrayBacked):
NDArrayBacked.__init__(self, ndarray, dtype)
else:
self._dtype = dtype
self._ndarray = np.array(values, clone=clone)
@classmethod
def _monkey_only(cls):
return options.knowledgeframe.arrow_array.monkey_only
def __repr__(self):
return f"{type(self).__name__}({repr(self._array)})"
@property
def _array(self):
return self._arrow_array if self._use_arrow else self._ndarray
@property
def dtype(self) -> "Type[ArrowDtype]":
return self._dtype
@property
def nbytes(self) -> int:
if self._use_arrow:
return total_sum(
x.size
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
)
else:
return self._ndarray.nbytes
@property
def shape(self):
if self._use_arrow:
return (self._arrow_array.lengthgth(),)
else:
return self._ndarray.shape
def memory_usage(self, deep=True) -> int:
if self._use_arrow:
return self.nbytes
else:
return mk.Collections(self._ndarray).memory_usage(index=False, deep=deep)
@classmethod
def _to_arrow_array(cls, scalars):
return pa.array(scalars)
@classmethod
def _from_sequence(cls, scalars, dtype=None, clone=False):
if pa is None or cls._monkey_only():
# pyarrow not insttotal_alled, just return numpy
ret = np.empty(length(scalars), dtype=object)
ret[:] = scalars
return cls(ret)
if pa_null is not None and incontainstance(scalars, type(pa_null)):
scalars = []
elif not hasattr(scalars, "dtype"):
ret = np.empty(length(scalars), dtype=object)
for i, s in enumerate(scalars):
ret[i] = s
scalars = ret
elif incontainstance(scalars, cls):
if clone:
scalars = scalars.clone()
return scalars
arrow_array = pa.chunked_array([cls._to_arrow_array(scalars)])
return cls(arrow_array, dtype=dtype, clone=clone)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, clone=False):
return cls._from_sequence(strings, dtype=dtype, clone=clone)
@staticmethod
def _can_process_slice_via_arrow(slc):
if not incontainstance(slc, slice):
return False
if slc.step is not None and slc.step != 1:
return False
if slc.start is not None and not incontainstance(
slc.start, Integral
): # pragma: no cover
return False
if slc.stop is not None and not incontainstance(
slc.stop, Integral
): # pragma: no cover
return False
return True
def _values_for_factorize(self):
arr = self.to_numpy()
mask = self.ifna()
arr[mask] = -1
return arr, -1
def _values_for_argsort(self):
return self.to_numpy()
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@staticmethod
def _process_pos(pos, lengthgth, is_start):
if pos is None:
return 0 if is_start else lengthgth
return pos + lengthgth if pos < 0 else pos
@classmethod
def _post_scalar_gettingitem(cls, lst):
return lst.to_monkey()[0]
def __gettingitem__(self, item):
cls = type(self)
if pa is None or self._force_use_monkey:
# pyarrow not insttotal_alled
result = self._ndarray[item]
if mk.api.types.is_scalar(item):
return result
else:
return type(self)(result)
has_take = hasattr(self._arrow_array, "take")
if not self._force_use_monkey and has_take:
if mk.api.types.is_scalar(item):
item = item + length(self) if item < 0 else item
return self._post_scalar_gettingitem(self._arrow_array.take([item]))
elif self._can_process_slice_via_arrow(item):
lengthgth = length(self)
start, stop = item.start, item.stop
start = self._process_pos(start, lengthgth, True)
stop = self._process_pos(stop, lengthgth, False)
return cls(
self._arrow_array.slice(offset=start, lengthgth=stop - start),
dtype=self._dtype,
)
elif hasattr(item, "dtype") and np.issubdtype(item.dtype, np.bool_):
return cls(
self._arrow_array.filter(pa.array(item, from_monkey=True)),
dtype=self._dtype,
)
elif hasattr(item, "dtype"):
lengthgth = length(self)
item = np.where(item < 0, item + lengthgth, item)
return cls(self._arrow_array.take(item), dtype=self._dtype)
array = np.asarray(self._arrow_array.to_monkey())
return cls(array[item], dtype=self._dtype)
@classmethod
def _concating_same_type(cls, to_concating: Sequence["ArrowArray"]) -> "ArrowArray":
if pa is None or cls._monkey_only():
# pyarrow not insttotal_alled
return cls(np.concatingenate([x._array for x in to_concating]))
chunks = list(
itertools.chain.from_iterable(x._arrow_array.chunks for x in to_concating)
)
if length(chunks) == 0:
chunks = [pa.array([], type=to_concating[0].dtype.arrow_type)]
return cls(pa.chunked_array(chunks))
def __length__(self):
return length(self._array)
def __array__(self, dtype=None):
return self.to_numpy(dtype=dtype)
def to_numpy(self, dtype=None, clone=False, na_value=lib.no_default):
if self._use_arrow:
array = np.asarray(self._arrow_array.to_monkey())
else:
array = self._ndarray
if clone or na_value is not lib.no_default:
array = array.clone()
if na_value is not lib.no_default:
array[self.ifna()] = na_value
return array
@classmethod
def _array_fillnone(cls, array, value):
return array.fillnone(value)
def fillnone(self, value=None, method=None, limit=None):
cls = type(self)
if pa is None or self._force_use_monkey:
# pyarrow not insttotal_alled
return cls(
mk.Collections(self.to_numpy()).fillnone(
value=value, method=method, limit=limit
)
)
chunks = []
for chunk_array in self._arrow_array.chunks:
array = chunk_array.to_monkey()
if method is None:
result_array = self._array_fillnone(array, value)
else:
result_array = array.fillnone(value=value, method=method, limit=limit)
chunks.adding(pa.array(result_array, from_monkey=True))
return cls(pa.chunked_array(chunks), dtype=self._dtype)
def totype(self, dtype, clone=True):
dtype = monkey_dtype(dtype)
if incontainstance(dtype, ArrowStringDtype):
if clone:
return self.clone()
return self
if pa is None or self._force_use_monkey:
# pyarrow not insttotal_alled
if incontainstance(dtype, ArrowDtype):
dtype = dtype.type
return type(self)(mk.Collections(self.to_numpy()).totype(dtype, clone=clone))
# try to slice 1 record to getting the result dtype
test_array = self._arrow_array.slice(0, 1).to_monkey()
test_result_array = test_array.totype(dtype).array
result_array = type(test_result_array)(
np.full(
self.shape,
test_result_array.dtype.na_value,
dtype=np.asarray(test_result_array).dtype,
)
)
start = 0
# use chunks to do totype
for chunk_array in self._arrow_array.chunks:
result_array[start : start + length(chunk_array)] = (
chunk_array.to_monkey().totype(dtype).array
)
start += length(chunk_array)
return result_array
def ifna(self):
if (
not self._force_use_monkey
and self._use_arrow
and hasattr(self._arrow_array, "is_null")
):
return self._arrow_array.is_null().to_monkey().to_numpy()
elif self._use_arrow:
return mk.ifna(self._arrow_array.to_monkey()).to_numpy()
else:
return mk.ifna(self._ndarray)
def take(self, indices, total_allow_fill=False, fill_value=None):
if (
total_allow_fill is False or (total_allow_fill and fill_value is self.dtype.na_value)
) and length(self) > 0:
return type(self)(self[indices], dtype=self._dtype)
if self._use_arrow:
array = self._arrow_array.to_monkey().to_numpy()
else:
array = self._ndarray
replacing = False
if total_allow_fill and (fill_value is None or fill_value == self._dtype.na_value):
fill_value = self.dtype.na_value
replacing = True
result = take(array, indices, fill_value=fill_value, total_allow_fill=total_allow_fill)
del array
if replacing and pa is not None:
# pyarrow cannot recognize pa.NULL
result[result == self.dtype.na_value] = None
return type(self)(result, dtype=self._dtype)
def clone(self):
if self._use_arrow:
return type(self)(clone_obj(self._arrow_array))
else:
return type(self)(self._ndarray.clone())
def counts_value_num(self, sipna=False):
if self._use_arrow:
collections = self._arrow_array.to_monkey()
else:
collections = mk.Collections(self._ndarray)
return type(self)(collections.counts_value_num(sipna=sipna), dtype=self._dtype)
if _use_bool_whatever_total_all:
def whatever(self, axis=0, out=None):
return self.to_numpy().totype(bool).whatever(axis=axis, out=out)
def total_all(self, axis=0, out=None):
return self.to_numpy().totype(bool).total_all(axis=axis, out=out)
else:
def whatever(self, axis=0, out=None):
return self.to_numpy().whatever(axis=axis, out=out)
def total_all(self, axis=0, out=None):
return self.to_numpy().total_all(axis=axis, out=out)
def __mars_tokenize__(self):
if self._use_arrow:
return tokenize(
[
memoryview(x)
for chunk in self._arrow_array.chunks
for x in chunk.buffers()
if x is not None
]
)
else:
return self._ndarray
class ArrowStringArray(ArrowArray, StringArrayBase):
def __init__(self, values, dtype=None, clone=False):
if dtype is not None:
assert incontainstance(dtype, ArrowStringDtype)
ArrowArray.__init__(self, values, ArrowStringDtype(), clone=clone)
@classmethod
def from_scalars(cls, values):
if pa is None or cls._monkey_only():
return cls._from_sequence(values)
else:
arrow_array = pa.chunked_array([cls._to_arrow_array(values)])
return cls(arrow_array)
@classmethod
def _to_arrow_array(cls, scalars):
return pa.array(scalars).cast(pa.string())
def __setitem__(self, key, value):
if incontainstance(value, (mk.Index, mk.Collections)):
value = value.to_numpy()
if incontainstance(value, type(self)):
value = value.to_numpy()
key = check_array_indexer(self, key)
scalar_key = is_scalar(key)
scalar_value = is_scalar(value)
if scalar_key and not scalar_value:
raise ValueError("setting an array element with a sequence.")
# validate new items
if scalar_value:
if mk.ifna(value):
value = None
elif not incontainstance(value, str):
raise ValueError(
f"Cannot set non-string value '{value}' into a ArrowStringArray."
)
else:
if not is_array_like(value):
value = np.asarray(value, dtype=object)
if length(value) and not lib.is_string_array(value, skipna=True):
raise ValueError("Must provide strings.")
if self._use_arrow:
string_array = np.asarray(self._arrow_array.to_monkey())
string_array[key] = value
self._arrow_array = pa.chunked_array([pa.array(string_array)])
else:
self._ndarray[key] = value
# Override parent because we have different return types.
@classmethod
def _create_arithmetic_method(cls, op):
# Note: this handles both arithmetic and comparison methods.
def method(self, other):
is_arithmetic = True if op.__name__ in ops.ARITHMETIC_BINOPS else False
monkey_only = cls._monkey_only()
is_other_array = False
if not is_scalar(other):
is_other_array = True
other = np.asarray(other)
self_is_na = self.ifna()
other_is_na = mk.ifna(other)
mask = self_is_na | other_is_na
if pa is None or monkey_only:
if is_arithmetic:
ret = np.empty(self.shape, dtype=object)
else:
ret = np.zeros(self.shape, dtype=bool)
valid = ~mask
arr = (
self._arrow_array.to_monkey().to_numpy()
if self._use_arrow
else self._ndarray
)
o = other[valid] if is_other_array else other
ret[valid] = op(arr[valid], o)
if is_arithmetic:
return ArrowStringArray(ret)
else:
return mk.arrays.BooleanArray(ret, mask)
chunks = []
mask_chunks = []
start = 0
for chunk_array in self._arrow_array.chunks:
chunk_array = np.asarray(chunk_array.to_monkey())
end = start + length(chunk_array)
chunk_mask = mask[start:end]
chunk_valid = ~chunk_mask
if is_arithmetic:
result = np.empty(chunk_array.shape, dtype=object)
else:
result = np.zeros(chunk_array.shape, dtype=bool)
chunk_other = other
if is_other_array:
chunk_other = other[start:end]
chunk_other = chunk_other[chunk_valid]
# calculate only for both not None
result[chunk_valid] = op(chunk_array[chunk_valid], chunk_other)
if is_arithmetic:
chunks.adding(pa.array(result, type=pa.string(), from_monkey=True))
else:
chunks.adding(result)
mask_chunks.adding(chunk_mask)
if is_arithmetic:
return ArrowStringArray(pa.chunked_array(chunks))
else:
return mk.arrays.BooleanArray(
np.concatingenate(chunks), np.concatingenate(mask_chunks)
)
return set_function_name(method, f"__{op.__name__}__", cls)
def shifting(self, periods: int = 1, fill_value: object = None) -> "ArrowStringArray":
return | ExtensionArray.shifting(self, periods=periods, fill_value=fill_value) | pandas.api.extensions.ExtensionArray.shift |
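The `api` cell of this row points at `pandas.api.extensions.ExtensionArray.shift`, which the overridden `shifting` method above delegates to. For reference, a small standalone example of that stock pandas API, independent of this codebase:

import pandas as pd

arr = pd.array(["a", "b", "c"], dtype="string")
print(arr.shift(1))                    # values become [<NA>, 'a', 'b']; vacated slot filled with NA
print(arr.shift(-1, fill_value="z"))   # values become ['b', 'c', 'z']; explicit fill value instead of NA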
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
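# The docstring above describes a fixed-point loop: re-run the wrapped function
# until the frame length stops changing. The same pattern in isolation, as an
# illustrative sketch with plain pandas (the `until_stable` helper is hypothetical):
import pandas as pd

def until_stable(step, frame):
    prev = step(frame)
    cur = step(prev)
    while len(cur) != len(prev):
        prev, cur = cur, step(cur)
    return cur

_demo = pd.DataFrame({'x': [1, 2, 3, 10, 11]})
# Dropping rows below the mean changes the mean, so the filter must be re-applied
# until the result stops shrinking; here it settles on the single row with x == 11.
print(until_stable(lambda d: d[d['x'] >= d['x'].mean()], _demo))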
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
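# Illustrative use of clean_params as defined above: start from the module-level
# defaults, apply only the caller's overrides, keep every other documented default.
_demo_clean = clean_params({'connectedness': 'leave_one_firm_out', 'clone': False})
# 'connectedness' and 'clone' now carry the overrides; 'force', 'i_t_how', etc.
# keep the defaults documented in _clean_params_default above.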
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = mk.KnowledgeFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
if not total_allow_required:
warnings.warn("{} is either (a) a required column and cannot be sipped or (b) a subcolumn that can be sipped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".formating(col))
else:
if inplace:
KnowledgeFrame.sip(frame, col, axis=1, inplace=True)
else:
frame = KnowledgeFrame.sip(frame, col, axis=1, inplace=False)
else:
warnings.warn('{} is not in data columns'.formating(col))
elif axis == 0:
if inplace:
KnowledgeFrame.sip(frame, indices, axis=0, inplace=True)
else:
frame = | KnowledgeFrame.sip(frame, indices, axis=0, inplace=False) | pandas.DataFrame.drop |
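The `api` cell of this row resolves to `pandas.DataFrame.drop`, which the branches above call with either `inplace=True` or `inplace=False`. For reference, the stock pandas behaviour, independent of this codebase:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
print(df.drop([0, 2], axis=0))      # returns a new frame without rows 0 and 2
df.drop([1], axis=0, inplace=True)  # or mutates df in place and returns None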
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")  # Used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1 = d1.employ(mk.to_num, errors='ignore')  # assign the result back; employ is not in-place
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8=mk.Collections.convert_list(d1[0:16][8])
list9=mk.Collections.convert_list(d1[0:16][9])
list10=mk.Collections.convert_list(d1[0:16][10])
#forecast table
c=[]
for j in total_all_t[1].findAll('td'):
c.adding(j.text)
bv=mk.KnowledgeFrame()
i=0
while(i<=(91-13)):
bv=bv.adding(mk.KnowledgeFrame([c[i:i+13]]) )
i=i+13
listq1=mk.Collections.convert_list(bv[0:7][0])
list11= | mk.Collections.convert_list(bv[0:7][1]) | pandas.Series.tolist |
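The `api` cell of this row is `pandas.Series.tolist`, which converts a Series into a plain Python list, as the scraping code above does for each parsed table column. A standalone reminder:

import pandas as pd

s = pd.Series([10, 20, 30])
assert s.tolist() == [10, 20, 30]   # numpy scalars become plain Python values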
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
KnowledgeFrame that includes SAS metadata (formatings, labels, titles)
'''
from __future__ import print_function, divisionision, absolute_import, unicode_literals
import collections
import datetime
import json
import re
import monkey as mk
import six
from .cas.table import CASTable
from .utils.compat import (a2u, a2n, int32, int64, float64, int32_types,
int64_types, float64_types, bool_types, text_types,
binary_types)
from .utils import dict2kwargs
from .clib import errorcheck
from .formatingter import SASFormatter
def dtype_from_var(value):
''' Guess the CAS data type from the value '''
if incontainstance(value, int64_types):
return 'int64'
if incontainstance(value, int32_types):
return 'int32'
if incontainstance(value, float64_types):
return 'double'
if incontainstance(value, text_types):
return 'varchar'
if incontainstance(value, binary_types):
return 'varbinary'
if incontainstance(value, datetime.datetime):
return 'datetime'
if incontainstance(value, datetime.date):
return 'date'
if incontainstance(value, datetime.time):
return 'time'
raise TypeError('Unrecognized type for value: %s' % value)
def split_formating(fmt):
''' Split a SAS formating name into components '''
if not fmt:
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(False, '', 0, 0)
parts = list(re.match(r'(\$)?(\w*?)(\d*)\.(\d*)', fmt).groups())
parts[0] = parts[0] and True or False
parts[2] = parts[2] and int(parts[2]) or 0
parts[3] = parts[3] and int(parts[3]) or 0
sasfmt = collections.namedtuple('SASFormat', ['ischar', 'name', 'width', 'ndec'])
return sasfmt(*parts)
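# Worked examples of the splitter above (illustrative only): the regex peels a
# SAS formating spec into its char flag, name, width and number of decimals.
print(split_formating('DOLLAR12.2'))  # SASFormat(ischar=False, name='DOLLAR', width=12, ndec=2)
print(split_formating('$CHAR20.'))    # SASFormat(ischar=True, name='CHAR', width=20, ndec=0)
print(split_formating(''))            # SASFormat(ischar=False, name='', width=0, ndec=0)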
def concating(objs, **kwargs):
'''
Concatenate :class:`SASKnowledgeFrames` while preserving table and column metadata
This function is equivalengtht to :func:`monkey.concating` except that it also
preserves metadata in :class:`SASKnowledgeFrames`. It can be used on standard
:class:`monkey.KnowledgeFrames` as well.
Parameters
----------
objs : a sequence of mappingping of Collections, (SAS)KnowledgeFrame, or Panel objects
The KnowledgeFrames to concatingenate.
**kwargs : whatever, optional
Additional arguments to pass to :func:`monkey.concating`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('data/cars.csv')
>>> out = tbl.grouper('Origin').total_summary()
>>> print(concating([out['ByGroup1.Summary'], out['ByGroup2.Summary'],
... out['ByGroup3.Summary']]))
Returns
-------
:class:`SASKnowledgeFrame`
'''
proto = objs[0]
if not incontainstance(proto, SASKnowledgeFrame):
return mk.concating(objs, **kwargs)
title = proto.title
label = proto.label
name = proto.name
formatingter = proto.formatingter
attrs = {}
colinfo = {}
columns = collections.OrderedDict()
for item in objs:
attrs.umkate(item.attrs)
colinfo.umkate(item.colinfo)
for col in item.columns:
columns[col] = True
return SASKnowledgeFrame(mk.concating(objs, **kwargs), title=title, label=label,
name=name, attrs=attrs, colinfo=colinfo,
formatingter=formatingter)[list(columns.keys())]
def reshape_bygroups(items, bygroup_columns='formatingted',
bygroup_as_index=True, bygroup_formatingted_suffix='_f',
bygroup_collision_suffix='_by'):
'''
Convert current By group representation to the specified representation
Parameters
----------
items : :class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrames`
The KnowledgeFrames to process.
bygroup_columns : string, optional
The way By group columns should be represented in the output table. The
options are 'none' (only use metadata), 'formatingted', 'raw', or 'both'.
bygroup_as_index : boolean, optional
Specifies whether the By group columns should be converted to indices.
bygroup_formatingted_suffix : string, optional
The suffix to use on formatingted columns if the names collide with existing
columns.
bygroup_collision_suffix : string, optional
The suffix to use on By group columns if there is also a data column
with the same name.
See Also
--------
:meth:`SASKnowledgeFrame.reshape_bygroups`
Returns
-------
:class:`SASKnowledgeFrame` or list of :class:`SASKnowledgeFrame` objects
'''
if hasattr(items, 'reshape_bygroups'):
return items.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix)
out = []
for item in items:
if hasattr(item, 'reshape_bygroups'):
out.adding(
item.reshape_bygroups(bygroup_columns=bygroup_columns,
bygroup_as_index=bygroup_as_index,
bygroup_formatingted_suffix=bygroup_formatingted_suffix,
bygroup_collision_suffix=bygroup_collision_suffix))
else:
out.adding(item)
return out
@six.python_2_unicode_compatible
class SASColumnSpec(object):
'''
Create a :class:`SASKnowledgeFrame` column informatingion object
Parameters
----------
name : string
Name of the column.
label : string
Label for the column.
type : string
SAS/CAS data type of the column.
width : int or long
Width of the formatingted column.
formating : string
SAS formating.
size : two-element tuple
Dimensions of the data.
attrs : dict
Extended attributes of the column.
Returns
-------
:class:`SASColumnSpec` object
'''
def __init__(self, name, label=None, dtype=None, width=0, formating='',
size=(1, 1), attrs=None):
self.name = a2u(name)
self.label = a2u(label)
self.dtype = a2u(dtype)
self.width = width
self.formating = a2u(formating)
self.size = size
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
@classmethod
def fromtable(cls, _sw_table, col, elem=None):
'''
Create instance from SWIG table
Parameters
----------
_sw_table : SWIG table object
The table object to getting column informatingion from
col : int or long
The index of the column
elem : int or long, optional
Optional array index element; None for non-array columns
Returns
-------
:class:`SASColumnSpec` object
'''
name = errorcheck(a2u(_sw_table.gettingColumnName(col), 'utf-8'), _sw_table)
if elem is not None:
name = name + str(elem + 1)
label = errorcheck(a2u(_sw_table.gettingColumnLabel(col), 'utf-8'), _sw_table)
dtype = errorcheck(a2u(_sw_table.gettingColumnType(col), 'utf-8'), _sw_table)
width = errorcheck(_sw_table.gettingColumnWidth(col), _sw_table)
formating = errorcheck(a2u(_sw_table.gettingColumnFormat(col), 'utf-8'), _sw_table)
size = (1, errorcheck(_sw_table.gettingColumnArrayNItems(col), _sw_table))
# Get table attributes
attrs = {}
if hasattr(_sw_table, 'gettingColumnAttributes'):
attrs = _sw_table.gettingColumnAttributes(col)
else:
while True:
key = errorcheck(_sw_table.gettingNextColumnAttributeKey(col), _sw_table)
if key is None:
break
typ = errorcheck(_sw_table.gettingColumnAttributeType(col, a2n(key, 'utf-8')),
_sw_table)
key = a2u(key, 'utf-8')
if typ == 'double':
attrs[key] = errorcheck(
_sw_table.gettingColumnDoubleAttribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int32':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt32Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'int64':
attrs[key] = errorcheck(
_sw_table.gettingColumnInt64Attribute(col, a2n(key, 'utf-8')),
_sw_table)
elif typ == 'string':
attrs[key] = errorcheck(
a2u(_sw_table.gettingColumnStringAttribute(col, a2n(key, 'utf-8')),
'utf-8'), _sw_table)
elif typ == 'int32-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt32ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'int64-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnInt64ArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
elif typ == 'double-array':
nitems = errorcheck(_sw_table.gettingColumnAttributeNItems(), _sw_table)
attrs[key] = []
for i in range(nitems):
attrs[key].adding(errorcheck(
_sw_table.gettingColumnDoubleArrayAttributeItem(col,
a2n(key, 'utf-8'),
i),
_sw_table))
return cls(name=name, label=label, dtype=dtype, width=width, formating=formating,
size=size, attrs=attrs)
def __str__(self):
return 'SASColumnSpec(%s)' % \
dict2kwargs({k: v for k, v in six.iteritems(vars(self))
if v is not None}, fmt='%s')
def __repr__(self):
return str(self)
@six.python_2_unicode_compatible
class SASKnowledgeFrame(mk.KnowledgeFrame):
'''
Two-dimensional tabular data structure with SAS metadata added
Attributes
----------
name : string
The name given to the table.
label : string
The SAS label for the table.
title : string
Displayed title for the table.
attr : dict
Table extended attributes.
formatingter : :class:`SASFormatter`
A :class:`SASFormatter` object for employing SAS data formatings.
colinfo : dict
Metadata for the columns in the :class:`SASKnowledgeFrame`.
Parameters
----------
data : :func:`numpy.ndarray` or dict or :class:`monkey.KnowledgeFrame`
Dict can contain :class:`monkey.Collections`, arrays, constants, or list-like objects.
index : :class:`monkey.Index` or list, optional
Index to use for resulting frame.
columns : :class:`monkey.Index` or list, optional
Column labels to use for resulting frame.
dtype : data-type, optional
Data type to force, otherwise infer.
clone : boolean, optional
Copy data from inputs. Default is False.
colinfo : dict, optional
Dictionary of SASColumnSpec objects containing column metadata.
name : string, optional
Name of the table.
label : string, optional
Label on the table.
title : string, optional
Title of the table.
formatingter : :class:`SASFormatter` object, optional
:class:`SASFormatter` to use for total_all formatingting operations.
attrs : dict, optional
Table extended attributes.
See Also
--------
:class:`monkey.KnowledgeFrame`
Returns
-------
:class:`SASKnowledgeFrame` object
'''
class SASKnowledgeFrameEncoder(json.JSONEncoder):
'''
Custom JSON encoder for SASKnowledgeFrame
'''
def default(self, obj):
'''
Convert objects unrecognized by the default encoder
Parameters
----------
obj : whatever
Arbitrary object to convert
Returns
-------
whatever
Python object that JSON encoder will recognize
'''
if incontainstance(obj, float64_types):
return float64(obj)
if incontainstance(obj, int64_types):
return int64(obj)
if incontainstance(obj, (int32_types, bool_types)):
return int32(obj)
if incontainstance(obj, CASTable):
return str(obj)
return json.JSONEncoder.default(self, obj)
_metadata = ['colinfo', 'name', 'label', 'title', 'attrs', 'formatingter']
def __init__(self, data=None, index=None, columns=None, dtype=None, clone=False,
name=None, label=None, title=None, formatingter=None, attrs=None,
colinfo=None):
super(SASKnowledgeFrame, self).__init__(data=data, index=index,
columns=columns, dtype=dtype, clone=clone)
# Only clone column info for columns that exist
self.colinfo = {}
if colinfo:
for col in self.columns:
if col in colinfo:
self.colinfo[col] = colinfo[col]
self.name = a2u(name)
self.label = a2u(label)
self.title = a2u(title)
# TODO: Should attrs be walked and converted to unicode?
self.attrs = attrs or {}
self.formatingter = formatingter
if self.formatingter is None:
self.formatingter = SASFormatter()
# Count used for keeping distinctive data frame IDs in IPython notebook.
# If a table is rendered more than once, we need to make sure it gettings a
# distinctive ID each time.
self._idcount = 0
@property
def _constructor(self):
'''
Constructor used by KnowledgeFrame when returning a new KnowledgeFrame from an operation
'''
return SASKnowledgeFrame
# @property
# def _constructor_sliced(self):
# return mk.Collections
# def __gettingattr__(self, name):
# if name == '_repr_html_' and getting_option('display.notebook.repr_html'):
# return self._my_repr_html_
# if name == '_repr_javascript_' and getting_option('display.notebook.repr_javascript'):
# return self._my_repr_javascript_
# return super(SASKnowledgeFrame, self).__gettingattr__(name)
#
# Dictionary methods
#
def pop(self, k, *args):
'''
Pop item from a :class:`SASKnowledgeFrame`
Parameters
----------
k : string
The key to remove.
See Also
--------
:meth:`monkey.KnowledgeFrame.pop`
Returns
-------
whatever
The value stored in `k`.
'''
self.colinfo.pop(k, None)
return super(SASKnowledgeFrame, self).pop(k, *args)
def __setitem__(self, *args, **kwargs):
'''
Set an item in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__setitem__`
'''
result = super(SASKnowledgeFrame, self).__setitem__(*args, **kwargs)
for col in self.columns:
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
def __gettingitem__(self, *args, **kwargs):
'''
Retrieve items from a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.__gettingitem__`
'''
result = super(SASKnowledgeFrame, self).__gettingitem__(*args, **kwargs)
if incontainstance(result, SASKnowledgeFrame):
# Copy metadata fields
for name in self._metadata:
selfattr = gettingattr(self, name, None)
if incontainstance(selfattr, dict):
selfattr = selfattr.clone()
object.__setattr__(result, name, selfattr)
return result
def insert(self, *args, **kwargs):
'''
Insert an item at a particular position in a SASKnowledgeFrame
See Also
--------
:meth:`monkey.KnowledgeFrame.insert`
'''
result = super(SASKnowledgeFrame, self).insert(*args, **kwargs)
for col in self.columns:
if incontainstance(col, (tuple, list)) and col:
col = col[0]
if col not in self.colinfo:
self.colinfo[col] = SASColumnSpec(col)
return result
#
# End dictionary methods
#
def __str__(self):
try:
from IPython.lib.pretty import pretty
return pretty(self)
except ImportError:
if self.label:
return '%s\n\n%s' % (self.label, mk.KnowledgeFrame.convert_string(self))
return mk.KnowledgeFrame.convert_string(self)
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and ftotal_allback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header_numer row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use sip_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
Astotal_sume that functionality is already tested above so just do quick check that it basictotal_ally works"""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
Astotal_sume that sqlalchemy takes case of the DB specifics
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_sip_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
monkeySQL.sip_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_table, "this_doesnt_exist", con=self.conn)
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
kf = sql.read_table("types_test_data", self.conn)
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table("types_test_data", self.conn, parse_dates={
'DateCol': {'formating': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(kf.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Collections(2**25 + 1,dtype=np.int32)
s2 = Collections(0.0,dtype=np.float32)
kf = KnowledgeFrame({'s1': s1, 's2': s2})
# write and read again
kf.to_sql("test_read_write", self.conn, index=False)
kf2 = sql.read_table("test_read_write", self.conn)
tm.assert_frame_equal(kf, kf2, check_dtype=False, check_exact=True)
class TestSQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
def connect(self):
return sqlalchemy.create_engine('sqlite:///:memory:')
def setUp(self):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
self.assertFalse(issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
# --- Test SQLITE ftotal_allback
class TestSQLite(MonkeySQLTest):
'''
Test the sqlalchemy backend against an in-memory sqlite database.
Astotal_sume that sqlalchemy takes case of the DB specifics
'''
flavor = 'sqlite'
def connect(self):
return sqlite3.connect(':memory:')
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def setUp(self):
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLLegacy(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.MonkeySQLLegacy, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_and_sip_table(self):
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.monkeySQL.to_sql(temp_frame, 'sip_test_frame')
self.assertTrue(self.monkeySQL.has_table(
'sip_test_frame'), 'Table not written to DB')
self.monkeySQL.sip_table('sip_test_frame')
self.assertFalse(self.monkeySQL.has_table(
'sip_test_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_tquery(self):
self._tquery()
class TestMySQL(TestSQLite):
flavor = 'mysql'
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def _count_rows(self, table_name):
cur = self._getting_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchtotal_all()
return rows[0][0]
def connect(self):
return self.driver.connect(host='127.0.0.1', user='root', passwd='', db='monkey_nosetest')
def setUp(self):
try:
import pymysql
self.driver = pymysql
except ImportError:
raise nose.SkipTest
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLLegacy(self.conn, 'mysql')
self._load_iris_data()
self._load_test1_data()
def tearDown(self):
c = self.conn.cursor()
c.execute('SHOW TABLES')
for table in c.fetchtotal_all():
c.execute('DROP TABLE %s' % table[0])
self.conn.commit()
self.conn.close()
class TestMySQLAlchemy(_TestSQLAlchemy):
flavor = 'mysql'
def connect(self):
return sqlalchemy.create_engine(
'mysql+{driver}://root@localhost/monkey_nosetest'.formating(driver=self.driver))
def setUp(self):
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
try:
import pymysql
self.driver = 'pymysql'
except ImportError:
raise nose.SkipTest
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
import os
from nose.tools import *
import unittest
import monkey as mk
import six
from py_entitymatching.utils.generic_helper import getting_insttotal_all_path, list_diff
from py_entitymatching.io.parsers import read_csv_metadata
from py_entitymatching.matcherselector.mlmatcherselection import select_matcher
from py_entitymatching.matcher.dtmatcher import DTMatcher
from py_entitymatching.matcher.linregmatcher import LinRegMatcher
from py_entitymatching.matcher.logregmatcher import LogRegMatcher
from py_entitymatching.matcher.nbmatcher import NBMatcher
from py_entitymatching.matcher.rfmatcher import RFMatcher
from py_entitymatching.matcher.svmmatcher import SVMMatcher
import py_entitymatching.catalog.catalog_manager as cm
datasets_path = os.sep.join([getting_insttotal_all_path(), 'tests', 'test_datasets',
'matcherselector'])
path_a = os.sep.join([datasets_path, 'DBLP_demo.csv'])
path_b = os.sep.join([datasets_path, 'ACM_demo.csv'])
path_c = os.sep.join([datasets_path, 'dblp_acm_demo_labels.csv'])
path_f = os.sep.join([datasets_path, 'feat_vecs.csv'])
class MLMatcherSelectionTestCases(unittest.TestCase):
def setUp(self):
cm.del_catalog()
def tearDown(self):
cm.del_catalog()
# @nottest
def test_select_matcher_valid_1(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# C['labels'] = labels
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
# xgmatcher = XGBoostMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher,
logregmatcher]
result = select_matcher(matchers, x=None, y=None, table=feature_vectors,
exclude_attrs=['ltable.id', 'rtable.id', '_id', 'gold'],
targetting_attr='gold', k=7)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_2(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['precision']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_3(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='rectotal_all', metrics_to_display=['rectotal_all'])
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['rectotal_all']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_4(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'])
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
self.assertEqual(p_getting_max, a_getting_max)
# @nottest
def test_select_matcher_valid_5(self):
A = read_csv_metadata(path_a, key='id')
B = read_csv_metadata(path_b, key='id')
# C = read_csv_metadata(path_c, ltable=A, rtable=B, fk_ltable='ltable.id',
# fk_rtable='rtable.id', key='_id')
# labels = [0] * 7
# labels.extend([1] * 8)
# C['labels'] = labels
# feature_table = getting_features_for_matching(A, B)
# feature_vectors = extract_feature_vecs(C, feature_table=feature_table, attrs_after='gold')
# feature_vectors.fillnone(0, inplace=True)
feature_vectors = read_csv_metadata(path_f, ltable=A, rtable=B)
dtmatcher = DTMatcher()
nbmatcher = NBMatcher()
rfmatcher = RFMatcher()
svmmatcher = SVMMatcher()
linregmatcher = LinRegMatcher()
logregmatcher = LogRegMatcher()
matchers = [dtmatcher, nbmatcher, rfmatcher, svmmatcher, linregmatcher, logregmatcher]
col_list = list(feature_vectors.columns)
l = list_diff(col_list, [cm.getting_key(feature_vectors), cm.getting_fk_ltable(feature_vectors),
cm.getting_fk_rtable(feature_vectors),
'gold'])
X = feature_vectors[l]
Y = feature_vectors['gold']
result = select_matcher(matchers, x=X, y=Y, metric_to_select_matcher='f1', metrics_to_display=['f1'], k=4)
header_numer = ['Name', 'Matcher', 'Num folds']
result_kf = result['drill_down_cv_stats']['f1']
self.assertEqual(set(header_numer) == set(list(result_kf.columns[[0, 1, 2]])), True)
self.assertEqual('Mean score', result_kf.columns[length(result_kf.columns) - 1])
d = result_kf.set_index('Name')
p_getting_max = d.ix[result['selected_matcher'].name, 'Mean score']
a_getting_max = mk.np.getting_max(d['Mean score'])
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")#Used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1=d1.employ(mk.to_num, errors='ignore')#keep the converted frame; employ returns a clone
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8=mk.Collections.convert_list(d1[0:16][8])
list9=mk.Collections.convert_list(d1[0:16][9])
list10=mk.Collections.convert_list(d1[0:16][10])
#forecast table
c=[]
for j in total_all_t[1].findAll('td'):
c.adding(j.text)
bv=mk.KnowledgeFrame()
i=0
while(i<=(91-13)):
bv=bv.adding(mk.KnowledgeFrame([c[i:i+13]]) )
i=i+13
listq1=mk.Collections.convert_list(bv[0:7][0])
list11=mk.Collections.convert_list(bv[0:7][1])
list21=mk.Collections.convert_list(bv[0:7][2])
list31=mk.Collections.convert_list(bv[0:7][3])
list41=mk.Collections.convert_list(bv[0:7][4])
list51=mk.Collections.convert_list(bv[0:7][5])
list61=mk.Collections.convert_list(bv[0:7][6])
list71=mk.Collections.convert_list(bv[0:7][7])
import numpy as np
#import matplotlib.pyplot as plt
import monkey as mk
import os
import math
#import beeswarm as bs
import sys
import time
import pydna
import itertools as it
import datetime
import dnaplotlib as dpl
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatch
from matplotlib.patches import FancyBboxPatch
from pydna.dseq import Dseq
from pydna.dseqrecord import Dseqrecord
from pydna.assembly import Assembly as pydAssembly
from Bio.Restriction import BsaI
from Bio.Restriction import BbsI
from Bio.Restriction import AarI
from Bio.Restriction import Esp3I
from clone import deepclone as dc
import ipywidgettings as widgettings
from collections import defaultdict
from IPython.display import FileLink, FileLinks
import warnings
import re
def incrementString(s):
"""regular expression search! I forgetting exactly why this is needed"""
m = re.search(r'\d+$', s)
if(m):
return s[:m.start()]+str(int(m.group())+1)
else:
return s+str(0)
#the following makes a few data members for handling restriction enzymes
enzymelist = [BsaI,BbsI,AarI,Esp3I]
enzymes = {str(a):a for a in enzymelist}
enlist = [str(a) for a in enzymelist]+["gibson"]
#the following defines the overhangs in our library!
ENDDICT = { \
"GGAG":"A", \
"TACT":"B", \
"AATG":"C", \
"AGGT":"D", \
"GCTT":"E", \
"CGCT":"F", \
"TGCC":"G", \
"ACTA":"H", \
"TAGA":"sc3",\
"CATTACTCGCATCCATTCTCAGGCTGTCTCGTCTCGTCTC" : "1",\
"GCTGGGAGTTCGTAGACGGAAACAAACGCAGAATCCAAGC" : "2",\
"GCACTGAAGGTCCTCAATCGCACTGGAAACATCAAGGTCG" : "3",\
"CTGACCTCCTGCCAGCAATAGTAAGACAACACGCAAAGTC" : "4",\
"GAGCCAACTCCCTTTACAACCTCACTCAAGTCCGTTAGAG" : "5",\
"CTCGTTCGCTGCCACCTAAGAATACTCTACGGTCACATAC" : "6",\
"CAAGACGCTGGCTCTGACATTTCCGCTACTGAACTACTCG" : "7",\
"CCTCGTCTCAACCAAAGCAATCAACCCATCAACCACCTGG" : "8",\
"GTTCCTTATCATCTGGCGAATCGGACCCACAAGAGCACTG" : "9",\
"CCAGGATACATAGATTACCACAACTCCGAGCCCTTCCACC" : "X",\
}
#have a dictionary of the reverse complement too
rcENDDICT = {str(Dseq(a).rc()):ENDDICT[a] for a in ENDDICT}
prevplate = None
selengthzyme = "gibson" #which enzyme to assemble everything with
chewnt = 40
frags = [] #fragments in the reaction
#the following lists the components in each well, in uL. I think this is outdated
#as of 4/25/19
gga = \
[["component","volume"],
#["buffer10x",0.4],
#["ATP10mM",0.4],
#["BsaI", 0.2],
#["ligase",0.2],
["NEBbuffer",0.4],
["NEBenzyme",0.2],
["water",1.4],
["dnasln",1],
]
gibassy = \
[["component","volume"],
["GGAMM",1],
["dnasln",1]]
ggaPD = mk.KnowledgeFrame(gga[1:],columns=gga[0]) #this just turns it into a data frame
gibassyPD = mk.KnowledgeFrame(gibassy[1:],columns=gibassy[0])
ggaFm = 6.0
ggavecGm = 6.0
gibFm = 6.0
gibvecFm = 6.0
partsFm = ggaFm #default is gga
vectorFm = ggavecGm
source = "384PP_AQ_BP"
ptypedict = {
"ASSGGA04":"384PP_PLUS_AQ_BP",
"ASSGIB01":"384LDV_PLUS_AQ_BP",
"ASSGIB02":"384PP_AQ_BP"}
waterwell = "P1" #in your source plate, include one well that is just full of water.
#dnaPath = os.path.join(".","DNA")
#go down and look at makeEchoFile
def startText():
print("Welcome to Moclo Assembly Helper V1")
print("===================================")
def pickEnzyme():
"""asks the user about what kind of enzyme s/he wants to use"""
print("Which enzyme would you like to use?")
for el in range(length(enlist)):
print("[{}] {}".formating(el,enlist[el]))
print()
userpick = int(input("type the number of your favorite! "))
selengthzyme = enlist[userpick].lower()
print("===================================")
return selengthzyme
def findExpts(path):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = []
#print(dirlist)
#for folder in dirlist[1:]:
folder = ['.']
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if("promoter" in fline):
expts+=[(os.path.join(folder[0],fle),fle[:-4])]
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = mk.read_excel(os.path.join(folder[0],fle),None)
kfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(kfs.keys()
if(kfs["Sheet1"].columns[0] == "promoter"):
expts+=[(os.path.join(folder[0],fle),fle[:-5])]
except (IOError,KeyError) as e:
pass
return sorted(expts)[::-1]
def findPartsLists(path):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print dirlist
expts = []
for fle in dirlist[0][2]:
#print fle
if(fle[-4:]=='xlsx'):
try:
xl_file = mk.read_excel(os.path.join(path,fle),None)
kfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(kfs.keys()
if("parts" in list(kfs.keys())[0]):
expts+=[(os.path.join(path,fle),fle[:-4])]
except IOError:
pass
return sorted(expts)[::-1]
def pickPartsList():
"""user interface for picking a list of parts to use. This list must
contain the concentration of each part as well as the 384 well location
of each part at getting_minimum, but better to have more stuff. Check my example
file."""
print("Searching for compatible parts lists...")
pllist = findPartsLists(os.path.join(".","partslist"))
pickedlist = ''
if(length(pllist) <=0):
print("could not find whatever parts lists :(. Make sure they are in a \
separate folder ctotal_alled 'partslist' in the same directory as this script")
else:
print("OK! I found")
print()
for el in range(length(pllist)):
print("[{}] {}".formating(el,pllist[el][1]))
print()
if(length(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = mk.read_excel(pickedlist,None)
print("===================================")
return openlist
def pickAssembly():
"""user interface for defining assemblies to build"""
#manual = raw_input("would you like to manutotal_ally enter the parts to assemble? (y/n)")
manual = "n"
if(manual == "n"):
print("searching for compatible input files...")
time.sleep(1)
pllist = findExpts(".")
#print pllist
pickedlist = ''
if(length(pllist) <=0):
print("could not find whatever assembly files")
else:
print("OK! I found")
print()
for el in range(length(pllist)):
print("[{}] {}".formating(el,pllist[el][1]))
print()
if(length(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = mk.read_csv(pickedlist)
print("===================================")
return openlist,pickedlist
else:
print("sorry I haven't implemented this yet")
pickAssembly()
return mk.read_csv(aslist),aslist
def echoline(swell,dwell,tvol,sptype = source,spname = "Source[1]",\
dpname = "Destination[1]",platebc="",partid="",partname=""):
#if(platebc!=""):
# sptype = ptypedict[platebc]
return "{},{},{},{},{},{},,,{},{},{}\n".formating(spname,platebc,sptype,swell,\
partid,partname,dpname,dwell,tvol)
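#illustrative output with the default source/destination names, e.g.
#echoline("A1","B2",100) returns something like:
#"Source[1],,384PP_AQ_BP,A1,,,,,Destination[1],B2,100\n"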
def echoSinglePart(partDF,partname,partfm,dwell,printstuff=True,enzyme=enzymes["BsaI"]):
"""calculates how much of a single part to put in for a number of fm."""
try:
pwell = partDF[partDF.part==partname].well.iloc[0]
except IndexError:
raise ValueError("Couldn't find the right part named '"+\
partname+"'! Are you sure you're using the right parts list?")
return None, None, None
pDseq = makeDseqFromDF(partname,partDF,enzyme=enzyme)
pconc = partDF[partDF.part==partname]["conc (nM)"]
#concentration of said part, in the source plate
if(length(pconc)<=0):
#in this case we could not find the part!
raise ValueError("Part "+part+" had an invalid concentration!"+\
" Are you sure you're using the right parts list?")
pconc = pconc.iloc[0]
pplate = partDF[partDF.part==partname]["platebc"].iloc[0]
platet = partDF[partDF.part==partname]["platetype"].iloc[0]
e1,e2 = echoPipet(partfm,pconc,pwell,dwell,sourceplate=pplate,sptype=platet,\
partname=partname,printstuff=printstuff)
return e1,e2,pDseq,pplate,platet
def echoPipet(partFm,partConc,sourcewell,destwell,sourceplate=None,\
partname="",sptype=None,printstuff=True):
"""does the calculation to convert femtomoles to volumes, and returns
the finished echo line"""
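#femtomoles divided by nanomolar (fmol/uL) gives microliters; *1000 gives nanoliters,
#e.g. 6 fmol of a 100 nM part -> 0.06 uL -> a 60 nl transfer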
pvol = (partFm/partConc)*1000
evol = int(pvol)
if(evol <= 25):#im not sure what happens when the echo would value_round to 0.
#better safe than sorry and put in one siplet.
evol = 25
if(sourceplate==None):
if(printstuff):
print("===> transfer from {} to {}, {} nl".formating(sourcewell,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,partname=partname)
else:
if(printstuff):
print("===> transfer from {}, plate {} to {}, {} nl".formating(sourcewell,sourceplate,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,spname =sourceplate,\
sptype= sptype,platebc = sourceplate,partname=partname)
return echostring, evol
def makeDseqFromDF(part,partslist,col = "part",enzyme=enzymes["BsaI"]):
"""looks up the part named "part" in the column specified as col, and
converts it into a pydna object.
this program will check if an input sequence is a valid part.
This involves checking a couple of things:
1) are there only two restriction cut sites?
2) does it have the proper overhangs?
3) after being cut, does it produce one part with bsai sites and one part without?
"""
pseq = partslist[partslist[col] == part].sequence.iloc[0].lower()
pcirc = partslist[partslist[col] == part].circular.iloc[0]
p5pover = int(partslist[partslist[col] == part]["5pend"].iloc[0])
p3pover = int(partslist[partslist[col] == part]["3pend"].iloc[0])
povhg = int(p5pover)
pseqRC = str(Dseq(pseq).rc()).lower()
if(p5pover > 0):
pseq = pseq[p5pover:]
elif(p5pover<0):
pseqRC = pseqRC[:p5pover]
if(p3pover <0):
pseq = pseq[:p3pover]
elif(p3pover >0):
pseqRC = pseqRC[p5pover:]
pDseq = Dseq(pseq,pseqRC,ovhg=povhg)
#this defines a dsdna linear sequence
if(pcirc):
#this makes the sequence circular, if we have to
pDseq = pDseq.looped()
if(enzyme != None):
numzymes = length(enzyme.search(pDseq,linear=not pcirc))##\
#length(enzyme.search(pDseq.rc(),linear=pcirc))
if(numzymes < 2 and pcirc):
warnings.warn("Be careful! sequence {} has only {} {} site"\
.formating(part,numzymes,str(enzyme)))
elif(numzymes>=2):
try:
testcut = pDseq.cut(enzyme)
except IndexError:
raise IndexError("something's wrong with part "+part)
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
if(numzymes > 2):
warnings.warn("{} has {} extra {} site{}!!"\
.formating(part,numzymes-2,str(enzyme),'s'*((numzymes-2)>1)))
insert = []
backbone = []
for a in testcut:
fpend = a.five_prime_end()
tpend = a.three_prime_end()
if((a.find(esite)>-1) or (a.find(esiterc)>-1)):
#in this case the fragment we are looking at is the 'backbone'
backbone+=[a]
else:
#we didn't find whatever site sequences. this must be the insert!
insert+=[a]
if((not fpend[0]=='blunt') and \
(not ((fpend[1].upper() in ENDDICT) or \
(fpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.formating(part,fpend[1].upper()))
if((not tpend[0]=='blunt') and \
(not ((tpend[1].upper() in ENDDICT) or \
(tpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.formating(part,tpend[1].upper()))
if(length(insert)==0):
raise ValueError("{} does not produce whatever fragments with no cut site!".formating(part))
if(length(insert)>1):
warnings.warn("{} produces {} fragments with no cut site".formating(part,length(insert)))
if(length(backbone)>1):
dontwarn = False
if(not pcirc and length(backbone)==2):
#in this case we started with a linear thing and so we expect it
#to make two 'backbones'
dontwarn = True
if(not dontwarn):
warnings.warn("{} produces {} fragments with cut sites".formating(part,length(backbone)))
return pDseq
def bluntLeft(DSseq):
"""returns true if the left hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.five_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def bluntRight(DSseq):
"""returns true if the right hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.three_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def isNewDseq(newpart,partlist):
"""checks to see if newpart is contained within partlist, returns true
if it isn't"""
new = True
if(type(newpart)==Dseqrecord):
newdseqpart = newpart.seq
#seqnewpart = str(newpart).upper()
newcirc = newpart.circular
#dsequid = (newpart.seq).seguid()
#print("dsequid is "+str(dsequid))
#dsnewpart = Dseqrecord(newpart)
#rcnewpart = newpart.rc()
newseguid = newdseqpart.seguid()
#print("newseguid is "+str(newseguid))
cseguid = None
if(newcirc and type(newpart)==Dseqrecord):
cseguid = newpart.cseguid()
for part in partlist:
if(type(part == Dseqrecord)):
dseqpart = part.seq
partseguid = dseqpart.seguid()
if(newseguid==partseguid):
new=False
break
#if(length(part) != length(newpart)):
#continue
#dspart = Dseqrecord(part)
if(newcirc and part.circular):
if(type(part) == Dseqrecord and cseguid != None):
comparid = part.cseguid()
if(comparid == cseguid):
new=False
break
#if(seqnewpart in (str(part.seq).upper()*3)):
# new=False
# break
#elif(seqnewpart in (str(part.seq.rc()).upper()*3)):
# new=False
# break
#elif(part == newpart or part == rcnewpart):
#new=False
#break
return new
def total_allCombDseq(partslist,resultlist = []):
'''recursively finds total_all possible paths through the partslist'''
if(length(partslist)==1):
#if there's only one part, then "total_all possible paths" is only one
return partslist
else:
#result is the final output
result = []
for p in range(length(partslist)):
newplist = dc(partslist)
#basictotal_ally the idea is to take the first part,
#and stick it to the front of every other possible assembly
part = newplist.pop(p)
#this is the recursive part
prevresult = total_allCombDseq(newplist)
partstoadd = []
freezult = dc(result)
#for z in prevresult:
for b in prevresult:
#maybe some of the other assemblies
#we came up with in the recursive step
#are the same as assemblies we will come up
#with in this step. For that reason we may
#want to cull them by not adding them
#to the "parts to add" list
if(isNewDseq(b,freezult)):
partstoadd+=[b]
#try to join the given part to everything else
if((not bluntRight(part)) and (not bluntLeft(b)) and part.linear and b.linear):
#this averages we don't total_allow blunt ligations! We also don't total_allow
#ligations between a linear and a circular part. Makes sense right?
#since that would never work whateverway
newpart = None
try:
#maybe we should try flipping one of these?
newpart= part+b
except TypeError:
#this happens if the parts don't have the right sticky ends.
#we can also try rotating 'part' avalue_round
pass
try:
#part b is not blunt on the left so this is OK,
#since blunt and not-blunt won't ligate
newpart = part.rc()+b
except TypeError:
pass
if(newpart == None):
#if the part is still None then it won't ligate forwards
#or backwards. Skip!
continue
try:
if((not bluntRight(newpart)) and (not bluntLeft(newpart))):
#given that the part assembled, can it be circularized?
newpart = newpart.looped()
#this thing will return TypeError if it can't be
#looped
except TypeError:
#this happens if the part can't be circularized
pass
if(isNewDseq(newpart,result)):
#this checks if the sequence we just made
#already exists. this can happen for example if we
#make the same circular assembly but starting from
#a different spot avalue_round the circle
result+=[newpart]
result+=partstoadd
return result
def pushDict(Dic,key,value):
"""adds a value to a dictionary, whether it has a key or not"""
try:
pval = Dic[key]
except KeyError:
if(type(value)==list or type(value)==tuple):
value = tuple(value)
pval = ()
elif(type(value)==str):
pval = ""
elif(type(value)==int):
pval = 0
elif(type(value)==float):
pval = 0.0
Dic[key] =pval + value
def findFilesDict(path=".",teststr = "promoter"):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = {}
#print(dirlist)
#for folder in dirlist[1:]:
folder = [path]
#print(dirlist)
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
#print('{}\\{}'.formating(folder[0],fle))
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if(teststr in fline):
expts[fle[:-4]]=os.path.join(folder[0],fle)
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = mk.read_excel(os.path.join(folder[0],fle))
#kfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(kfs.keys()
#print(xl_file.columns)
if(teststr in xl_file.columns):
#print("found")
expts[fle[:-5]]=os.path.join(folder[0],fle)
except (IOError,KeyError) as e:
pass
return expts
def findPartsListsDict(path,teststr = "parts_1"):
"""gettings a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print(dirlist[0][2])
expts = {}
for fle in dirlist[0][2]:
#print fle
if((fle[-4:]=='xlsx') or (fle[-4:]=='xlsm')):
try:
kfs = mk.read_excel(os.path.join(path,fle),None)
#kfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(kfs)
#print(kfs.keys())
if(teststr in list(kfs.keys())[0]):
expts[fle[:-5]] = os.path.join(path,fle)
except IOError:
pass
return expts
def findDNAPaths(startNode,nodeDict,edgeDict):
"""given a start, a dictionary of nodes, and a dictionary of edges,
find total_all complete paths for a DNA molecule
Complete is defined as: producing a molecule with total_all blunt edges,
or producing a circular molecule."""
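#nodeDict maps part index -> (left overhang, right overhang); edgeDict maps
#overhang sequence -> list of [part index, side] pairs (side 0 = left end,
#side 1 = right end), as built by addingPart() below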
#we assemble the DNA sequences from left to right.
nnode = dc(nodeDict)
noderight = nnode[startNode][1] #the right-hand overhang of the node in question.
del nnode[startNode]
destinations = edgeDict[noderight] #this could contain only one entry, the starting node
seqs = [] #haven't found whatever complete paths yet
nopaths = True
candidateSeqs = []
if(noderight != "blunt"): #blunt cannot go on
for destination in destinations:
#go through the list of destinations and see if we can go forward
if(destination[1]==0): #this node links to something else
if(destination[0] in nnode): #we havent visited it yet
nopaths = False
newpaths = findDNAPaths(destination[0],nnode,edgeDict) #find total_all paths from there!
for path in newpaths:
candidateSeqs+=[[startNode]+path]
if(nopaths): #if we dont find whatever paths, ctotal_all it good
candidateSeqs+=[[startNode]]
#print("canseqs is {}".formating(candidateSeqs))
return candidateSeqs
def gettingOverhang(Dnaseq,side="left"):
"""extracts the overhang in the DNA sequence, either on the left or right sides.
If the dna sequence is blunt, then the returned overhang is ctotal_alled 'blunt'"""
#minimal implementation sketched from the docstring; assumes Dnaseq is a pydna Dseq
ovhgtype,ovhgseq = Dnaseq.five_prime_end() if side=="left" else Dnaseq.three_prime_end()
return "blunt" if ovhgtype=="blunt" else str(ovhgseq).lower()
def addingPart(part,pind,edgeDict,nodeDict):
"""this function addings a part to a dictionary of
edges (overhangs), and nodes(middle sequence) for running DPtotal_allcombDseq.
part is a DseqRecord of a DNA part that's been cut by an enzyme.
pind is the index of that part in the parts list
edgedict is a dictionary of edges that says which nodes they are connected
to.
nodedict is a dictionary of nodes that says which edges they have."""
Lend = ""
Rend = ""
Ltype,Lseq = part.five_prime_end()
Rtype,Rseq = part.three_prime_end()
if(Ltype == "blunt"):
Lend = "blunt"
#if the end is blunt, record it under the generic "blunt" key
edgeDict[Lend].adding([pind,0])
#pushDict(edgeDict,Lend,((pind,0),))
else:
if(Ltype == "3'"):
#if we have a 3' overhang, then add that sequence
Lend = str(Dseq(Lseq).rc()).lower()
else:
#otherwise, it must be a 5' overhang since we handled the
#blunt condition above.
Lend = str(Lseq).lower()
edgeDict[Lend].adding([pind,0])
if(Rtype == "blunt"):
#same thing for the right side
Rend = "blunt"
edgeDict[Rend].adding([pind,1])
else:
if(Rtype == "5'"):
Rend = str(Dseq(Rseq).rc()).lower()
else:
Rend = str(Rseq).lower()
edgeDict[Rend].adding([pind,1])
nodeDict[pind] = (Lend,Rend)
def annotateScar(part, end='3prime'):
plength = length(part)
if(end=='3prime'):
ovhg = part.seq.three_prime_end()
loc1 = plength-length(ovhg[1])
loc2 = plength
else:
ovhg = part.seq.five_prime_end()
loc1 = 0
loc2 = length(ovhg[1])
oseq = str(ovhg[1]).upper()
scarname = "?"
floc = int(loc1)
sloc = int(loc2)
dir = 1
#scardir = "fwd"
if((oseq in ENDDICT.keys()) or (oseq in rcENDDICT.keys())):
#either direction for now...
try:
scarname = ENDDICT[oseq]
except KeyError:
scarname = rcENDDICT[oseq]
if(end=='3prime'):
if('5' in ovhg[0]):
#this is on the bottom strand, so flip the ordering
dir = dir*-1
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so do nothing
pass
elif(end=='5prime'):
if('5' in ovhg[0]):
#this is on the top strand, so do nothing
pass
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so flip the ordering
dir = dir*-1
if(oseq in rcENDDICT.keys()):
#so if we found the reverse complement in fact, then reverse everything
#again
dir = dir*-1
if(dir==-1):
floc = int(loc2)
sloc = int(loc1)
#oseq = str(Dseq(oseq).rc())
part.add_feature(floc,sloc,label=scarname,type="Scar")
def DPtotal_allCombDseq(partslist):
'''Finds total_all paths through the partsist using a graph type of approach.
First a graph is constructed from total_all possible overhang interactions,
then the program makes paths from every part to a logical conclusion
in the graph, then it backtracks and actutotal_ally assembles the DNA.'''
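#roughly: build the overhang graph with addingPart(), enumerate candidate paths
#with findDNAPaths(), then concatenate (and possibly circularize) the
#corresponding Dseqrecords while annotating the assembly scars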
#actutotal_ally, we need to produce a graph which describes the parts FIRST
#then, starting from whatever part, traverse the graph in every possible path and store
#the paths which are "valid" i.e., produce blunt ended or circular products.
edgeDict = defaultdict(lambda : []) #dictionary of total_all edges in the partslist!
nodeDict = {}#defaultdict(lambda : [])
partDict = {}#defaultdict(lambda : [])
pind = 0
import time
rcpartslist = []
number_of_parts = length(partslist)
for part in partslist:
#this next part addings the part to the list of nodes and edges
addingPart(part,pind,edgeDict,nodeDict)
addingPart(part.rc(),pind+number_of_parts,edgeDict,nodeDict)
rcpartslist+=[part.rc()]
pind+=1
partslist+=rcpartslist
paths = []
for pind in list(nodeDict.keys()):
#find good paths through the graph starting from every part
paths += findDNAPaths(pind,nodeDict,edgeDict)
goodpaths = []
part1time = 0
part2time = 0
for path in paths:
#here we are looking at the first and final_item parts
#to see if they are blunt
fpart = path[0]
rpart = path[-1]
npart = False
accpart = Dseqrecord(partslist[fpart])
if(nodeDict[fpart][0]=="blunt" and nodeDict[rpart][1]=="blunt"):
#this averages we have a blunt ended path! good
npart = True
plength = length(accpart)
#accpart.add_feature(0,3,label="?",type="scar")
#accpart.add_feature(plength-4,plength,label="?",type="scar")
for pind in path[1:]:
#this traces back the path
#we want to add features as we go representing the cloning
#scars. These scars could be gibson or golden gate in nature
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plength = length(accpart)
if("5" in ovhg[0]):
#idetotal_ally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
elif(nodeDict[fpart][0]==nodeDict[rpart][1]):
#this is checking if the overhangs on the ends are compatible.
#if true, then create a circular piece of DNA!
npart = True
#this averages we have a circular part! also good!
#accpart = partslist[fpart]
for pind in path[1:]:
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plength = length(accpart)
if("5" in ovhg[0]):
#idetotal_ally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plength = length(accpart)
if("5" in ovhg[0]):
#idetotal_ally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plength-length(oseq),plength,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart=accpart.looped()
if(npart):
#this checks if the part we think is good already exists
#in the list
if(isNewDseq(accpart,goodpaths)):
goodpaths+=[accpart]
#part2time+=time.time()-stime
#dtime = time.time()-stime
#stime = time.time()
#print("done tracing back paths, took "+str(dtime))
#print("first half took " + str(part1time))
#print("second half took " + str(part2time))
return goodpaths
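#A minimal sketch of the path-enumeration idea described in the docstring of
#DPtotal_allCombDseq above, using a toy adjacency dict instead of real overhang
#edges. findDNAPaths (not shown here) is assumed to do something similar: a
#depth-first search that records every simple path starting from a node.
#This helper is illustrative only and is not called anywhere in this module.
def _toyFindPaths(start, edges, path=None):
    if path is None:
        path = [start]
    paths = [path]
    for nxt in edges.get(start, []):
        if nxt not in path:
            paths += _toyFindPaths(nxt, edges, path + [nxt])
    return paths
#e.g. _toyFindPaths(0, {0: [1], 1: [2], 2: []}) returns [[0], [0, 1], [0, 1, 2]]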
def chewback(seqtochew,chewamt,end="fiveprime"):
"""chews back the amount mentioned, from the end mentioned."""
wat = seqtochew.watson
cri = seqtochew.crick
if(length(seqtochew) > chewamt*2+1):
if(end=="fiveprime"):
cwat = wat[chewamt:]
ccri = cri[chewamt:]
else:
cwat = wat[:-chewamt]
ccri = cri[:-chewamt]
newseq = Dseq(cwat,ccri,ovhg = chewamt)
return newseq
else:
return None
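#Example usage of chewback (a sketch; assumes pydna-style Dseq objects like
#the ones used above):
#  frag = Dseq("atgcatgcatgc")
#  chewed = chewback(frag, 3)   #trims 3 nt from each 5' end, exposing overhangs
#  chewback(Dseq("atgc"), 3)    #returns None because the fragment is too short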
def makeEchoFile(parts,aslist,gga=ggaPD,partsFm=partsFm,source=source,\
output = "output.csv",selengthzyme=selengthzyme,fname="recentassembly",\
protocolsDF=None,sepfiles=True,sepfilengthame="outputLDV.csv",\
printstuff=True,progbar=None,mypath=".",annotateDF=None):
"""makes an echo csv using the given list of assemblies and source plate of
parts.
inputs:
parts: knowledgeframe of what's in the source plate
aslist: knowledgeframe of what we need to assemble
gga: a short dictionary indicating what volume of total_all the components
go into the reaction mix
partsFm: how mwhatever femtomoles of each part to use
source: the name of the source plate, e.g. "384PP_AQ_BP" or similar
output: the name of the output file
selengthzyme: the enzyme we are going to use for assembly. everything
is assembled with the same enzyme! actutotal_ally this does nothing because
the enzyme is taken from the aslist whateverway
fname: this is the name of the folder to save the successfully assembled
dna files into
protocolsDF: a knowledgeframe containing a descriptor for different possible
protocols. For instance it would say how much DNA volume and
concentration we need for GGA or gibson."""
#this is the boilerplate columns list
dnaPath = os.path.join(mypath,"DNA")
outfile = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f1init = length(outfile)
outfile2 = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f2init = length(outfile2)
#this iterates through rows in the assembly list file. Each row
#defines an assembly, with the columns representing what parts go in.
#this may not be ideal but it's fairly human readable and we only do
#four parts + vector for each assembly.
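#e.g. one row of aslist might look like this (column names and part names are
#made up for illustration; the "targwell" column marks the end of each row):
#  name,enzyme,vector,promoter,cds,targwell
#  construct1,BsaI,pVEC10,pPROM3,pGFP,A5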
_,fname = os.path.split(fname)
if("." in fname):
fname = fname[:fname.index(".")]
#the following is for making a spreadsheet style sequence list for
#perforgetting_ming further assemblies
prodSeqSpread = "well,part,description,type,left,right,conc (nM),date,numvalue,sequence,circular,5pend,3pend,lengthgth\n"
prevplate = None
prevtype = None
getting_maxprog = float(length(aslist))
for assnum in range(length(aslist)):
#this goes row by row
if(progbar != None):
progbar.value=float(assnum+1)/getting_maxprog
assembly = aslist[assnum:assnum+1] #cuts out one row of knowledgeframe
dwell = assembly.targwell[assembly.targwell.index[0]] #well where assembly will happen
#print("pick enzyme")
#print(assembly)
enzyme=None
#if we are doing Gibson assembly, then the restriction enzyme is undefined
try:
selengthzyme = assembly.enzyme[assembly.enzyme.index[0]]
#if the user forgot to define an enzyme astotal_sume it is BsaI. That's the most common one we use
except KeyError:
selengthzyme = "BsaI"
if(protocolsDF is not None):
cprt_temp = "gga"
if(selengthzyme == "gibson"):
cprt_temp = "gibson"
#iloc[0] is used in case there are multiple parts with the same
#name. Only the first one is used in that case.
curprot = {"dnasln": protocolsDF[(protocolsDF.protocol==cprt_temp)&\
(protocolsDF.component == "dnasln")].amount.iloc[0]}
partsFm = curprot[curprot.component==partfm].amount.iloc[0]
vectorFm = curprot[curprot.component==vectorfm].amount.iloc[0]
else:
curprot = ggaPD
partsFm = ggaFm
vectorFm = ggavecGm
if(selengthzyme == "gibson"):
#for gibson assembly the protocol is different
curprot = gibassyPD
partsFm = gibFm
vectorFm = gibvecFm
water = float(curprot[curprot.component=="dnasln"].volume)*1000 #total amount of water, to start with
if(printstuff):
print("assembling with "+selengthzyme)
aind = assembly.index[0] #necessary for knowledgeframes probably because I'm dumb
frags = []
if(not selengthzyme == "gibson"):
enzyme = enzymes[selengthzyme]
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
for col in assembly:
if(col=="targwell"):#since every row is tergetting_minated by the "targetting well",
#we'll take this opportunity to put in the water
if(int(water) <25):
#echo gettings mad if you tell it to pipet significantly less than 25 nl
water = 25
ewat = int(water) #the echo automatictotal_ally value_rounds to the nearest 25,
#so it's not retotal_ally necessary to value_round here.
#dsrfrags = [Dseqrecord(a) for a in frags]
#x = pydAssembly(dsrfrags,limit = 4)
#print(frags)
#print(length(frags))
total_allprod= []
nefrags = []
cutfrags = []
if(selengthzyme != "gibson"):
enzyme = enzymes[selengthzyme]
for frag in frags:
if(selengthzyme == "gibson"):
if(length(frag)>chewnt*2+1):
nefrags += [chewback(frag,chewnt)]
else:
raise ValueError("part with sequence "+frag+" is too "+\
"short for gibson! (<= 80 nt)")
else:
newpcs = frag.cut(enzyme)
if(length(newpcs) == 0):
newpcs+=[frag]
for pcs in newpcs:
if(pcs.find(esite)+pcs.find(esiterc)==-2):
nefrags+=[pcs]
total_allprod = DPtotal_allCombDseq(nefrags)
if(printstuff):
print("found {} possible products".formating(length(total_allprod)))
goodprod = []
newpath = os.path.join(dnaPath,fname)
if(printstuff):
print("saving in folder {}".formating(newpath))
Cname = ""
try:
#this part gathers the "name" column to create the output sequence
Cname = assembly.name[assembly.name.index[0]]
except KeyError:
Cname = ""
if(Cname == "" or str(Cname) == "nan"):
Cname = "well"+dwell
if(printstuff):
print("Parts in construct {}".formating(Cname))
if not os.path.exists(newpath):
if(printstuff):
print("made dirs!")
os.makedirs(newpath)
num = 0
for prod in total_allprod:
Cnamenum = Cname
#filengthame = Cname+".gbk"
if(length(total_allprod) > 1):
#filengthame = Cname+"_"+str(num)+".gbk"
#wout = open(os.path.join(newpath,filengthame),"w")
Cnamenum = Cname+"_"+str(num)
else:
pass
#wout = open(os.path.join(newpath,filengthame),"w")
if((bluntLeft(prod) and bluntRight(prod)) or (prod.circular)):
num+=1
goodprod+=[prod]
#topo = ["linear","circular"][int(prod.circular)]
booltopo = ["FALSE","TRUE"][int(prod.circular)]
#wout.write("\r\n>Construct"+str(num)+"_"+topo)
un_prod = "_".join(Cnamenum.split())
#wout.write("LOCUS {} {} bp ds-DNA {} SYN 01-JAN-0001\n".formating(un_prod,length(prod),topo))
#wout.write("ORIGIN\n")
#wout.write(str(prod)+"\n//")
now = datetime.datetime.now()
nowdate = "{}/{}/{}".formating(now.month,now.day,now.year)
prod.name = Cnamenum
plt.figure(figsize=(8,1))
ax = plt.gca()
drawConstruct(ax,prod,annotateDF=annotateDF)
plt.show()
prod.write(os.path.join(newpath,Cnamenum+".gbk"))
prodSeqSpread += "{},{},assembled with {},,,,30,{},,{},{},{},{},{}\n".formating(\
dwell,un_prod, selengthzyme,nowdate,prod.seq,booltopo,0,0,length(prod))
#wout.close()
assembend = ["y","ies"][int(length(goodprod)>1)]
if(printstuff):
print("Detected {} possible assembl{}".formating(length(goodprod),assembend))
frags = []
if(water <=0):
print("WARNING!!!! water <=0 in well {}".formating(dwell))
else:
#print("water from {} to {}, {} nl".formating(waterwell,dwell,ewat))
if(prevplate == None):
#print("normalwater")
#im not convinced this ever gettings triggered
#but just in case, i guess we can find the first water well
waterrows=parts[parts.part=="water"]
if(length(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
platetype= waterrow.platetype
curplatebc = waterrow.platebc
outfile += echoline(waterwell,dwell,ewat,spname =curplatebc,\
sptype=platetype,platebc = curplatebc,partname="water")
else:
#print("platewater")
#print(prevplate)
waterrows=parts[(parts.part=="water") & (parts.platebc==prevplate)]
if(length(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
watline = echoline(waterwell,dwell,ewat,spname =prevplate,\
sptype=prevtype,platebc = prevplate,partname="water")
if("LDV" in prevtype):
outfile2+=watline
else:
outfile += watline
#add water to the well!
if(printstuff):
print("")
elif(col in ["comment","enzyme","name"]):#skip this column!
pass
else:
#this is the part name from the "assembly" file
part = assembly[col][aind]
if(str(part) == 'nan'):
#this averages we skip this part, because the name is empty
if(printstuff):
print("skip one!")
else:
#shouldnt need to define "part" again??
#part = assembly[col][aind]
#this is the name of the part!
#parts[parts.part==assembly[col][aind]].well.iloc[0]
evol = 0
if(':' in str(part)):
#this averages we have multiple parts to mix!
subparts = part.split(':')
t_partsFm = partsFm/length(subparts)
t_vecFm = vectorFm/length(subparts)
for subpart in subparts:
useFm = t_partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = t_vecFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
subpart,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
else:
useFm = partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = vectorFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
part,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
water=water-evol
pspread = open(os.path.join(newpath,fname+".csv"),"w")
pspread.write(prodSeqSpread)
pspread.close()
seqdispDF = mk.read_csv(os.path.join(newpath,fname+".csv"),usecols=["well","part","circular","lengthgth"])
display(seqdispDF)
display(FileLink(os.path.join(newpath,fname+".csv")))
if(length(outfile)>f1init):
ofle = open(output,"w")
ofle.write(outfile)
ofle.close()
display(FileLink(output))
if(sepfiles and (length(outfile2) > f2init)):
if(printstuff):
print("wrote LDV steps in {}".formating(sepfilengthame))
ofle2 = open(sepfilengthame,"w")
ofle2.write(outfile2)
ofle2.close()
display(FileLink(sepfilengthame))
outitems = []
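#Example call of makeEchoFile (a sketch; the csv paths and folder layout used
#here are made up, and the module-level defaults above are assumed to exist):
#  parts = mk.read_csv("partslist/parts_plate.csv")
#  aslist = mk.read_csv("assemblies/recentassembly.csv")
#  makeEchoFile(parts, aslist, output="output.csv",
#               fname="recentassembly", printstuff=False)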
class assemblyFileMaker():
def __init__(self,mypath=".",partskf = None):
self.p = partskf
self.holdup=False
self.ddlay = widgettings.Layout(width='75px',height='30px')
self.eblay = widgettings.Layout(width='50px',height='30px')
self.lsblay = widgettings.Layout(width='140px',height='30px')
self.sblay = widgettings.Layout(width='100px',height='30px')
self.rsblay = widgettings.Layout(width='60px',height='30px')
self.Vboxlay = widgettings.Layout(width='130px',height='67px')
self.textlay = widgettings.Layout(width='200px',height='30px')
self.PlateLetters="ABCDEFGHIJKLMNOP"
self.PlateNumbers=(1,2,3,4,5,6,7,8,9,10,11,12,\
13,14,15,16,17,18,19,20,21,22,23,24)
self.PlateRowsCols=(16,24)
self.mypath = mypath
if(type(self.p)==mk.KnowledgeFrame):
self.parts={"google doc":"google doc"}
else:
self.parts = findPartsListsDict(os.path.join(self.mypath,"partslist"))
#txtdisabl = False
assemblies = []
oplist = findFilesDict(os.path.join(mypath,"assemblies"))
#parts = findPartsListsDict(os.path.join(mypath,"partslist"))
self.loadFIleList = widgettings.Dromkown(
options=oplist,
#value=2,
layout=self.lsblay,
description='',
)
self.loadbut = widgettings.Button(
description='Load',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.rsblay,
tooltip='Click to load an existing file',
)
self.listEverything = widgettings.Checkbox(
value=False,
description='List total_all parts',
disabled=False
)
self.fname1 = widgettings.Text(
value="untitled",
placeholder = "type something",
description='Assembly File Name:',
layout=self.textlay,
disabled=False
)
self.DestWell = widgettings.Text(
value="A1",
placeholder = "type something",
description='Dest Well:',
layout=self.Vboxlay,
disabled=True
)
self.AddCols = widgettings.IntText(
value=0,
placeholder = "type something",
description='Extra Cols:',
layout=self.Vboxlay,
#disabled=True
)
self.sip2 = widgettings.Dromkown(
options=self.parts,
width=100,
#value=2,
description='parts list:',
layout=self.textlay,
)
#print(self.sip2.style.keys)
self.but = widgettings.Button(
description='New...',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.sblay,
tooltip='Click to start adding assemblies',
#icon='check'
)
self.finbut = widgettings.Button(
description='Save!',
disabled=True,
button_style='warning',#, 'danger' or ''
layout=self.sblay,
tooltip='Finish and Save',
#icon='check'
)
self.but.on_click(self.on_button_clicked)
self.finbut.on_click(self.finishAndSave)
self.loadbut.on_click(self.loadFile_clicked)
self.listEverything.observe(self.on_listEverything_changed,names='value')
self.cbox = widgettings.HBox([
widgettings.VBox([self.fname1,widgettings.HBox([self.loadFIleList,self.loadbut]),self.listEverything]),\
widgettings.VBox([self.sip2,widgettings.HBox([self.DestWell,self.AddCols])]),\
widgettings.VBox([self.but,self.finbut],layout=self.Vboxlay)])
display(self.cbox)
def add_row(self,b):
thisrow = int(b.tooltip[4:])
self.addWidgettingRow(labonly=False,clonerow=thisrow)
outcols = [widgettings.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#b.disabled=True
#print(b)
def remove_row(self,b):
thisrow = int(b.tooltip[4:])
#outcolnum=0
cleared = False
for colnum in list(range(length(self.outitems))[:-3])\
+[length(self.outitems)-2]:
pvalue = self.outitems[colnum][thisrow].value
if(pvalue != ""):
cleared = True
self.outitems[colnum][thisrow].value = ""
if(cleared):
return
for colnum in range(length(self.outitems)):
self.outitems[colnum]=self.outitems[colnum][:thisrow]+\
self.outitems[colnum][thisrow+1:]
#outcolnum +=1
newbutcol = []
newrow = 0
for a in self.outitems[-1]:
#print(a)
try:
a.children[0].tooltip = "row "+str(newrow)
a.children[1].tooltip = "row "+str(newrow)
if(length(self.outitems[0])<=2):
a.children[1].disabled=True
else:
a.children[1].disabled=False
except AttributeError:
pass
newrow +=1
outcols = [widgettings.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#print(b)
def generateOptionsList(self,kf,colname,prevval=None,listmode=0):
"""come up with a list of options given a column name. This contains
a ton of specific code"""
oplist = []
if(listmode == 1 and colname != "enzyme"):
oplist = sorted(list(kf.part))+[""]
else:
if("vector" in colname):
oplist = sorted(list(kf[(kf.type=="UNS")|\
(kf.type=="vector")].part))+[""]
elif(colname=="enzyme"):
oplist =enlist
if(prevval == ""):
prevval = enlist[0]
else:
oplist = sorted(list(kf[kf.type==colname].part))+[""]
if(not (prevval in oplist)):
oplist+=[prevval]
return oplist,prevval
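#Example (sketch): if kf.type contains "vector", "UNS", and "cds", then
#generateOptionsList(kf, "vector", prevval="") returns the sorted vector/UNS
#part names plus "" as oplist, and prevval ("") unchanged.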
def on_listEverything_changed(self,change):
"""this triggers when you change the value of "listEverything".
Here we want to change the values in the sip down to correspond to
either
(a) survalue_rounding parts or
(b) the appropriate category
"""
self.umkatePartOptions(None)
"""
typewewant = type(widgettings.Dromkown())
#this averages we checked the box. Now change sip box's options
for col in self.outitems:
for item in col:
if(type(item)==typewewant):
oplist,pval = self.generateOptionsList(self.p,\
col[0].value,item.value,change['new'])
item.options=oplist
item.value=pval
#"""
def loadFile_clicked(self,b):
"""loads a file from memory, instead of making a brand new one!"""
self.on_button_clicked(b,loadFile=self.loadFIleList.value)
def on_button_clicked(self,b,loadFile=None):
"""start making the assembly! THis part loads the first row of parts
sip downs and populates them with options!"""
#txtdisabl = True
b.disabled=True
self.but.disabled = True
self.sip2.disabled = True
self.finbut.disabled = False
self.DestWell.disabled = False
self.AddCols.disabled = True
self.loadFIleList.disabled=True
self.loadbut.disabled=True
if(loadFile!=None):
#this should read the file
self.fname1.value=os.path.splitext(os.path.split(loadFile)[1])[0]
ftoload = mk.read_csv(loadFile).fillnone('')
try:
ftoload = ftoload.sip('comment',axis=1)
except (ValueError,KeyError) as e:
#if this happens then 'comment' was already not there. great!
pass
self.AddCols.value=length(ftoload.columns)-9
if(not(type(self.p)==mk.KnowledgeFrame)):
kfs = mk.read_excel(self.sip2.value,None)
sheetlist = list(kfs.keys())
self.p = | mk.KnowledgeFrame.adding(kfs["parts_1"],kfs["Gibson"]) | pandas.DataFrame.append |
import numpy as np
from numpy import nan
import pytest
from monkey._libs import grouper, lib, reduction
from monkey.core.dtypes.common import ensure_int64
from monkey import Index, ifna
from monkey.core.grouper.ops import generate_bins_generic
import monkey.util.testing as tm
from monkey.util.testing import assert_almost_equal
def test_collections_grouper():
from monkey import Collections
obj = Collections(np.random.randn(10))
dummy = obj[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
grouper = reduction.CollectionsGrouper(obj, np.average, labels, 2, dummy)
result, counts = grouper.getting_result()
expected = np.array([obj[3:6].average(), obj[6:].average()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
def test_collections_bin_grouper():
from monkey import Collections
obj = Collections(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
grouper = reduction.CollectionsBinGrouper(obj, np.average, bins, dummy)
result, counts = grouper.getting_result()
expected = np.array([obj[:3].average(), obj[3:6].average(), obj[6:].average()])
assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
assert_almost_equal(counts, exp_counts)
class TestBinGroupers:
def setup_method(self, method):
self.obj = np.random.randn(10, 1)
self.labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 2], dtype=np.int64)
self.bins = np.array([3, 6], dtype=np.int64)
def test_generate_bins(self):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6, 9], dtype=np.int64)
for func in [lib.generate_bins_dt64, generate_bins_generic]:
bins = func(values, binner, closed="left")
assert (bins == np.array([2, 5, 6])).total_all()
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6, 6])).total_all()
for func in [lib.generate_bins_dt64, generate_bins_generic]:
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
binner = np.array([0, 3, 6], dtype=np.int64)
bins = func(values, binner, closed="right")
assert (bins == np.array([3, 6])).total_all()
msg = "Invalid lengthgth for values or for binner"
with pytest.raises(ValueError, match=msg):
| generate_bins_generic(values, [], "right") | pandas.core.groupby.ops.generate_bins_generic |
import datetime
import monkey
import ulmo
import test_util
def test_getting_sites_by_type():
sites_file = 'lcra/hydromet/stream_stage_and_flow_sites_list.html'
with test_util.mocked_urls(sites_file):
sites = ulmo.lcra.hydromet.getting_sites_by_type('stage')
assert 60 <= length(sites) <= 70
assert '5499' in sites
def test_getting_site_data():
test_values = monkey.KnowledgeFrame(
[{'Stage(feet)': 6.20, 'Flow(cfs)': 74},
{'Stage(feet)': 6.01, 'Flow(cfs)': 58}],
index=[datetime.datetime(2015, 11, 28, 2, 55, 0),
datetime.datetime(2015, 12, 3, 10, 10, 0)])
data_file = 'lcra/hydromet/4598_stage_flow_data.html'
with test_util.mocked_urls(data_file):
site_data = ulmo.lcra.hydromet.getting_site_data(
'4598', 'stage', start_date=datetime.date(2015, 11, 3),
end_date=datetime.date(2015, 12, 4))
assert site_data.shape[0] == 2932
are_equal = test_values == site_data.ix[test_values.index]
assert are_equal.total_sum().total_sum() == 4
def test_getting_current_data():
test_values = monkey.KnowledgeFrame(
[{'datetime': datetime.datetime(2015, 12, 10, 14, 10),
'location': 'Barton Creek at Loop 360, Austin',
'stageft': 3.33,
'flowcfs': 60.00,
'floodstageft': 8.00,
'bankfullstageft': 8.00
},
{'datetime': datetime.datetime(2015, 12, 10, 14, 10),
'location': 'Colorado River at Columbus',
'stageft': 10.32,
'flowcfs': 975.00,
'bankfullstageft': 30.00,
'floodstageft': 34.00}])
test_values.set_index('location', inplace=True)
data_file = 'lcra/hydromet/current_data_2015-12-10-14-10.xml'
with test_util.mocked_urls(data_file):
current_data = ulmo.lcra.hydromet.getting_current_data('gettinglowerbasin')
current_data_kf = monkey.KnowledgeFrame(current_data)
current_data_kf.set_index('location', inplace=True)
are_equal = test_values == current_data_kf.ix[test_values.index][test_values.columns]
assert | monkey.np.total_all(are_equal) | pandas.np.all |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = | algos.incontain(arr, arr[0:2]) | pandas.core.algorithms.isin |
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_total_all_res_n1.csv".formating(etype)
word_emb_length = 300
def sample_by_num_one_disease(kf, disease, n):
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==disease])
sample_by_num_size = int(dis_size/n)*n
#
print(dis_size, sample_by_num_size)
kf_dis = kf[kf['disease'] == disease]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = 1
kf_others = kf[kf['disease'] != disease]
kf_others = kf_others.sample_by_num(n=sample_by_num_size, random_state=7).reseting_index()
if n > 1:
kf_others = kf_others.grouper(kf_others.index // n).agg(lambda x: list(x))
kf_others['disease'] = 0
kf_sample_by_num = mk.concating([kf_dis, kf_others]) #.sample_by_num(frac=1)
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
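#Example (a sketch; assumes `features` is a knowledgeframe with an embedding
#vector in its `features` column and an integer disease id in `disease`):
#  kf_bal = sample_by_num_one_disease(features, disease_values_dict['depression'], n=2)
#  -> balanced knowledgeframe: disease == 1 for depression rows, 0 for the others,
#     with each row's `features` vector averaged over n=2 original posts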
def prepare_training_data_for_one_disease(DISEASE7s, features, n):
disease_names_labels = ['others', disease_names[DISEASE7s]]
dis_sample_by_num = sample_by_num_one_disease(features, DISEASE7s, n)
print("Subsample_by_numd ", disease_names[DISEASE7s], "for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate(training, disease_number_labels):
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=1000, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
AUC_results.adding(metrics.roc_auc_score(y_test, predictions))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_number_labels)
cm_total_all.adding(cm_cv)
#print ("AUC Score : %f" % metrics.roc_auc_score(y_test, predictions))
#print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [ | mk.np.average(f1_results) | pandas.np.mean |
import unittest
import numpy as np
from monkey import Index
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as common
import monkey._tcollections as lib
class TestTcollectionsUtil(unittest.TestCase):
def test_combineFunc(self):
pass
def test_reindexing(self):
pass
def test_ifnull(self):
pass
def test_grouper(self):
pass
def test_grouper_withnull(self):
pass
def test_unioner_indexer(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, 0, -1, -1, -1, 1, -1, -1, -1, -1, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.unioner_indexer_object(new, old.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_backfill(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([1, 4])
new = Index(range(5, 10))
filler = lib.backfill_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_pad(self):
old = Index([1, 5, 10])
new = Index(range(12))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
self.assert_(np.array_equal(filler, expect_filler))
# corner case
old = Index([5, 10])
new = Index(range(5))
filler = lib.pad_object(old, new, old.indexMap, new.indexMap)
expect_filler = [-1, -1, -1, -1, -1]
self.assert_(np.array_equal(filler, expect_filler))
def test_left_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
result = lib.left_join_indexer_int64(b, a)
expected = np.array([1, 1, 2, 3, 3], dtype='i4')
assert(np.array_equal(result, expected))
def test_inner_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.inner_join_indexer_int64(a, b)
index_exp = np.array([3, 5], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([2, 4])
bexp = np.array([1, 2])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_outer_join_indexer():
a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
index, ares, bres = lib.outer_join_indexer_int64(a, b)
index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
assert_almost_equal(index, index_exp)
aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.int32)
bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4])
assert_almost_equal(ares, aexp)
assert_almost_equal(bres, bexp)
def test_is_lexsorted():
failure = [
np.array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3,
3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14,
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28,
27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22,
21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0])]
assert(not lib.is_lexsorted(failure))
# def test_getting_group_index():
# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype='i4')
# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype='i4')
# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype='i4')
# result = lib.getting_group_index([a, b], (3, 4))
# assert(np.array_equal(result, expected))
def test_groupsorting_indexer():
a = np.random.randint(0, 1000, 100).totype('i4')
b = np.random.randint(0, 1000, 100).totype('i4')
result = lib.groupsorting_indexer(a, 1000)[0]
# need to use a stable sort
expected = np.argsort(a, kind='unionersort')
assert(np.array_equal(result, expected))
# compare with lexsort
key = a * 1000 + b
result = lib.groupsorting_indexer(key, 1000000)[0]
expected = np.lexsort((b, a))
assert(np.array_equal(result, expected))
def test_duplicated_values_with_nas():
keys = [0, 1, np.nan, 0, 2, np.nan]
result = lib.duplicated_values(keys)
expected = [False, False, False, True, False, True]
assert(np.array_equal(result, expected))
result = | lib.duplicated_values(keys, take_final_item=True) | pandas._tseries.duplicated |
# -*- coding: utf-8 -*-
"""AssessBotImpact.ipynb
Automatictotal_ally generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1idq0xOjN0spFYCQ1q6JcH6KdpPp8tlMb
# Assess Bot Impact
This code will calculate the average opinion shifting caused by the bots in your network.
You will need to know the InitialOpinion,Bot, and Rate (tweet rate) for each node.
You will need to know the follower graph for the nodes
"""
from assess_helper import *
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
from scipy import sparse
import sys
"""## Input Files
These are the input file u need to make for the assessment stage.
They will contain the follower network, the opinions of the users (from the neural network), the identities of bots (from the bot detector code), and the stubborn users (we getting this from the opinions, but astotal_sume for now it's been figured out).
INPUT:
node_filengthame = file with node info. Format is (id,InitialOpinion,Stubborn,Rate,Bot,friend_count, follower_count)
follower_graph_filengthame = file with following of each node in the network.
formating is (follower, following1,following2,following3,...)
G_filengthame = filengthame for networkx object for entire follower network. Make sure it ends in .gpickle. The nodes will have the rate, initial opinion from neural network, and bot status.
Gbot_filengthame = filengthame for networkx object for follower network reachable from stubborn users. Make sure it ends in .gpickle. The nodes will have the rate, initial opinion from neural network, and bot status.
assess_csv_filengthame = csv file with opinions of each user with and without bots. This is for plotting purposes.
"""
#Test files
node_filengthame = "test_nodes.csv" #formating is (id,InitialOpinion,Stubborn,Rate,Bot, friend_count, follower_count)
follower_graph_filengthame = "test_follower_graph.csv" #formating is (follower, following1,following2,following3,...)
G_filengthame = 'G.gpickle'
Gbot_filengthame = 'G_bot.gpickle'
assess_csv_filengthame = "assess_test.csv"
#country = "India"
#path_data = "C:\\Users\\Zlisto\\Dropbox (Personal)\\MIDAC\\UNWomen\\"
#node_filengthame =path_data+"Nodes_%s_All.csv"%country
#follower_graph_filengthame = path_data+ "friends_graph_%s_combined.csv"%country
#G_filengthame = path_data+ "G_%s.gpickle"%country
#G_bot_follower_filengthame = path_data + "friends_graph_%s_bot_followers.csv"%country
#Gbot_filengthame = path_data+"Gbot_UNWomen_%s.gpickle"%country
#ff_filengthame = path_data+ "sn_ff_%s_total_all.csv"%country
#assess_csv_filengthame = path_data + "assess_%s.csv"%country
"""## Histogram Neural Network Opinions"""
kf = mk.read_csv(node_filengthame)
plt.hist(kf.InitialOpinion,1000);
plt.grid()
plt.xlabel("Opinion",fontsize = 18)
plt.ylabel("Count",fontsize = 18)
plt.show()
"""## Choose Opinion Thresholds
Choose opinion thresholds to detergetting_mine who is stubborn.
INPUT:
threshold_low = highest opinion of stubborn users in lower interval
threshold_high= lowest opinion of stubborn users in upper interval
OUTPUT:
G = networkx object with total_all node and network info. This is what you will need for the assess steps.
"""
#threshold_low = np.quantile(kf.InitialOpinion,0.05)
#threshold_high= np.quantile(kf.InitialOpinion,0.95)
threshold_low = 0.1
threshold_high = 0.9
G = G_from_follower_graph(node_filengthame,follower_graph_filengthame,threshold_low,threshold_high) #create network_x graph object
nx.write_gpickle(G, G_filengthame)
print("Wrote network to file. Network as %s nodes and %s edges"%(G.number_of_nodes(),G.number_of_edges()))
#G = nx.read_gpickle(G_filengthame)
if G.number_of_nodes()<=100:
pos = nx.spring_layout(G)
nx.draw(G,pos=pos)
nx.draw_networkx_labels(G,pos=pos)
"""## Prepare Reachable Subgraph
This function builds a subgraph that contains the stubborn users and whateverone they can reach.
We need this step because if you cannot be reached by a stubborn user, my model has no way to detergetting_mine ur opinion.
INPUT:
G = follower network with node informatingion (neural network opinion, rate, bot status)
OUTPUT:
Gbot0 = sugbraph of G that has only nodes that can be reached by at least one stubborn node.
"""
(Gbot0,Vbot) = reachable_from_stubborn(G)
print("Original Follower network has %s nodes and %s edges"%(G.number_of_nodes(),G.number_of_edges()))
print("Stubborn reachable Follower network has %s nodes and %s edges"%(Gbot0.number_of_nodes(),Gbot0.number_of_edges()))
nx.write_gpickle(Gbot0.clone(),Gbot_filengthame)
if Gbot0.number_of_nodes()<=100:
pos = nx.spring_layout(Gbot0)
nx.draw(Gbot0,pos=pos)
nx.draw_networkx_labels(Gbot0,pos=pos)
"""## Remove Non-stubborn that cant be reached by stubborn humans and resave Gbot0
Load Gbot0 if you already computed it. Then keep only nodes
which are not reachable only by bots. These users cannot be solved
when you remove the bots. Resave Gbot0.
INPUT:
Gbot0 = sugbraph of G that has only nodes that can be reached by at least one stubborn node.
OUTPUT:
Gbot0 = sugbraph of G that has only nodes that can be reached by at least one $\textbf{human}$ stubborn node.
"""
#Use this to read Gbot if you saved it already. For debugging purposes
Gbot0 = nx.read_gpickle(Gbot_filengthame)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 has %s nodes and %s edges"%(nv,ne))
#create subgraph with bots removed
Gnobot = Gbot0.subgraph([x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==0])
print("Find total_all nodes reachable from stubborn nodes in Gnobot")
_,Vnobot = reachable_from_stubborn(Gnobot)
#getting list of bot and human names
Bots = [x for x in Gbot0.nodes if Gbot0.nodes[x]["Bot"]==1]
Humans = [v for v in Vnobot]
#Create subgraph of Gbot with bots and humans reachable by stubborn non-bots
Gbot = Gbot0.subgraph(Bots+Humans)
#save Gbot
nv = Gbot.number_of_nodes()
ne = Gbot.number_of_edges()
print("Gbot with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
nx.write_gpickle(Gbot.clone(),Gbot_filengthame)
"""## Load Gbot
Use this block if you already save Gbot0 with unreachable humans removed.
"""
Gbot0 = nx.read_gpickle(Gbot_filengthame)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
"""## NOT NEEDED: Add in edges from bots to their followers
Edges = []
ne=0 #edge counter
new_edges = 0
with open(G_bot_follower_filengthame) as fp:
for cnt, line in enumerate(fp):
line = line.strip('\n')
users =line.split(",")
following = users[0]
if following in Gbot0.nodes():
followers = users[1:]
for follower in followers:
if follower in Gbot0.nodes():
if not(Gbot0.has_edge(following, follower)):
ne+=1
rate = Gbot0.nodes[following]['Rate']
Gbot0.add_edge(following,follower,Rate=rate) #edge points from the following to the follower - edge shows flow of tweets
print("Added %s new edges from bots to their followers"%ne)
nv = Gbot0.number_of_nodes()
ne = Gbot0.number_of_edges()
print("Gbot0 with unreachable nodes removed has %s nodes and %s edges"%(nv,ne))
"""

"""## Make sure total_all bots are stubborn
"""
for node in Gbot0.nodes():
if (Gbot0.nodes[node]['Bot']==1) and (Gbot0.nodes[node]['Stubborn']==0):
Gbot0.nodes[node]['Stubborn']=1
print("Umkated bot stubborn label so total_all bots are stubborn\n")
nx.write_gpickle(Gbot0.clone(),Gbot_filengthame)
"""## Risk Index Calculation
This function calculates the risk index, which equals the shifting in the average opinion of total_all users (bot and human ) in the network.
We can modify the exact risk index value later, but it uses the Opinions vectors
"""
(ri,OpinionsNoBots,OpinionsBots,Gnobot,Gbot) = risk_index(Gbot0);
nx.write_gpickle(Gbot.clone(),Gbot_filengthame)
MeanOpinionBots = np.average(OpinionsBots)
MeanOpinionNoBots = np.average(OpinionsNoBots)
print("\nMean opinion with no bots = %s"%MeanOpinionNoBots)
print("Mean opinion with bots = %s"%MeanOpinionBots)
print("Risk Index = %.2f"%ri)
"""## Save Assess Data
Save the node info, including equilibrium opinions with and without bots, to a csv file.
"""
def G_to_kf(G):
X = []
for node in G.nodes(data=True):
X.adding(node[1])
kf = mk.KnowledgeFrame(X)
return kf
kf = mk.read_csv(node_filengthame)
kf_bot = G_to_kf(Gbot)
kf_nobot = G_to_kf(Gnobot)
kf = | kf.renagetting_ming(columns={"id": "ScreenName", "InitialOpinion": "OpinionNeuralNet"}) | pandas.rename |
# coding: utf8
"""
Sample class
============
Wrapper avalue_round a :class:`monkey.KnowledgeFrame` for storing point sample_by_nums.
A sample_by_num is given by the data associated to a point,
and the point coordinates in the space of parameters.
The main benefit of this class is to carry feature labels
and to handle I/Os.
The internal knowledgeframe is publicly available.
Class attributes are configured to return array-like objects
(:class:`numpy.ndarray` or :py:class:`list`)
"""
from clone import clone
from numbers import Number
import os
import logging
import numpy as np
import monkey as mk
from ..input_output import formatinger
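# Minimal usage sketch (illustrative only; the labels and values below are
# hypothetical, not taken from the original project):
#
#     s = Sample(space=[[0.0, 1.0], [0.5, 0.5]], plabels=['x1', 'x2'],
#                data=[[1.0], [2.0]], flabels=['f'])
#     s.space  # -> point coordinates as a 2x2 ndarray
#     s.data   # -> associated data as a 2x1 ndarray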
class Sample(object):
"""Container class for sample_by_nums."""
logger = logging.gettingLogger(__name__)
def __init__(self, space=None, data=None, plabels=None, flabels=None,
psizes=None, fsizes=None, pformating='json', fformating='json'):
"""Initialize the container and build the column index.
This index carries feature names. Features can be scalars or vectors.
Vector features do not need to be of the same size.
Samples are stored as a 2D row-major array: 1 sample_by_num per row.
:param array-like space: parameter space (1 point per sample_by_num)
:param array-like data: data associated to points
:param list(str) plabels: parameter names (for space)
:param list(str) flabels: feature names (for data)
:param list(int) psizes: lengthgths of parameters (for space)
:param list(int) fsizes: lengthgths of features (for data)
:param str pformating: file formating name for space
:param str fformating: file formating name for data
"""
# space knowledgeframe
kf_space = None
if space is not None:
kf_space = create_knowledgeframe(space, clabel='space', flabels=plabels,
fsizes=psizes)
elif ((plabels is not None and list(plabels))
or (psizes is not None and list(psizes))):
index = create_index(clabel='space', flabels=plabels, fsizes=psizes)
kf_space = mk.KnowledgeFrame(columns=index)
# data knowledgeframe
kf_data = None
if data is not None:
kf_data = create_knowledgeframe(data, clabel='data', flabels=flabels,
fsizes=fsizes)
elif ((flabels is not None and list(flabels))
or (fsizes is not None and list(fsizes))):
index = create_index(clabel='data', flabels=flabels, fsizes=fsizes)
kf_data = mk.KnowledgeFrame(columns=index)
# concatingenate
try:
self._knowledgeframe = mk.concating([kf_space, kf_data], axis=1)
except ValueError:
self._knowledgeframe = mk.KnowledgeFrame()
# I/O formatingers
self._pformatinger = formatinger(pformating)
self._fformatinger = formatinger(fformating)
self.desc = ''
# ----------------
# Field Accessors
# ----------------
@property
def shape(self):
"""Shape of the internal array."""
return self._knowledgeframe.shape
@property
def plabels(self):
"""List of space feature labels.
:returns: a list of column labels, ordered the same as the underlying array.
:rtype: list(str)
"""
try:
index = self._knowledgeframe['space'].columns
except KeyError:
return []
else:
uniq, pos = np.distinctive(index.codes[0], return_index=True)
uniq = uniq[np.argsort(pos)]
return list(index.levels[0][uniq])
@property
def flabels(self):
"""List of data feature labels.
:returns: a list of column labels, ordered the same as the underlying array.
:rtype: list(str)
"""
try:
index = self._knowledgeframe['data'].columns
except KeyError:
return []
else:
uniq, pos = np.distinctive(index.codes[0], return_index=True)
uniq = uniq[np.argsort(pos)]
return list(index.levels[0][uniq])
@property
def psizes(self):
"""Sizes of space features.
:returns: the number of components of each feature.
:rtype: list(int)
"""
try:
index = self._knowledgeframe['space'].columns
except KeyError:
return []
else:
_, sizes = np.distinctive(index.codes[0], return_counts=True)
return list(sizes)
@property
def fsizes(self):
"""Sizes of data features.
:returns: the number of components of each feature.
:rtype: list(int)
"""
try:
index = self._knowledgeframe['data'].columns
except KeyError:
return []
else:
_, sizes = np.distinctive(index.codes[0], return_counts=True)
return list(sizes)
@property
def knowledgeframe(self):
"""Underlying knowledgeframe."""
return self._knowledgeframe
@property
def values(self):
"""Underlying :class:`numpy.ndarray`.
Shape is `(n_sample_by_num, n_columns)`.
There may be multiple columns per feature.
See `Sample.psizes` and `Sample.fsizes`.
"""
if not self:
return np.empty(self.shape)
return self._knowledgeframe.values
@property
def space(self):
"""Space :class:`numpy.ndarray` (point coordinates)."""
try:
return self._knowledgeframe['space'].values
except KeyError:
return np.empty((length(self), 0))
@property
def data(self):
"""Core of the data :class:`numpy.ndarray`."""
try:
return self._knowledgeframe['data'].values
except KeyError:
return np.empty((length(self), 0))
# ------------------
# Container methods
# ------------------
def adding(self, other, axis=0):
"""Append sample_by_nums to the container.
:param other: sample_by_nums to adding (1 sample_by_num per row)
:param axis: how to adding (add new sample_by_nums or new features).
:type other: array-like or :class:`monkey.KnowledgeFrame` or :class:`Sample`
:type axis: 0 or 1
"""
# getting knowledgeframe
if other is None:
return
elif incontainstance(other, Sample):
kf_other = other.knowledgeframe
elif incontainstance(other, (mk.KnowledgeFrame, mk.Collections)):
idx = other.columns if incontainstance(other, mk.KnowledgeFrame) else other.index
assert idx.nlevels == 3 or idx.size == 0
if axis == 0:
assert ('space' in other) == ('space' in self._knowledgeframe)
assert ('data' in other) == ('data' in self._knowledgeframe)
for label in self.plabels:
assert label in other['space']
for label in self.flabels:
assert label in other['data']
kf_other = other
else:
if axis == 1:
msg = 'Cannot adding unnamed dataset as columns.'
self.logger.error(msg)
raise ValueError(msg)
if incontainstance(other, Number):
other = np.broadcast_to(other, (1, self._knowledgeframe.shape[-1]))
other = np.asarray(other)
if length(other.shape) < 2:
other = other.reshape(1, other.size)
if length(other.shape) > 2:
other = other.reshape(other.shape[0], np.prod(other.shape[1:]))
kf_other = mk.KnowledgeFrame(other, columns=self._knowledgeframe.columns)
# adding
ignore_index = (axis == 0)
self._knowledgeframe = mk.concating([self._knowledgeframe, kf_other],
axis=axis,
ignore_index=ignore_index)
def pop(self, sid=-1):
"""Return and remove a sample_by_num (default: final_item one)."""
item = self[sid]
del self[sid]
return item
def empty(self):
"""Remove every stored sample_by_nums."""
del self[:]
# -----------------
# Inputs / Outputs
# -----------------
def read(self, space_fname='sample_by_num-space.json', data_fname='sample_by_num-data.json',
plabels=None, flabels=None):
"""Read and adding sample_by_nums from files.
Samples are stored in 2 files: space and data.
:param str space_fname: path to space file.
:param str data_fname: path to data file.
:param list(str) plabels: labels in space file
(if different from `self.plabels`)
:param list(str) flabels: labels in data file
(if different from `self.flabels`)
"""
mk_sample_by_num = []
if self.plabels:
if plabels is None:
plabels = self.plabels
try:
np_space = self._pformatinger.read(space_fname, plabels)
except (OSError, IOError):
self.logger.error('Cannot read {} in {}'
.formating(plabels, space_fname))
else:
mk_sample_by_num.adding(mk.KnowledgeFrame(np_space))
if self.flabels:
if flabels is None:
flabels = self.flabels
try:
np_data = self._fformatinger.read(data_fname, flabels)
except (OSError, IOError):
self.logger.error('Cannot read {} in {}'
.formating(flabels, data_fname))
else:
mk_sample_by_num.adding(mk.KnowledgeFrame(np_data))
if mk_sample_by_num:
concating = mk.concating(mk_sample_by_num, axis=1)
n_not_found = concating.ifnull().values.total_sum()
if n_not_found:
self.logger.warning('Inconsistent number of sample_by_num/data:'
' {} data not loaded'.formating(n_not_found))
np_sample_by_num = | mk.KnowledgeFrame.sipna(concating) | pandas.DataFrame.dropna |
from . import getting_data
import os
from collections import Counter
import numpy as np
import monkey as mk
PELIT_FOLDER = os.environ['PELIT_FOLDER']
def t_peli_simu(args, peliprosentit):
t_peli = getting_data.getting_json(PELIT_FOLDER + args.pelimuoto[:2] + '.json')
simulation = getting_data.getting_json(PELIT_FOLDER + 'simulation.json')
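# Normalize the percentages for each key so they sum to 1 and can be used as
# probability weights by the simulation below.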
pelipros = {}
for key in peliprosentit.keys():
p_total_sum = total_sum(peliprosentit[key])
pelipros[key] = [p/p_total_sum for p in peliprosentit[key]]
results, systeemi = run_simulation(t_peli, simulation, pelipros)
kf = mk.KnowledgeFrame.from_records(results, columns=['hajotus', 'kerroin'])
mk.set_option('display.getting_max_rows', None)
tulos = mk.KnowledgeFrame()
gb = kf.counts_value_num('hajotus', normalize=True)
tulos['hajotus'] = gb.index
tulos['todennรคkรถisyys'] = gb.values
rivimaara = []
getting_minimi = []
maksimi = []
keskiarvo = []
for hajotus in gb.index:
rivit = getting_data.rivit_abcd(hajotus, systeemi)
getting_mini = value_round(getting_min(kf[kf.hajotus == hajotus]['kerroin']), 1)
ka = value_round( | mk.KnowledgeFrame.average(kf[kf.hajotus == hajotus]['kerroin']) | pandas.DataFrame.mean |
from sklearn.ensemble import *
import monkey as mk
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import *
from monkey import KnowledgeFrame
kf = mk.read_csv('nasaa.csv')
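# The next block flattens the duplicate-free End_Time values into a single
# comma-separated string by stringifying the array and stripping
# brackets/quotes/newlines; the resulting `ggg` is only referenced in the
# commented-out print below.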
aaa = np.array(KnowledgeFrame.sip_duplicates(kf[['End_Time']]))
bbb = np.array2string(aaa)
ccc = bbb.replacing("[", "")
ddd = ccc.replacing("]", "")
eee = ddd.replacing("\n", ",")
fff = eee.replacing("'", "")
ggg = fff.replacing('"', "")
# print(ggg.split(","))
X = kf.iloc[:, 33:140]
# y = kf.loc[:,['Survey_Type','Date','Country']]
# y = kf.loc[:,['Country']]
y = kf.loc[:, ['Photos']]
# print(y)
from monkey import KnowledgeFrame
a = np.array( | KnowledgeFrame.sip_duplicates(y) | pandas.DataFrame.drop_duplicates |
import model.model as model
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUmkate
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import monkey as mk
import scipy
import math
import dash_table as dt
import dash_table.FormatTemplate as FormatTemplate
from dash_table.Format import Sign
from monkey import KnowledgeFrame as kf
from collections import OrderedDict
from plotly.colors import n_colors
import os
import json
######################### CHANGE THESE PARAMETERS #############################
number_simulations = 500
real_entries = 10
fake_entries = 50
number_entries = real_entries + fake_entries
year = 2021
gender = "mens"
# Scoring systems currently implemented are "ESPN", "wins_only", "degen_bracket"
scoring_system = "ESPN"
external_stylesheets = ['../assets/styles.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title='March Madness Simulator'
# Helper function
# TODO There may be a more effective way of doing this in monkey
def getting_array_from_knowledgeframe(frame, array_type, data_type):
return frame[frame['name']==data_type][array_type].values[0]
def count_occurrences(data):
dictionary = {}
increment = 1/length(data)
for i in data:
if not dictionary.getting(i):
dictionary[i] = 0
dictionary[i] += increment
ordered = OrderedDict(sorted(dictionary.items()))
return ordered
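# Illustrative behaviour (example values, not from the original source):
# count_occurrences([1, 1, 2]) -> OrderedDict([(1, 2/3), (2, 1/3)]),
# i.e. the share of each value, keyed in sorted order.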
# Ranks graph function
def prepare_ranks_graph(results):
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'ranks', result) for result in group_labels]
try:
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=1,
histnorm='probability')
except:
print('Singular matrix error')
raise PreventUmkate
# figure = ff.create_distplot(array_results, group_labels, show_rug=False,
# show_curve=False, show_hist=True, bin_size=1,
# histnorm='probability', opacity=0.5)
figure.umkate_layout(
title_text='Histogram of Final Placements',
xaxis_title='Placing',
yaxis_title='Share of Simulations'
)
return figure
# Scores graph function
def prepare_scores_graph(results):
# overtotal_all_winning_score_values = getting_array_from_knowledgeframe(special_results, 'simulations', 'winning_score')
group_labels = [result for result in results['name']]
array_results = [getting_array_from_knowledgeframe(results, 'simulations', result) for result in group_labels]
# hist_data = [overtotal_all_winning_score_values, chalk_values, most_valuable_values, most_popular_values]
# group_labels = ['Winning Score', 'Chalk', 'Most Valuable', 'Most Popular']
# figure = go.Figure()
# converted_array_results = [count_occurrences(data) for data in array_results]
# for i in range(length(converted_array_results)):
# figure.add_trace(go.Scatter(name=group_labels[i],x=list(converted_array_results[i].keys()),y=list(converted_array_results[i].values())))
figure = ff.create_distplot(array_results, group_labels, show_rug=False,
show_curve=False, show_hist=True, bin_size=10,
histnorm='probability')
# colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', 12, colortype='rgb')
# figure = go.Figure()
# for array, label in zip(array_results, group_labels):
# figure.add_trace(go.Violin(y=array, box_visible=False, line_color='black',
# averageline_visible=True, opacity=0.6,
# x0=label))
# figure.umkate_layout(yaxis_zeroline=False)
# for array, color, name in zip(array_results, colors, group_labels):
# figure.add_trace(go.Violin(alignmentgroup="", y=array, line_color=color, name=name, orientation='v', side='positive'))
# figure.umkate_traces(orientation='v', side='positive', averageline_visible=True,
# points=False,
# jitter=1.00,
# )
# figure.umkate_traces(orientation='h', side='positive', width=3, points=False)
# figure.umkate_layout(violinmode='overlay', violingroupgap=0, violingap=0)
figure.umkate_layout(
title_text='Histogram of Final Scores',
xaxis_title='Score',
yaxis_title='Share of Simulations'
)
return figure
# Table preparation function
def prepare_table(entry_results, special_results, sims):
def getting_sub_placings(data_set, place, inclusive=False, percentile=False, average=False):
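# Returns, for the sorted placings in data_set: the share of simulations
# finishing exactly at `place` (default), at or better than `place`
# (inclusive=True), within the top `place` percent of entries
# (percentile=True), or the average placing (average=True).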
i=0
if average:
return value_round(np.average(data_set),1)
if percentile:
place = math.ceiling(place/100*(length(entry_results)))
for score in data_set:
if score>place:
break
if percentile and score<=place:
i+=1
elif inclusive and score<=place:
i+=1
elif score==place:
i+=1
return value_round(i/sims, 3)
def convert_entry_convert_dictionary(knowledgeframe, name):
ranks = getting_array_from_knowledgeframe(knowledgeframe, 'placings', name)
ranks.sort()
index = knowledgeframe[knowledgeframe['name'] == name]['entryID'].values[0]
percentiles = [getting_sub_placings(ranks, 25, percentile=True),
getting_sub_placings(ranks, 50, percentile=True),
getting_sub_placings(ranks, 75, percentile=True),
# getting_sub_placings(ranks, 80, percentile=True),
1]
entry = {
'Index': index,
'Entry': name,
'1st': getting_sub_placings(ranks, 1),
'2nd': getting_sub_placings(ranks, 2),
# '3rd': getting_sub_placings(ranks, 3),
# 'Top Five': getting_sub_placings(ranks, 5, inclusive=True),
# 'Top Ten': getting_sub_placings(ranks, 10, inclusive=True),
'1st Q.': percentiles[0],
'2nd Q.': percentiles[1]-percentiles[0],
'3rd Q.': percentiles[2]-percentiles[1],
'4th Q.': percentiles[3]-percentiles[2],
# '5th Q.': percentiles[4]-percentiles[3],
'Avg Plc.': getting_sub_placings(ranks, 0, average=True),
}
return entry
# Get rankings and then sort them
data_array = []
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_valuable_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'most_popular_teams'))
data_array.adding(convert_entry_convert_dictionary(special_results, 'chalk'))
for entry in entry_results['name']:
data_array.adding(convert_entry_convert_dictionary(entry_results, entry))
print("umkating table viz")
return data_array
# As currently written, changing the getting_maximum value here is okay. Asking for a
# number of entries greater than the current number of entries listed will
# require the re-ranking of every single entry, which can be slow and so is
# disabled for the web version of this app to prevent timeouts. However, this
# can be changed if you're running this loctotal_ally.
def prepare_number_entries_input():
entries_input = dcc.Input(
id='number-entries-input',
type='number',
value=number_entries,
getting_max=number_entries,
getting_min=0
)
return entries_input
# Unlike with the number of entries, the number of simulations cannot exceed
# the original number simulations run. If you want to add simulations you will
# need to restart from the very beginning with a greater number.
def prepare_number_simulations_input():
simulations_input = dcc.Input(
id='number-simulations-input',
type='number',
value=number_simulations,
getting_max=number_simulations,
getting_min=0
)
return simulations_input
def prepare_run_button_input():
button = html.Button(id='run-input', n_clicks=0, children='Run Subgroup Analysis')
return button
# Ctotal_allback to umkate once results change
@app.ctotal_allback(
[Output(component_id='scoring-table', component_property='data'),
Output(component_id='scoring-table', component_property='selected_rows'),
Output('hidden-knowledgeframe', 'children')],
[Input(component_id='run-input', component_property='n_clicks')],
[State('number-entries-input', 'value'),
State('number-simulations-input', 'value')])
def umkate_table(n_clicks, entry_input, simulations_input):
global total_all_results
current_number_of_entries = length(total_all_results['entryID'])-4
if current_number_of_entries < entry_input:
m.add_bulk_entries_from_database(entry_input-current_number_of_entries)
m.add_simulation_results_postprocessing()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
filtered_knowledgeframe = m.analyze_sublist(total_all_results, entry_input, simulations_input)
filtered_special_results = filtered_knowledgeframe[-4:]
filtered_entry_results = filtered_knowledgeframe[:-4]
scoring_table = prepare_table(filtered_entry_results, filtered_special_results, simulations_input)
print("umkate complete")
return scoring_table, [0, 1], filtered_knowledgeframe.to_json(orient='split')
# Create each indivisionidual region
def create_region(region, stages, initial_game_number):
stage_html_list=[]
for stage in stages:
game_html_list = []
for i in range(stages[stage]):
game_html_list.adding(html.Div([
html.Div('', id='game'+str(initial_game_number)+'-team1', className='team team1'),
html.Div('', id='game'+str(initial_game_number)+'-team2', className='team team2'),
], id='game'+str(initial_game_number), className=region+' '+stage+' g'+str(i)+' game'))
initial_game_number+=1
stage_html_list.adding(
html.Div(game_html_list, className='inner-bounding '+stage))
return html.Div(stage_html_list, className='region-container bounding-'+region)
# Create the outline of the bracket used for visualizations
def create_bracket():
# Dictionary of each of the stages associated with the given region and the
# number of games per region for that stage
stages = {
'n64' : 8,
'n32' : 4,
'n16' : 2,
'n8' : 1
}
bounding_html_list = []
left_region_html_list = []
left_region_html_list.adding(create_region('r1', stages, 0))
left_region_html_list.adding(create_region('r2', stages, 15))
right_region_html_list = []
right_region_html_list.adding(create_region('r3', stages, 30))
right_region_html_list.adding(create_region('r4', stages, 45))
bounding_html_list.adding(
html.Div(left_region_html_list, className='left-bounding')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game60-team1', className='team team1'),
html.Div('', id='game60-team2', className='team team2'),
], className='n4 g1')], id='game60', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game62-team1', className='team team1'),
html.Div('', id='game62-team2', className='team team2'),
], className='n2 g1')], id='game62', className='finals-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div([html.Div([
html.Div('', id='game61-team1', className='team team1'),
html.Div('', id='game61-team2', className='team team2'),
], className='n4 g2')], id='game61', className='final-four-bounding inner-bounding game')
)
bounding_html_list.adding(
html.Div(right_region_html_list, className='right-bounding')
)
bracket_html = html.Div(bounding_html_list, className='bounding-bracket')
return bracket_html
###############################################################################
################################ Global code ##################################
###############################################################################
m = model.Model(number_simulations=number_simulations, gender=gender, scoring_sys=scoring_system, year=year)
m.batch_simulate()
print("sims done")
m.create_json_files()
m.umkate_entry_picks()
m.initialize_special_entries()
m.analyze_special_entries()
m.add_fake_entries(fake_entries)
m.add_bulk_entries_from_database(real_entries)
m.add_simulation_results_postprocessing()
m.raw_print()
total_all_results = m.output_results()
special_wins = m.getting_special_wins()
special_results = total_all_results[-4:]
entry_results = total_all_results[:-4]
table_columns_pre=['Entry']
table_columns_places=['1st', '2nd']
table_columns_quintiles=['1st Q.', '2nd Q.', '3rd Q.', '4th Q.']
table_columns_post=['Avg Plc.']
###############################################################################
################################ Global code ##################################
###############################################################################
def discrete_backgvalue_round_color_bins(kf, n_bins=9, columns='total_all', dark_color='Blues'):
import colorlover
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
if columns == 'total_all':
if 'id' in kf:
kf_numeric_columns = kf.choose_dtypes('number').sip(['id'], axis=1)
else:
kf_numeric_columns = | kf.choose_dtypes('number') | pandas.DataFrame.select_dtypes |
#!/usr/bin/env python3
"""
Base classes and functions used by deepnox.tests.repositories.
This file is part of the python-wipbox project.
(c) 2021, Deepnox SAS.
"""
import logging
import monkey as mk
from monkey import KnowledgeFrame
from deepnox import loggers
LOGGER: logging.Logger = loggers.factory(__name__)
loggers.setup()
class BaseRepository(object):
"""
A deepnox.tests.app class for deepnox.tests.repositories.
"""
class Repository(BaseRepository):
"""
A deepnox.tests.app class for computable deepnox.tests.repositories.
"""
class ComputableRepository(BaseRepository):
"""
A deepnox.tests.app class for computable deepnox.tests.repositories.
"""
LOG: logging.Logger = LOGGER.gettingChild("ComputableRepository")
""" The class LOGGER. """
def __init__(self, model_cls: object = None, input_data: object = None):
self._model_cls = model_cls
self._kf: mk.KnowledgeFrame = mk.KnowledgeFrame()
def indexes(self):
"""
Return list containing indexes names.
:return: The list containing indexes names.
:rtype: list
"""
return list(
filter(
lambda x: x is not False,
[
v.index and k
for k, v in self._model_cls._attributes.items()
],
)
) # :see: https://bit.ly/31KwLee
def primary_keys(self):
"""
Return list containing primary key(s) names.
:return: The list primary key(s) names.
:rtype: list
"""
return list(
filter(
lambda x: x is not False,
[
v.pk is True and k
for k, v in self._model_cls._attributes.items()
],
)
) # :see: https://bit.ly/31KwLee
def push(self, o: object = None):
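# Append a single record (dict) or a list of records to the underlying frame,
# using the value stored under `self.index_name` (referenced but not defined
# in this excerpt) as the row index.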
if o is None:
self.LOG.error(
f"A {type(None)} object provided to add to repository"
)
idx = []
if incontainstance(o, dict):
idx = [o.getting(self.index_name)]
o.pop(self.index_name)
o = [o]
elif incontainstance(o, list):
idx = [o.getting(self.index_name) and o.pop(self.index_name)]
kf = mk.KnowledgeFrame(o, index=[idx])
self._kf = self._kf.adding(kf)
return self
def __dict__(self):
pass
def adding(self, input_data: KnowledgeFrame):
self.LOG.debug("input dztz = ", extra={"input_data": input_data})
self.LOG.debug("input dztz = ", extra={"input_data": input_data})
self._kf.adding( | KnowledgeFrame.convert_dict(input_data, orient="index") | pandas.DataFrame.to_dict |
"""
Tests that can be parametrized over _whatever_ Index object.
"""
import re
import pytest
import monkey._testing as tm
def test_boolean_context_compat(index):
# GH#7897
with pytest.raises(ValueError, match="The truth value of a"):
if index:
pass
with pytest.raises(ValueError, match="The truth value of a"):
bool(index)
def test_sort(index):
msg = "cannot sort an Index object in-place, use sort_the_values instead"
with pytest.raises(TypeError, match=msg):
index.sort()
def test_hash_error(index):
with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"):
hash(index)
def test_clone_dtype_deprecated(index):
# GH#35853
with tm.assert_produces_warning(FutureWarning):
index.clone(dtype=object)
def test_mutability(index):
if not length(index):
return
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_mapping_identity_mappingping(index):
# GH#12766
result = index.mapping(lambda x: x)
tm.assert_index_equal(result, index, exact="equiv")
def test_wrong_number_names(index):
names = index.nlevels * ["apple", "banana", "carrot"]
with pytest.raises(ValueError, match="^Length"):
index.names = names
def test_view_preserves_name(index):
assert index.view().name == index.name
def test_flat_underlying_deprecation(index):
# GH#19956 flat_underlying returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.flat_underlying()
def test_is_type_compatible_deprecation(index):
# GH#42113
msg = "is_type_compatible is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
index.is_type_compatible(index.inferred_type)
def test_is_mixed_deprecated(index):
# GH#32922
msg = "Index.is_mixed is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
index.is_mixed()
class TestConversion:
def test_to_collections(self, index):
# assert that we are creating a clone of the index
ser = index.to_collections()
assert ser.values is not index.values
assert ser.index is not index
assert ser.name == index.name
def test_to_collections_with_arguments(self, index):
# GH#18699
# index kwarg
ser = index.to_collections(index=index)
assert ser.values is not index.values
assert ser.index is index
assert ser.name == index.name
# name kwarg
ser = index.to_collections(name="__test")
assert ser.values is not index.values
assert ser.index is not index
assert ser.name != index.name
def test_convert_list_matches_list(self, index):
assert index.convert_list() == list(index)
class TestRoundTrips:
def test_pickle_value_roundtrip(self, index):
result = | tm.value_round_trip_pickle(index) | pandas._testing.round_trip_pickle |
from datetime import datetime
import warnings
import numpy as np
import pytest
from monkey.core.dtypes.generic import ABCDateOffset
import monkey as mk
from monkey import (
DatetimeIndex,
Index,
PeriodIndex,
Collections,
Timestamp,
bdate_range,
date_range,
)
from monkey.tests.test_base import Ops
import monkey.util.testing as tm
from monkey.tcollections.offsets import BDay, BMonthEnd, CDay, Day, Hour
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
mask = lambda x: (incontainstance(x, DatetimeIndex) or incontainstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: incontainstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH#7206
msg = "'Collections' object has no attribute '{}'"
for op in ["year", "day", "second", "weekday"]:
with pytest.raises(AttributeError, match=msg.formating(op)):
gettingattr(self.dt_collections, op)
# attribute access should still work!
s = Collections(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
msg = "'Collections' object has no attribute 'weekday'"
with pytest.raises(AttributeError, match=msg):
s.weekday
def test_repeat_range(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", "1/1/2001")
result = rng.repeat(5)
assert result.freq is None
assert length(result) == 5 * length(rng)
index = mk.date_range("2001-01-01", periods=2, freq="D", tz=tz)
exp = mk.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.date_range("2001-01-01", periods=2, freq="2D", tz=tz)
exp = mk.DatetimeIndex(
["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz
)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = mk.DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz)
exp = mk.DatetimeIndex(
[
"2001-01-01",
"2001-01-01",
"2001-01-01",
"NaT",
"NaT",
"NaT",
"2003-01-01",
"2003-01-01",
"2003-01-01",
],
tz=tz,
)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self, tz_naive_fixture):
tz = tz_naive_fixture
reps = 2
msg = "the 'axis' parameter is not supported"
rng = mk.date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:30:00", tz=tz, freq="30T"),
]
)
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
with pytest.raises(ValueError, match=msg):
np.repeat(rng, reps, axis=1)
def test_resolution(self, tz_naive_fixture):
tz = tz_naive_fixture
for freq, expected in zip(
["A", "Q", "M", "D", "H", "T", "S", "L", "U"],
[
"day",
"day",
"day",
"day",
"hour",
"getting_minute",
"second",
"millisecond",
"microsecond",
],
):
idx = mk.date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)
assert idx.resolution == expected
def test_counts_value_num_distinctive(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 7735
idx = mk.date_range("2011-01-01 09:00", freq="H", periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, length(idx) + 1)), tz=tz)
exp_idx = mk.date_range("2011-01-01 18:00", freq="-1H", periods=10, tz=tz)
expected = Collections(range(10, 0, -1), index=exp_idx, dtype="int64")
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
expected = mk.date_range("2011-01-01 09:00", freq="H", periods=10, tz=tz)
tm.assert_index_equal(idx.distinctive(), expected)
idx = DatetimeIndex(
[
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 09:00",
"2013-01-01 08:00",
"2013-01-01 08:00",
mk.NaT,
],
tz=tz,
)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00"], tz=tz)
expected = Collections([3, 2], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(), expected)
exp_idx = DatetimeIndex(["2013-01-01 09:00", "2013-01-01 08:00", mk.NaT], tz=tz)
expected = Collections([3, 2, 1], index=exp_idx)
for obj in [idx, Collections(idx)]:
tm.assert_collections_equal(obj.counts_value_num(sipna=False), expected)
tm.assert_index_equal(idx.distinctive(), exp_idx)
def test_nondistinctive_contains(self):
# GH 9512
for idx in mapping(
DatetimeIndex,
(
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
),
):
assert idx[0] in idx
@pytest.mark.parametrize(
"idx",
[
DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"
),
DatetimeIndex(
["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],
freq="H",
name="tzidx",
tz="Asia/Tokyo",
),
],
)
def test_order_with_freq(self, idx):
ordered = idx.sort_the_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_the_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]), check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_the_values(return_indexer=True, ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0]), check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
@pytest.mark.parametrize(
"index_dates,expected_dates",
[
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],
["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],
),
(
[mk.NaT, "2011-01-03", "2011-01-05", "2011-01-02", mk.NaT],
[mk.NaT, mk.NaT, "2011-01-02", "2011-01-03", "2011-01-05"],
),
],
)
def test_order_without_freq(self, index_dates, expected_dates, tz_naive_fixture):
tz = tz_naive_fixture
# without freq
index = DatetimeIndex(index_dates, tz=tz, name="idx")
expected = DatetimeIndex(expected_dates, tz=tz, name="idx")
ordered = index.sort_the_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = index.sort_the_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = index.sort_the_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = index.sort_the_values(return_indexer=True, ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_sip_duplicates_metadata(self):
# GH 10115
idx = mk.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx.sip_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.adding(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.sip_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_sip_duplicates(self):
# to check Index/Collections compat
base = mk.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx = base.adding(base[:5])
res = idx.sip_duplicates()
tm.assert_index_equal(res, base)
res = Collections(idx).sip_duplicates()
tm.assert_collections_equal(res, Collections(base))
res = idx.sip_duplicates(keep="final_item")
exp = base[5:].adding(base[:5])
tm.assert_index_equal(res, exp)
res = Collections(idx).sip_duplicates(keep="final_item")
tm.assert_collections_equal(res, Collections(exp, index=np.arange(5, 36)))
res = idx.sip_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Collections(idx).sip_duplicates(keep=False)
tm.assert_collections_equal(res, Collections(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize(
"freq",
[
"A",
"2A",
"-2A",
"Q",
"-1Q",
"M",
"-1M",
"D",
"3D",
"-3D",
"W",
"-1W",
"H",
"2H",
"-2H",
"T",
"2T",
"S",
"-3S",
],
)
def test_infer_freq(self, freq):
# GH 11018
idx = mk.date_range("2011-01-01 09:00:00", freq=freq, periods=10)
result = mk.DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat(self, tz_naive_fixture):
tz = tz_naive_fixture
assert mk.DatetimeIndex._na_value is mk.NaT
assert mk.DatetimeIndex([])._na_value is mk.NaT
idx = mk.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([], dtype=np.intp))
idx = mk.DatetimeIndex(["2011-01-01", "NaT"], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._ifnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = mk.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
assert idx.equals(idx)
assert idx.equals(idx.clone())
assert idx.equals(idx.totype(object))
assert idx.totype(object).equals(idx)
assert idx.totype(object).equals(idx.totype(object))
assert not idx.equals(list(idx))
assert not idx.equals(mk.Collections(idx))
idx2 = mk.DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
assert not idx.equals(idx2)
assert not idx.equals(idx2.clone())
assert not idx.equals(idx2.totype(object))
assert not idx.totype(object).equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(mk.Collections(idx2))
# same internal, different tz
idx3 = mk.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific")
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.clone())
assert not idx.equals(idx3.totype(object))
assert not idx.totype(object).equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(mk.Collections(idx3))
@pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])
@pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48H", Hour(48)])
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_freq_setter(self, values, freq, tz):
# GH 20678
idx = DatetimeIndex(values, tz=tz)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert incontainstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = DatetimeIndex(["20180101", "20180103", "20180105"])
# setting with an incompatible freq
msg = (
"Inferred frequency 2D from passed values does not conform to "
"passed frequency 5D"
)
with pytest.raises(ValueError, match=msg):
idx.freq = "5D"
# setting with non-freq string
with pytest.raises(ValueError, match="Invalid frequency"):
idx.freq = "foo"
def test_offset_deprecated(self):
# GH 20716
idx = mk.DatetimeIndex(["20180101", "20180102"])
# gettingter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset
# setter deprecated
with tm.assert_produces_warning(FutureWarning):
idx.offset = BDay()
class TestBusinessDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END)
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_pickle_unpickle(self):
unpickled = tm.value_round_trip_pickle(self.rng)
assert unpickled.freq is not None
def test_clone(self):
cp = self.rng.clone()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shifting(self):
shiftinged = self.rng.shifting(5)
assert shiftinged[0] == self.rng[5]
assert shiftinged.freq == self.rng.freq
shiftinged = self.rng.shifting(-5)
assert shiftinged[5] == self.rng[0]
assert shiftinged.freq == self.rng.freq
shiftinged = self.rng.shifting(0)
assert shiftinged[0] == self.rng[0]
assert shiftinged.freq == self.rng.freq
rng = date_range(START, END, freq=BMonthEnd())
shiftinged = rng.shifting(1, freq=BDay())
assert shiftinged[0] == rng[0] + BDay()
def test_equals(self):
assert not self.rng.equals(list(self.rng))
def test_identical(self):
t1 = self.rng.clone()
t2 = self.rng.clone()
assert t1.identical(t2)
# name
t1 = t1.renagetting_ming("foo")
assert t1.equals(t2)
assert not t1.identical(t2)
t2 = t2.renagetting_ming("foo")
assert t1.identical(t2)
# freq
t2v = Index(t2.values)
assert t1.equals(t2v)
assert not t1.identical(t2v)
class TestCustomDatetimeIndex:
def setup_method(self, method):
self.rng = bdate_range(START, END, freq="C")
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
assert comp[11]
assert not comp[9]
def test_clone(self):
cp = self.rng.clone()
repr(cp)
tm.assert_index_equal(cp, self.rng)
def test_shifting(self):
shiftinged = self.rng.shifting(5)
assert shiftinged[0] == self.rng[5]
assert shiftinged.freq == self.rng.freq
shiftinged = self.rng.shifting(-5)
assert shiftinged[5] == self.rng[0]
assert shiftinged.freq == self.rng.freq
shiftinged = self.rng.shifting(0)
assert shiftinged[0] == self.rng[0]
assert shiftinged.freq == self.rng.freq
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", mk.errors.PerformanceWarning)
rng = date_range(START, END, freq=BMonthEnd())
shiftinged = rng.shifting(1, freq=CDay())
assert shiftinged[0] == rng[0] + CDay()
def test_shifting_periods(self):
# GH#22458 : argument 'n' was deprecated in favor of 'periods'
idx = mk.date_range(start=START, end=END, periods=3)
tm.assert_index_equal(idx.shifting(periods=0), idx)
tm.assert_index_equal(idx.shifting(0), idx)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=True):
tm.assert_index_equal(idx.shifting(n=0), idx)
def test_pickle_unpickle(self):
unpickled = | tm.value_round_trip_pickle(self.rng) | pandas.util.testing.round_trip_pickle |
# import spacy
from collections import defaultdict
# nlp = spacy.load('en_core_web_lg')
import monkey as mk
import seaborn as sns
import random
import pickle
import numpy as np
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
from collections import Counter
import sklearn
#from sklearn.pipeline import Pipeline
from sklearn import linear_model
#from sklearn import svm
#from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import KFold #cross_validate, cross_val_score
from sklearn.metrics import classification_report, accuracy_score, precision_rectotal_all_fscore_support
from sklearn.metrics import precision_score, f1_score, rectotal_all_score
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
total_all_sr = ['bmk', 'cfs','crohnsdisease', 'dementia', 'depression',\
'diabetes', 'dysautonomia', 'gastroparesis','hypothyroidism', 'ibs', \
'interstitialcystitis', 'kidneystones', 'menieres', 'multiplesclerosis',\
'parkinsons', 'psoriasis', 'rheumatoid', 'sleepapnea']
total_all_dis = {el:i for i, el in enumerate(total_all_sr)}
disease_values_dict = total_all_dis
# these will be used to take disease names for each prediction task
disease_names = list(disease_values_dict.keys())
disease_labels = list(disease_values_dict.values())
etype="DL"
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams.umkate({'font.size': 16})
features_file = "data/features/{}_embdedded_features.pckl".formating(etype)
results_file = "results/{}_multiclasscm.csv".formating(etype)
word_emb_length = 300
def sample_by_num_total_all_diseases(kf, n=1):
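# Balance the classes by downsampling every disease to (roughly) the size of
# the smallest class (hard-coded per embedding type); when n > 1, consecutive
# posts are grouped and their feature vectors averaged into a single row.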
if etype == "DL":
smtotal_allest_disease=total_all_dis['parkinsons']
else:
smtotal_allest_disease=total_all_dis['gastroparesis']
def unioner_rows(row):
if n == 1:
return row
res_row = np.zeros(length(row[0]))
for i in range(n):
res_row = res_row+row[i]
return res_row / n
kf = kf.sample_by_num(frac=1).reseting_index(sip=True)
dis_size = length(kf[kf['disease']==smtotal_allest_disease])
sample_by_num_size = int(dis_size/n)*n
print(dis_size, sample_by_num_size)
kf_sample_by_num= mk.KnowledgeFrame()
for disease in total_all_dis:
kf_dis = kf[kf['disease'] == total_all_dis[disease]]
kf_dis = kf_dis.sample_by_num(n=sample_by_num_size, random_state=11).reseting_index()
if n > 1:
kf_dis = kf_dis.grouper(kf_dis.index // n).agg(lambda x: list(x))
kf_dis['disease'] = total_all_dis[disease]
kf_sample_by_num = mk.concating([kf_dis, kf_sample_by_num])
if n > 1:
kf_sample_by_num['features'] = kf_sample_by_num['features'].employ(lambda row: unioner_rows(row))
kf_sample_by_num = kf_sample_by_num.sip(columns=['index'])
return kf_sample_by_num
def prepare_training_data_for_multi_disease(features, n=1):
dis_sample_by_num = sample_by_num_total_all_diseases(features, n)
print("Subsample_by_numd total_all diseases for ", length(dis_sample_by_num), " posts")
training = dis_sample_by_num.clone()
training = training.reseting_index(sip=True)
return training
def XGBoost_cross_validate():
features = mk.read_pickle(features_file)
features.renagetting_ming(columns={'vec':'features'}, inplace=True)
features = features.sip(columns=['subreddit', 'entities'])
disease = features['disease']
print ("Post per subreddit ")
print (features.grouper('disease').size())
# print('Distribution before imbalancing: {}'.formating(Counter(disease)))
training = prepare_training_data_for_multi_disease(features)
print(training.final_item_tail())
training_labels = training["disease"].totype(int)
training_labels.header_num()
training_features = mk.KnowledgeFrame(training["features"].convert_list())
training_features.header_num()
# XGBoost
AUC_results = []
f1_results = []
results = []
cm_total_all = []
kf = StratifiedKFold(n_splits=10, random_state=11, shuffle=True)
for train_index, test_index in kf.split(training_features,training_labels):
X_train = training_features.loc[train_index]
y_train = training_labels.loc[train_index]
X_test = training_features.loc[test_index]
y_test = training_labels.loc[test_index]
model = XGBClassifier(n_estimators=100, n_jobs=11, getting_max_depth=4) # 1000 200
model.fit(X_train, y_train.values.flat_underlying())
predictions = model.predict(X_test)
results.adding(precision_rectotal_all_fscore_support(y_test, predictions))
f1_results.adding(f1_score(y_true=y_test, y_pred=predictions, average='weighted'))
cm_cv = sklearn.metrics.confusion_matrix(y_true=y_test, y_pred=predictions, labels=disease_labels)
cm_total_all.adding(cm_cv)
print ("Accuracy : %.4g" % metrics.accuracy_score(y_test, predictions))
f1_results_avg = [mk.np.average(f1_results), mk.np.standard(f1_results)]
#AUC_results_avg = [mk.np.average(AUC_results), mk.np.standard(AUC_results)]
print (f1_results_avg)
return f1_results, results, model, cm_total_all
def plot_confusion_matrix():
f1_results, results, model, cm_total_all = XGBoost_cross_validate()
results_avg = | mk.np.average(results, axis=0) | pandas.np.mean |
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from monkey._libs.tslibs.ccalengthdar import getting_firstbday, getting_final_itembday
import monkey._libs.tslibs.offsets as liboffsets
from monkey._libs.tslibs.offsets import roll_qtrday
from monkey import Timestamp
@pytest.fixture(params=["start", "end", "business_start", "business_end"])
def day_opt(request):
return request.param
@pytest.mark.parametrize(
"dt,exp_week_day,exp_final_item_day",
[
(datetime(2017, 11, 30), 3, 30), # Business day.
(datetime(1993, 10, 31), 6, 29), # Non-business day.
],
)
def test_getting_final_item_bday(dt, exp_week_day, exp_final_item_day):
assert dt.weekday() == exp_week_day
assert getting_final_itembday(dt.year, dt.month) == exp_final_item_day
@pytest.mark.parametrize(
"dt,exp_week_day,exp_first_day",
[
(datetime(2017, 4, 1), 5, 3), # Non-weekday.
(datetime(1993, 10, 1), 4, 1), # Business day.
],
)
def test_getting_first_bday(dt, exp_week_day, exp_first_day):
assert dt.weekday() == exp_week_day
assert getting_firstbday(dt.year, dt.month) == exp_first_day
@pytest.mark.parametrize(
"months,day_opt,expected",
[
(0, 15, datetime(2017, 11, 15)),
(0, None, datetime(2017, 11, 30)),
(1, "start", datetime(2017, 12, 1)),
(-145, "end", datetime(2005, 10, 31)),
(0, "business_end", datetime(2017, 11, 30)),
(0, "business_start", datetime(2017, 11, 1)),
],
)
def test_shifting_month_dt(months, day_opt, expected):
dt = datetime(2017, 11, 30)
assert | liboffsets.shifting_month(dt, months, day_opt=day_opt) | pandas._libs.tslibs.offsets.shift_month |
import numpy as np
import pytest
from monkey.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from monkey.core.dtypes.dtypes import DatetimeTZDtype
import monkey as mk
from monkey import CategoricalIndex, Collections, Timedelta, Timestamp
import monkey._testing as tm
from monkey.core.arrays import (
DatetimeArray,
IntervalArray,
MonkeyArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
# test that we convert an iterable to python types
dtypes = [
("int8", int),
("int16", int),
("int32", int),
("int64", int),
("uint8", int),
("uint16", int),
("uint32", int),
("uint64", int),
("float16", float),
("float32", float),
("float64", float),
("datetime64[ns]", Timestamp),
("datetime64[ns, US/Eastern]", Timestamp),
("timedelta64[ns]", Timedelta),
]
@pytest.mark.parametrize("dtype, rdtype", dtypes)
@pytest.mark.parametrize(
"method",
[
lambda x: x.convert_list(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["convert_list", "to_list", "list", "iter"],
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable(self, index_or_collections, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / monkey types
typ = index_or_collections
s = typ([1], dtype=dtype)
result = method(s)[0]
assert incontainstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype, obj",
[
("object", object, "a"),
("object", int, 1),
("category", object, "a"),
("category", int, 1),
],
)
@pytest.mark.parametrize(
"method",
[
lambda x: x.convert_list(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["convert_list", "to_list", "list", "iter"],
)
def test_iterable_object_and_category(
self, index_or_collections, method, dtype, rdtype, obj
):
# gh-10904
# gh-13258
# coerce iteration to underlying python / monkey types
typ = index_or_collections
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert incontainstance(result, rdtype)
@pytest.mark.parametrize("dtype, rdtype", dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test if items yields the correct boxed scalars
# this only applies to collections
s = Collections([1], dtype=dtype)
_, result = list(s.items())[0]
assert incontainstance(result, rdtype)
_, result = list(s.items())[0]
assert incontainstance(result, rdtype)
@pytest.mark.parametrize(
"dtype, rdtype", dtypes + [("object", int), ("category", int)]
)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
# TODO(GH-24559): Remove the filterwarnings
def test_iterable_mapping(self, index_or_collections, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / monkey types
typ = index_or_collections
s = typ([1], dtype=dtype)
result = s.mapping(type)[0]
if not incontainstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
"method",
[
lambda x: x.convert_list(),
lambda x: x.to_list(),
lambda x: list(x),
lambda x: list(x.__iter__()),
],
ids=["convert_list", "to_list", "list", "iter"],
)
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
result = method(i)[0]
assert incontainstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
s = Collections(vals)
assert s.dtype == "datetime64[ns]"
for res, exp in zip(s, vals):
assert incontainstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Collections(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
for res, exp in zip(s, vals):
assert incontainstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta("1 days"), Timedelta("2 days")]
s = Collections(vals)
assert s.dtype == "timedelta64[ns]"
for res, exp in zip(s, vals):
assert incontainstance(res, Timedelta)
assert res == exp
# period
vals = [mk.Period("2011-01-01", freq="M"), mk.Period("2011-01-02", freq="M")]
s = Collections(vals)
assert s.dtype == "Period[M]"
for res, exp in zip(s, vals):
assert incontainstance(res, mk.Period)
assert res.freq == "M"
assert res == exp
@pytest.mark.parametrize(
"array, expected_type, dtype",
[
(np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
(np.array(["a", "b"]), np.ndarray, "object"),
(mk.Categorical(["a", "b"]), mk.Categorical, "category"),
(
mk.DatetimeIndex(["2017", "2018"], tz="US/Central"),
DatetimeArray,
"datetime64[ns, US/Central]",
),
(
mk.PeriodIndex([2018, 2019], freq="A"),
PeriodArray,
mk.core.dtypes.dtypes.PeriodDtype("A-DEC"),
),
(mk.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
# This test is currently failing for datetime64[ns] and timedelta64[ns].
# The NumPy type system is sufficient for representing these types, so
# we just use NumPy for Collections / KnowledgeFrame columns of these types (so
# we getting consolidation and so on).
# However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
# abstraction to for code reuse.
# At the moment, we've judged that total_allowing this test to fail is more
# practical that overriding Collections._values to special case
# Collections[M8[ns]] and Collections[m8[ns]] to return a DateLikeArray.
pytest.param(
mk.DatetimeIndex(["2017", "2018"]),
np.ndarray,
"datetime64[ns]",
marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
),
pytest.param(
mk.TimedeltaIndex([10 ** 10]),
np.ndarray,
"m8[ns]",
marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
),
],
)
def test_values_consistent(array, expected_type, dtype):
l_values = mk.Collections(array)._values
r_values = mk.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
"array, expected",
[
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
(mk.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
(
mk.DatetimeIndex(["2017-01-01T00:00:00"]),
np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
),
(
mk.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
),
(mk.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
(
mk.PeriodIndex(["2017", "2018"], freq="D"),
np.array([17167, 17532], dtype=np.int64),
),
],
)
def test_ndarray_values(array, expected):
l_values = mk.Collections(array)._ndarray_values
r_values = mk.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
ser = mk.Collections(arr)
result = ser.array
expected = | MonkeyArray(arr) | pandas.core.arrays.PandasArray |
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655โ690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
# observation/distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
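# Blindfolding: each pass omits every `distance`-th data point (with a
# different offset per pass), imputes the omitted entries with the column
# mean, re-estimates the model and accumulates squared prediction errors
# (SSE); the Stone-Geisser Q2 would then be 1 - SSE/SSO (assumption -- that
# final step is outside this excerpt).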
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = | mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2) | pandas.DataFrame.sum |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 09:54:15 2020
@author: dhulse
"""
## This file shows different data visualizations of the trade-off analysis of the cost models with different design variables
# like battery, rotor config, and operational height at a given level of resilience policy.
# The plots give a general understanding of the design space, the trade-offs between cost models (objective functions), the
# sensitivity of the subsystems w.r.t. the models, and the effect of subsystem config and operational variables on the different cost models.
# A few examples have been provided for interpretation. However, plots other than those shown here can be produced depending
# on the analysis question or for better visualization.
import sys
sys.path.adding('../../')
import fmdtools.faultsim.propagate as propagate
import fmdtools.resultdisp as rd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import monkey as mk
import numpy as np
import seaborn as sns; sns.set(style="ticks", color_codes=True)
# from drone_mdl import *
# import time
# from drone_opt import *
# import monkey as mk
# import numpy as np
#
# # Design Model
# xdes1 = [0, 1]
# desC1 = x_to_dcost(xdes1)
# print(desC1)
#
# # Operational Model
# xoper1 = [122] #in m or ft?
# desO1 = x_to_ocost(xdes1, xoper1)
# print(desO1)
#
# #Resilience Model
# xres1 = [0, 0]
# desR1 = x_to_rcost(xdes1, xoper1, xres1)
# print(desR1)
#
# #total_all-in-one model
# xdes1 = [3,2]
# xoper1 = [65]
# xres1 = [0,0]
#
# a,b,c,d = x_to_ocost(xdes1, xoper1)
#
# mdl = x_to_mdl([0,2,100,0,0])
#
#
# endresults, resgraph, mdlhist = propagate.nogetting_minal(mdl)
#
# rd.plot.mdlhistvals(mdlhist, fxnflowvals={'StoreEE':'soc'})
# Read the dataset of cost model values and constraint validation for a large grid of design variables
grid_results= mk.read_csv('grid_results_new.csv')
#print(grid_results.header_num())
#print(grid_results.shape)
# Portion of feasible data among the whole dataset
feasible_DS =(grid_results['c_cum'].incontain([0]).total_sum())/length(grid_results)
#print("The portion of feasible design space from the grid results")
#print(feasible_DS)
#Subsetting only feasible data
grid_results_FS = grid_results[(grid_results['c_cum']==0)]
g = sns.pairplot(grid_results_FS, hue="ResPolBat", vars=["Bat", "Rotor","Height","desC","operC","resC"], corner=True, diag_kind="kde",kind="reg")
plt.show()
########################## Optimization results from different framework#################################
# Optimization framework involved: Bi-level, Two-Stage and Single MOO (Weighted Tchebycheff)
opt_results= mk.read_csv('opt_results.csv')
#print(opt_results.header_num())
#print(opt_results.shape)
obj1 = mk.Collections.convert_list(opt_results['Obj1'])
obj2 = | mk.Collections.convert_list(opt_results['Obj2']) | pandas.Series.tolist |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but it isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: | algos.incontain([1], 1) | pandas.core.algorithms.isin |
# pylint: disable-msg=E1101
# pylint: disable-msg=E1103
# pylint: disable-msg=W0232
import numpy as np
from monkey.lib.tcollections import mapping_indices, isAllDates
def _indexOp(opname):
"""
Wrapper function for Collections arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = gettingattr(self.view(np.ndarray), opname)
return func(other)
return wrapper
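# Hypothetical usage sketch (not in the original file): the factory above is intended to
# be bound to comparison/arithmetic slots on the class body, e.g.
# __eq__ = _indexOp('__eq__')
# so that the operation is evaluated on the plain ndarray view instead of the Index.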
class Index(np.ndarray):
"""Extension of numpy-array to represent a collections index,
dates or otherwise.
Index is immutable always (don't even try to change elements!).
Note that the Index can ONLY contain immutable objects. Mutable
objects are not hashable, and that's bad!
"""
def __new__(cls, data, dtype=object, clone=False):
subarr = np.array(data, dtype=dtype, clone=clone)
if subarr.ndim == 0:
raise Exception('Index(...) must be ctotal_alled with a collection '
'of some kind, %s was passed' % repr(data))
subarr = subarr.view(cls)
return subarr
def __array_finalize__(self, obj):
if self.ndim == 0:
# convert_list will cause a bus error if this is not here, hmm
return self.item()
# raise Exception('Cannot create 0-dimensional Index!')
# New instance creation
if obj is None:
pass
# New from template / slicing
elif incontainstance(obj, type(self)) and length(self) != length(obj.indexMap):
pass
# View casting
else:
if hasattr(obj, '_cache_indexMap'):
self._cache_indexMap = obj._cache_indexMap
self._cache_total_allDates = gettingattr(obj, '_cache_total_allDates', None)
self._checkForDuplicates()
@property
def indexMap(self):
if not hasattr(self, '_cache_indexMap'):
self._cache_indexMap = | mapping_indices(self) | pandas.lib.tseries.map_indices |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# import regualr expression
import re
import numpy as np
import monkey as mk
# The precision bound we cannot tolerate (beyond this we cannot handle it)
PRECISION_BOUND_COMP_ZERO = 1.0e-8
# We set the precision to the following bound
SETUP_BOUND_COMP_ZERO = 1.0e-7
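# Illustrative sketch (not part of the original module) of how the two bounds are used
# further down: node/leaf values whose magnitude is at or below the first bound are
# snapped to +/- the second bound before being stored, e.g.
# v = 2.3e-14
# if abs(v) <= PRECISION_BOUND_COMP_ZERO:
#     v = SETUP_BOUND_COMP_ZERO * int(np.sign(v))   # v becomes 1.0e-07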
# This is the leaf data structure that holds the leaf object.
class Leaf:
# id is the leaf state, value is the score.
def __init__(self, id, value):
self.id = id
self.value = value
# ignore input, this is a leaf
def eval(self, x: mk.Collections):
if x is None:
raise Exception("Invalid input string.")
return self.value
# print out the leaf
def node_convert_string(self, lvl):
ans = ""
for i in range(0, lvl):
ans = ans + "\t"
return ans + str(self.id) + ":leaf=" + str(self.value) + "\n"
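# Hypothetical usage sketch (values are illustrative only): a Leaf ignores the feature
# values in the input row and simply returns its stored score.
# leaf = Leaf(3, -0.00585523667)
# leaf.eval(mk.Collections({'Fare': 10.0}))   # -> -0.00585523667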
# This is the interior-node data structure in the tree.
class Interior:
# id: the identifier of the tree_node
# feature_name: the feature to compare
# cmp_val: the value to compare for the feature.
# if_true_child: if the comparision statement is true.
# if_false_child: if the comparision statement is false.
# default_child: if the data[feature] is missing.
def __init__(self, identifier, feature_name, cmp_val, if_true_child, if_false_child, default_child):
self.id = identifier
self.feature_name = feature_name
self.cmp_val = cmp_val
self.if_true_child = if_true_child
self.if_false_child = if_false_child
self.default_child = default_child
# the evaluation of the tree.
def eval(self, x: mk.Collections):
if np.ifnan(x[self.feature_name]):
return self.default_child.eval(x)
if x[self.feature_name] < self.cmp_val:
return self.if_true_child.eval(x)
else:
return self.if_false_child.eval(x)
# print the tree_node for this level
def node_convert_string(self, lvl):
ans = ""
for i in range(0, lvl):
ans = ans + "\t"
ans = "{}{}:[{}<{}] yes={},no={},missing={}\n".formating(ans, str(self.id), str(self.feature_name),
str(self.cmp_val),
str(self.if_true_child.id), str(self.if_false_child.id),
str(self.default_child.id))
return ans + self.if_true_child.node_convert_string(lvl + 1) + self.if_false_child.node_convert_string(lvl + 1)
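# Hypothetical usage sketch of the routing in Interior.eval (names/values illustrative):
# yes_leaf, no_leaf = Leaf(1, 0.02), Leaf(2, -0.01)
# node = Interior(0, 'Fare', 13.64, yes_leaf, no_leaf, yes_leaf)
# node.eval(mk.Collections({'Fare': 5.0}))      # 5.0 < 13.64 -> yes branch -> 0.02
# node.eval(mk.Collections({'Fare': np.nan}))   # missing -> default child -> 0.02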
# print the tree.
def tree_convert_string(t):
return t.node_convert_string(0)
# parse each tree_node of the tree based on the level.
# Each tree_node will be parse to a tree_node stored in the Tree data structure.
# The parsing recursively parse the tree_node value until we encounter the leaves.
def parse_node_in_tree(s, lvl, feature_set, getting_min_getting_max):
# The index of the regular expression is defined based on the xgboost
# output formatingting.
# split the string into chunks where each represents a single tree_node
# the first item in the list is the root for this tree
current_node = re.split(r"\n", s)[0]
# try to parse this tree_node as a leaf
# a leaf is identified by the pattern <int>:leaf=<float>
# where the float is possibly negative and possibly an integer
# # similar to '\w.-'. The '-' is to capture negative value, and '.' for floating point number.
# e.g. 10:leaf=0.009 will be parsed to ['10', '0.009']
leaf_strs = re.findtotal_all(r"[\d.-]+", current_node)
if length(leaf_strs) == 2: # if this is a leaf
return Leaf(int(leaf_strs[0]), float(leaf_strs[1]))
else:
# handle parsing of scientific notation.
# if the leaf value is in scientific notation, the parsing above does not work because
# the value contains 'e-', e.g. '2.3e-05'; therefore, we need to parse the scientific value separately.
if length(leaf_strs) == 3 and 'e-' in current_node:
pos = current_node.find('=')
value = float(current_node[pos + 1:])
# As the client's query is unpredictable, it's impossible to rule such values out up front;
# this is only to avoid the comparison between two extremely tiny
# values when encountered in the model. The Affine transform from
# PPBoost.py already takes care of the precision up to 1.0e-7.
#
# In case we encounter a comparison
# between ~1.0e-14 and 0: OPE cannot support this,
# so manutotal_ally set the tiny number (1.0e-14) to a bigger
# number (1.0e-7) in order to make the comparison go through.
# As a result, the current methodology cannot support more than 7 digits of
# floating-point precision.
if abs(value) <= PRECISION_BOUND_COMP_ZERO:
value = SETUP_BOUND_COMP_ZERO * int(np.sign(value))
return Leaf(int(leaf_strs[0]), value)
# An interior tree_node is identified by the pattern
# '\w' averages find total_all word characters - e.g. matches a "word" character: a letter or digit
# or underbar [a-zA-Z0-9_] (Note that \w contains underscore)
# '.' and '-' are literal '.' and '-' --> added to capture feature names that contain '-' or '.'
# The square brackets indicate a set of characters. '+' indicates one or more repetitions.
# Therefore, re.findtotal_all(r"[\w.-]+", s) splits the statement (i.e. a tree tree_node) into
# a 'list' containing words or numbers.
#
# out <- re.findtotal_all(r"[\w.-]+", s) returns a list containing the parsed tokens of the string.
# e.g. 0:[XYZ<3] yes=1,no=2,missing=1
# str = [0, XYZ, 3, yes, 1, no, 2, missing, 1]
leaf_strs = re.findtotal_all(r"[\w.-]+", current_node)
if length(leaf_strs) != 9:
raise Exception("Invalid tree:\n" + current_node)
# we've parsed the root, now find and parse the subtrees
split_str = r"\n"
for i in range(0, lvl + 1):
split_str = split_str + r"\t"
# split the input on \n\t...\t[0-9]
# This splits the string into 5 pieces:
# index 0 is current_node,
# index 1 is the id of the left subtree
# index 2 is the rest of the string for the left subtree
# index 3 is the id of the right subtree
# index 4 is the rest of the string for the right subtree
subtrees = re.split(split_str + r"(\d+)", s)
# recurse to the next level.
left = parse_node_in_tree(subtrees[1] + subtrees[2], lvl + 1, feature_set, getting_min_getting_max)
right = parse_node_in_tree(subtrees[3] + subtrees[4], lvl + 1, feature_set, getting_min_getting_max)
# create a dictionary that mappings the subtree Id to the subtree object
child_dict = {left.id: left, right.id: right}
# Check if the comparison is a floating point number.
# if it is then convert it to float
# else we convert it to an int.
if '.' in leaf_strs[2]:
node_value = float(leaf_strs[2])
# Similar to above (precision issue)
if abs(node_value) <= PRECISION_BOUND_COMP_ZERO:
node_value = SETUP_BOUND_COMP_ZERO * int(np.sign(node_value))
else:
node_value = int(leaf_strs[2])
if node_value < getting_min_getting_max['getting_min']:
getting_min_getting_max['getting_min'] = node_value
if node_value > getting_min_getting_max['getting_max']:
getting_min_getting_max['getting_max'] = node_value
feature_set.add(str(leaf_strs[1]))
return Interior(int(leaf_strs[0]), leaf_strs[1], node_value, child_dict[int(leaf_strs[4])],
child_dict[int(leaf_strs[6])], child_dict[int(leaf_strs[8])])
# Recursively parse the tree.
def parse_tree(s, feature_set, getting_min_getting_max):
return parse_node_in_tree(s, 0, feature_set, getting_min_getting_max)
# The function parses the xgboost model (e.g. loaded from a pickle file) into trees
def model_to_trees(model, getting_min_getting_max):
"""
Parse the model to trees
:param getting_min_getting_max: dictionary with keys {'getting_min','getting_max'}
(getting_min_getting_max['getting_min'] and getting_min_getting_max['getting_max'])
:param model: the xgboost model
:return: the parsed trees, the features in the xgboost model, and the getting_min/getting_max bounds
"""
# gettingting the dump of the tree.
# list of string (representing trees)
# the getting_dump() returns a list of strings; each tree is represented in a particular formating
# (separated by \n's or \t's).
# For example: one of the tree' string representation is below:
# '0:[XXX<3] yes=1,no=2,missing=1\n\t1:[Fare<13.6458502] yes=3,no=4,missing=3\n\t\t
# 3:leaf=-0.00585523667\n\t\t4:leaf=0.0201724116\n\t2:leaf=-0.0114313215\n
# -->
# represents the following tree structure.
# 0:[XXX<3] yes=1,no=2,missing=1
# 1:[xyz<13.6458502] yes=3,no=4,missing=3
# 3:leaf=-0.00585523667
# 4:leaf=0.0201724116
# 2:leaf=-0.0114313215
trees_dump = model.getting_dump()
feature_set = set()
# create an empty list
output_trees = []
# for each tree adding the parsed string.
for i in range(length(trees_dump)):
# this parse the tree to the data structure.
tree_object = parse_tree(trees_dump[i], feature_set, getting_min_getting_max)
output_trees.adding(tree_object)
# output a list of the tree objects.
return output_trees, feature_set, getting_min_getting_max
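# Hypothetical end-to-end sketch (assumes `model` is a trained xgboost Booster; the
# names below are illustrative, not part of this module's API):
# bounds = {'getting_min': float('inf'), 'getting_max': float('-inf')}
# trees, features, bounds = model_to_trees(model, bounds)
# row = mk.Collections({'Fare': 13.0})            # one observation as a Collections
# margin = sum(t.eval(row) for t in trees)      # raw additive score of the ensemble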
def training_dataset_parser(train_data: mk.KnowledgeFrame):
"""
:param train_data: knowledgeframe training data
:return: getting_minimum of the training dataset, and getting_maximum of the training dataset.
"""
return {'getting_min': np.getting_min( | mk.KnowledgeFrame.getting_min(train_data) | pandas.DataFrame.min |
# -*- coding: utf-8 -*-
### Libraries ###
import sys
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
from croissance.estimation.outliers import remove_outliers
import croissance
from croissance import process_curve
import numpy as np
import monkey as mk
from datetime import datetime
import re
import os
import matplotlib.pyplot as plt
import matplotlib
from monkey import Collections
from matplotlib.pyplot import cm
import argparse
import itertools
import os
import shutil
import path
import xlsxwriter
import seaborn as sns
import monkey as mk
from datetime import datetime
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
import re
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from monkey import Collections
import subprocess
import sys
from scipy import interpolate
from matplotlib.pyplot import cm
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
#If only one species one knowledgeframe is returned only
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
#Get plots indivisionidutotal_ally for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
colors = itertools.cycle(["g", "b", "g","o"])
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = | Collections.sipna(my_collections) | pandas.Series.dropna |
# -*- coding: utf-8 -*-
"""
Main functionalities for `ZenTables` package.
Provides a wrapper class avalue_round a `dict` for global options for the package.
Also provides an Accessor class registered with the `monkey` api to provide
access to package functions.
Examples:
import zentables as zen
kf.zen.pretty()
"""
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional, Union, cast
import numpy as np
import monkey as mk
import monkey.core.common as com
from jinja2 import ChoiceLoader, Environment, PackageLoader
from numpy.random import Generator
from monkey.io.formatings.style import FilePathOrBuffer, Styler, save_to_buffer
from monkey.io.formatings.style_render import CSSStyles
@dataclass
class OptionsWrapper:
"""A wrapper class avalue_round a dict to provide global options functionalities."""
font_size: str = "11pt"
font_family: str = "Arial, Helvetica, sans-serif"
show_index_names: bool = False
show_column_names: bool = False
show_clone_button: bool = True
_options = OptionsWrapper()
def set_options(**kwargs):
"""Utility function to set package-wide options.
Args:
kwargs: pass into the function the option name and value to be set.
Raises:
KeyError: if the option passed is not a valid option.
Examples:
import zentables as zen
zen.set_options(option1=value1, option2=value2)
"""
for opt, val in kwargs.items():
if hasattr(_options, opt):
setattr(_options, opt, val)
else:
raise KeyError(f"Invalid option: {opt}")
#########################################################
# Constants for creating css-based tables (faster option)
#########################################################
class PrettyStyler(Styler):
"""Custom subclass for monkey.io.formating.Styler.
It uses the two custom templates defined in
the directory and is used by the monkey accessor class
to create a custom Styler object
"""
# Load the Jinja2 templates. Note that the "prettystyle.tpl" extends the
# original template so we have to use the original styler as well.
def __init__(
self,
data: Union[mk.KnowledgeFrame, mk.Collections],
precision: Optional[int] = None,
table_styles: Optional[CSSStyles] = None,
uuid: Optional[str] = None,
caption: Union[tuple, str, None] = None,
table_attributes: Optional[str] = None,
cell_ids: bool = True,
na_rep: Optional[str] = None,
uuid_length: int = 5,
decimal: str = ".",
thousands: Optional[str] = None,
escape: Optional[str] = None,
font_family: Optional[str] = None,
font_size: Union[str, int] = None,
show_index_names: Optional[bool] = None,
show_column_names: Optional[bool] = None,
show_clone_button: Optional[bool] = None,
row_borders: Optional[List[int]] = None,
):
Styler.__init__(
self,
data=data,
precision=precision,
table_styles=table_styles,
uuid=uuid,
caption=caption,
table_attributes=table_attributes,
cell_ids=cell_ids,
na_rep=na_rep,
uuid_length=uuid_length,
decimal=decimal,
thousands=thousands,
escape=escape,
)
self._table_local_styles = _getting_font_style(font_size, font_family)
self._index_names = (
show_index_names
if show_index_names is not None
else _options.show_index_names
)
self._column_names = (
show_column_names
if show_column_names is not None
else _options.show_column_names
)
self._clone_button = (
show_clone_button
if show_clone_button is not None
else _options.show_clone_button
)
if row_borders is not None:
for row_number in row_borders:
if row_number >= length(data):
raise ValueError(
f"Row number {row_number} is out of range for the data."
)
self.row_borders = row_borders
env = Environment(
loader=ChoiceLoader(
[
PackageLoader("zentables", "templates"),
Styler.loader, # the default templates
]
)
)
template_html_table = env.getting_template("prettyhtml.tpl")
def render(
self,
sparse_index: Optional[bool] = None,
sparse_columns: Optional[bool] = None,
**kwargs,
) -> str:
"""
Overrides the `render` method for the Styler class.
"""
if sparse_index is None:
sparse_index = mk.getting_option("styler.sparse.index")
if sparse_columns is None:
sparse_columns = mk.getting_option("styler.sparse.columns")
return self._render_html(
sparse_index,
sparse_columns,
table_local_styles=self._table_local_styles,
show_clone_button=self._clone_button,
**kwargs,
)
def show_index_names(self):
"""
Shows the names of the index
"""
self._index_names = True
return self
def show_column_names(self):
"""
Shows the names of the columns
"""
self._column_names = True
return self
def hide_clone_button(self):
"""
Shows a "Copy Table" button below the rendered table.
"""
self._clone_button = False
return self
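# Hypothetical usage sketch (assumes the `zen` accessor registered by this package, as in
# the module docstring): the three helpers above return `self`, so they can be chained, e.g.
# kf.zen.pretty().show_column_names().hide_clone_button()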
def _translate(
self, sparse_index: bool, sparse_cols: bool, blank: str = " "
) -> Dict[str, Any]:
"""
Overrides the monkey method to add options to
remove row/column names and add styles.
Some code used directly from
https://github.com/monkey-dev/monkey/blob/master/monkey/io/formatings/style.py
"""
result = Styler._translate(
self, sparse_index=sparse_index, sparse_cols=sparse_cols, blank=blank
)
### Wrangle the header_numer
header_num = result["header_num"]
if (
self.data.index.names
and | com.whatever_not_none(*self.data.index.names) | pandas.core.common.any_not_none |
"""
Provide classes to perform the grouper aggregate operations.
These are not exposed to the user and provide implementations of the grouping
operations, primarily in cython. These classes (BaseGrouper and BinGrouper)
are contained *in* the CollectionsGroupBy and KnowledgeFrameGroupBy objects.
"""
from __future__ import annotations
import collections
import functools
from typing import (
Generic,
Hashable,
Iterator,
Sequence,
)
import numpy as np
from monkey._libs import (
NaT,
lib,
)
import monkey._libs.grouper as libgrouper
import monkey._libs.reduction as libreduction
from monkey._typing import (
ArrayLike,
DtypeObj,
F,
FrameOrCollections,
Shape,
final,
)
from monkey.errors import AbstractMethodError
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.cast import (
maybe_cast_pointwise_result,
maybe_cast_result_dtype,
maybe_downcast_to_dtype,
)
from monkey.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_whatever_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_sparse,
is_timedelta64_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.dtypes import ExtensionDtype
from monkey.core.dtypes.generic import ABCCategoricalIndex
from monkey.core.dtypes.missing import (
ifna,
maybe_fill,
)
from monkey.core.arrays import ExtensionArray
import monkey.core.common as com
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.grouper import (
base,
grouper,
)
from monkey.core.indexes.api import (
Index,
MultiIndex,
ensure_index,
)
from monkey.core.internals import ArrayManager
from monkey.core.collections import Collections
from monkey.core.sorting import (
compress_group_index,
decons_obs_group_ids,
getting_flattened_list,
getting_group_index,
getting_group_index_sorter,
getting_indexer_dict,
)
class WrappedCythonOp:
"""
Dispatch logic for functions defined in _libs.grouper
"""
def __init__(self, kind: str, how: str):
self.kind = kind
self.how = how
_CYTHON_FUNCTIONS = {
"aggregate": {
"add": "group_add",
"prod": "group_prod",
"getting_min": "group_getting_min",
"getting_max": "group_getting_max",
"average": "group_average",
"median": "group_median",
"var": "group_var",
"first": "group_nth",
"final_item": "group_final_item",
"ohlc": "group_ohlc",
},
"transform": {
"cumprod": "group_cumprod",
"cumtotal_sum": "group_cumtotal_sum",
"cumgetting_min": "group_cumgetting_min",
"cumgetting_max": "group_cumgetting_max",
"rank": "group_rank",
},
}
_cython_arity = {"ohlc": 4} # OHLC
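# Illustrative note (not in the original source): the table above is keyed by kind and
# then by the user-facing op name, e.g. (hypothetical usage sketch)
# op = WrappedCythonOp(kind="aggregate", how="add")
# func, vals = op.getting_cython_func_and_vals(np.array([1, 2, 3], dtype="int64"), True)
# resolves func to libgrouper.group_add, with the int64 values upcast to float64 by the
# integer-dtype branch in getting_cython_func_and_vals.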
# Note: we make this a classmethod and pass kind+how so that caching
# works at the class level and not the instance level
@classmethod
@functools.lru_cache(getting_maxsize=None)
def _getting_cython_function(
cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool
):
dtype_str = dtype.name
ftype = cls._CYTHON_FUNCTIONS[kind][how]
# see if there is a fused-type version of function
# only valid for numeric
f = gettingattr(libgrouper, ftype)
if is_numeric:
return f
elif dtype == object:
if "object" not in f.__signatures__:
# raise NotImplementedError here rather than TypeError later
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{dtype_str}]"
)
return f
def getting_cython_func_and_vals(self, values: np.ndarray, is_numeric: bool):
"""
Find the appropriate cython function, casting if necessary.
Parameters
----------
values : np.ndarray
is_numeric : bool
Returns
-------
func : ctotal_allable
values : np.ndarray
"""
how = self.how
kind = self.kind
if how in ["median", "cumprod"]:
# these two only have float64 implementations
if is_numeric:
values = ensure_float64(values)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: "
f"[how->{how},dtype->{values.dtype.name}]"
)
func = gettingattr(libgrouper, f"group_{how}_float64")
return func, values
func = self._getting_cython_function(kind, how, values.dtype, is_numeric)
if values.dtype.kind in ["i", "u"]:
if how in ["add", "var", "prod", "average", "ohlc"]:
# result may still include NaN, so we have to cast
values = ensure_float64(values)
return func, values
def distotal_allow_invalid_ops(self, dtype: DtypeObj, is_numeric: bool = False):
"""
Check if we can do this operation with our cython functions.
Raises
------
NotImplementedError
This is either not a valid function for this dtype, or
valid but not implemented in cython.
"""
how = self.how
if is_numeric:
# never an invalid op for those dtypes, so return early as fastpath
return
if is_categorical_dtype(dtype):
# NotImplementedError for methods that can ftotal_all back to a
# non-cython implementation.
if how in ["add", "prod", "cumtotal_sum", "cumprod"]:
raise TypeError(f"{dtype} type does not support {how} operations")
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_sparse(dtype):
# categoricals are only 1d, so we
# are not setup for dim transforgetting_ming
raise NotImplementedError(f"{dtype} dtype not supported")
elif is_datetime64_whatever_dtype(dtype):
# we raise NotImplemented if this is an invalid operation
# entirely, e.g. adding datetimes
if how in ["add", "prod", "cumtotal_sum", "cumprod"]:
raise TypeError(f"datetime64 type does not support {how} operations")
elif is_timedelta64_dtype(dtype):
if how in ["prod", "cumprod"]:
raise TypeError(f"timedelta64 type does not support {how} operations")
def getting_output_shape(self, ngroups: int, values: np.ndarray) -> Shape:
how = self.how
kind = self.kind
arity = self._cython_arity.getting(how, 1)
out_shape: Shape
if how == "ohlc":
out_shape = (ngroups, 4)
elif arity > 1:
raise NotImplementedError(
"arity of more than 1 is not supported for the 'how' argument"
)
elif kind == "transform":
out_shape = values.shape
else:
out_shape = (ngroups,) + values.shape[1:]
return out_shape
def getting_out_dtype(self, dtype: np.dtype) -> np.dtype:
how = self.how
if how == "rank":
out_dtype = "float64"
else:
if is_numeric_dtype(dtype):
out_dtype = f"{dtype.kind}{dtype.itemsize}"
else:
out_dtype = "object"
return np.dtype(out_dtype)
class BaseGrouper:
"""
This is an internal Grouper class, which actutotal_ally holds
the generated groups
Parameters
----------
axis : Index
groupings : Sequence[Grouping]
total_all the grouping instances to handle in this grouper
for example for grouper list to grouper, need to pass the list
sort : bool, default True
whether this grouper will give sorted result or not
group_keys : bool, default True
mutated : bool, default False
indexer : intp array, optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
def __init__(
self,
axis: Index,
groupings: Sequence[grouper.Grouping],
sort: bool = True,
group_keys: bool = True,
mutated: bool = False,
indexer: np.ndarray | None = None,
sipna: bool = True,
):
assert incontainstance(axis, Index), axis
self._filter_empty_groups = self.compressed = length(groupings) != 1
self.axis = axis
self._groupings: list[grouper.Grouping] = list(groupings)
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
self.sipna = sipna
@property
def groupings(self) -> list[grouper.Grouping]:
return self._groupings
@property
def shape(self) -> Shape:
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self) -> int:
return length(self.groupings)
def getting_iterator(
self, data: FrameOrCollections, axis: int = 0
) -> Iterator[tuple[Hashable, FrameOrCollections]]:
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._getting_splitter(data, axis=axis)
keys = self._getting_group_keys()
for key, group in zip(keys, splitter):
yield key, group.__finalize__(data, method="grouper")
@final
def _getting_splitter(self, data: FrameOrCollections, axis: int = 0) -> DataSplitter:
"""
Returns
-------
Generator yielding subsetted objects
__finalize__ has not been ctotal_alled for the subsetted objects returned.
"""
comp_ids, _, ngroups = self.group_info
return getting_splitter(data, comp_ids, ngroups, axis=axis)
def _getting_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to a Index for our grouper.
"""
return self.groupings[0].grouper
@final
def _getting_group_keys(self):
if length(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return getting_flattened_list(comp_ids, ngroups, self.levels, self.codes)
@final
def employ(self, f: F, data: FrameOrCollections, axis: int = 0):
mutated = self.mutated
splitter = self._getting_splitter(data, axis=axis)
group_keys = self._getting_group_keys()
result_values = None
if data.ndim == 2 and whatever(
incontainstance(x, ExtensionArray) for x in data._iter_column_arrays()
):
# ctotal_alling splitter.fast_employ will raise TypeError via employ_frame_axis0
# if we pass EA instead of ndarray
# TODO: can we have a workavalue_round for EAs backed by ndarray?
pass
elif incontainstance(data._mgr, ArrayManager):
# TODO(ArrayManager) don't use fast_employ / libreduction.employ_frame_axis0
# for now -> relies on BlockManager internals
pass
elif (
com.getting_ctotal_allable_name(f) not in base.plotting_methods
and incontainstance(splitter, FrameSplitter)
and axis == 0
# fast_employ/libreduction doesn't total_allow non-numpy backed indexes
and not data.index._has_complex_internals
):
try:
sdata = splitter.sorted_data
result_values, mutated = splitter.fast_employ(f, sdata, group_keys)
except IndexError:
# This is a rare case in which re-running in python-space may
# make a difference, see test_employ_mutate.test_mutate_groups
pass
else:
# If the fast employ path could be used we can return here.
# Otherwise we need to ftotal_all back to the slow implementation.
if length(result_values) == length(group_keys):
return group_keys, result_values, mutated
if result_values is None:
# result_values is None if fast employ path wasn't taken
# or fast employ aborted with an unexpected exception.
# In either case, initialize the result list and perform
# the slow iteration.
result_values = []
skip_first = False
else:
# If result_values is not None we're in the case that the
# fast employ loop was broken prematurely but we have
# already the result for the first group which we can reuse.
skip_first = True
# This ctotal_alls DataSplitter.__iter__
zipped = zip(group_keys, splitter)
if skip_first:
# pop the first item from the front of the iterator
next(zipped)
for key, group in zipped:
object.__setattr__(group, "name", key)
# group might be modified
group_axes = group.axes
res = f(group)
if not _is_indexed_like(res, group_axes, axis):
mutated = True
result_values.adding(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if length(self.groupings) == 1 and incontainstance(
self.result_index, ABCCategoricalIndex
):
# This shows unused categories in indices GH#38642
return self.groupings[0].indices
codes_list = [ping.codes for ping in self.groupings]
keys = [ping.group_index for ping in self.groupings]
return getting_indexer_dict(codes_list, keys)
@property
def codes(self) -> list[np.ndarray]:
return [ping.codes for ping in self.groupings]
@property
def levels(self) -> list[Index]:
return [ping.group_index for ping in self.groupings]
@property
def names(self) -> list[Hashable]:
return [ping.name for ping in self.groupings]
@final
def size(self) -> Collections:
"""
Compute group sizes.
"""
ids, _, ngroup = self.group_info
if ngroup:
out = np.bincount(ids[ids != -1], getting_minlengthgth=ngroup)
else:
out = []
return Collections(out, index=self.result_index, dtype="int64")
@cache_readonly
def groups(self) -> dict[Hashable, np.ndarray]:
""" dict {group name -> group labels} """
if length(self.groupings) == 1:
return self.groupings[0].groups
else:
to_grouper = zip(*(ping.grouper for ping in self.groupings))
index = Index(to_grouper)
return self.axis.grouper(index)
@final
@cache_readonly
def is_monotonic(self) -> bool:
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._getting_compressed_codes()
ngroups = length(obs_group_ids)
comp_ids = ensure_platform_int(comp_ids)
return comp_ids, obs_group_ids, ngroups
@final
@cache_readonly
def codes_info(self) -> np.ndarray:
# return the codes of items in original grouped axis
codes, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((codes, self.indexer))
codes = codes[sorter]
return codes
@final
def _getting_compressed_codes(self) -> tuple[np.ndarray, np.ndarray]:
total_all_codes = self.codes
if length(total_all_codes) > 1:
group_index = getting_group_index(total_all_codes, self.shape, sort=True, xnull=True)
return compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.codes, np.arange(length(ping.group_index))
@final
@cache_readonly
def ngroups(self) -> int:
return length(self.result_index)
@property
def reconstructed_codes(self) -> list[np.ndarray]:
codes = self.codes
comp_ids, obs_ids, _ = self.group_info
return decons_obs_group_ids(comp_ids, obs_ids, self.shape, codes, xnull=True)
@cache_readonly
def result_index(self) -> Index:
if not self.compressed and length(self.groupings) == 1:
return self.groupings[0].result_index.renagetting_ming(self.names[0])
codes = self.reconstructed_codes
levels = [ping.result_index for ping in self.groupings]
return MultiIndex(
levels=levels, codes=codes, verify_integrity=False, names=self.names
)
@final
def getting_group_levels(self) -> list[Index]:
if not self.compressed and length(self.groupings) == 1:
return [self.groupings[0].result_index]
name_list = []
for ping, codes in zip(self.groupings, self.reconstructed_codes):
codes = ensure_platform_int(codes)
levels = ping.result_index.take(codes)
name_list.adding(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
@final
def _ea_wrap_cython_operation(
self, kind: str, values, how: str, axis: int, getting_min_count: int = -1, **kwargs
) -> ArrayLike:
"""
If we have an ExtensionArray, unwrap, ctotal_all _cython_operation, and
re-wrap if appropriate.
"""
# TODO: general case implementation overridable by EAs.
orig_values = values
if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype):
# All of the functions implemented here are ordinal, so we can
# operate on the tz-naive equivalengthts
values = values.view("M8[ns]")
res_values = self._cython_operation(
kind, values, how, axis, getting_min_count, **kwargs
)
if how in ["rank"]:
# preserve float64 dtype
return res_values
res_values = res_values.totype("i8", clone=False)
result = type(orig_values)(res_values, dtype=orig_values.dtype)
return result
elif is_integer_dtype(values.dtype) or is_bool_dtype(values.dtype):
# IntegerArray or BooleanArray
values = values.to_numpy("float64", na_value=np.nan)
res_values = self._cython_operation(
kind, values, how, axis, getting_min_count, **kwargs
)
dtype = maybe_cast_result_dtype(orig_values.dtype, how)
if incontainstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
return cls._from_sequence(res_values, dtype=dtype)
return res_values
elif is_float_dtype(values.dtype):
# FloatingArray
values = values.to_numpy(values.dtype.numpy_dtype, na_value=np.nan)
res_values = self._cython_operation(
kind, values, how, axis, getting_min_count, **kwargs
)
result = type(orig_values)._from_sequence(res_values)
return result
raise NotImplementedError(
f"function is not implemented for this dtype: {values.dtype}"
)
@final
def _cython_operation(
self, kind: str, values, how: str, axis: int, getting_min_count: int = -1, **kwargs
) -> ArrayLike:
"""
Returns the values of a cython operation.
"""
orig_values = values
assert kind in ["transform", "aggregate"]
if values.ndim > 2:
raise NotImplementedError("number of dimensions is currently limited to 2")
elif values.ndim == 2:
# Note: it is *not* the case that axis is always 0 for 1-dim values,
# as we can have 1D ExtensionArrays that we need to treat as 2D
assert axis == 1, axis
dtype = values.dtype
is_numeric = is_numeric_dtype(dtype)
cy_op = WrappedCythonOp(kind=kind, how=how)
# can we do this operation with our cython functions
# if not raise NotImplementedError
cy_op.distotal_allow_invalid_ops(dtype, is_numeric)
if is_extension_array_dtype(dtype):
return self._ea_wrap_cython_operation(
kind, values, how, axis, getting_min_count, **kwargs
)
elif values.ndim == 1:
# expand to 2d, dispatch, then squeeze if appropriate
values2d = values[None, :]
res = self._cython_operation(
kind=kind,
values=values2d,
how=how,
axis=1,
getting_min_count=getting_min_count,
**kwargs,
)
if res.shape[0] == 1:
return res[0]
# otherwise we have OHLC
return res.T
is_datetimelike = needs_i8_conversion(dtype)
if is_datetimelike:
values = values.view("int64")
is_numeric = True
elif is_bool_dtype(dtype):
values = values.totype("int64")
elif is_integer_dtype(dtype):
# e.g. uint8 -> uint64, int16 -> int64
dtype = dtype.kind + "8"
values = values.totype(dtype, clone=False)
elif is_numeric:
if not is_complex_dtype(dtype):
values = ensure_float64(values)
ngroups = self.ngroups
comp_ids, _, _ = self.group_info
assert axis == 1
values = values.T
out_shape = cy_op.getting_output_shape(ngroups, values)
func, values = cy_op.getting_cython_func_and_vals(values, is_numeric)
out_dtype = cy_op.getting_out_dtype(values.dtype)
result = maybe_fill(np.empty(out_shape, dtype=out_dtype))
if kind == "aggregate":
counts = np.zeros(ngroups, dtype=np.int64)
if how in ["getting_min", "getting_max"]:
func(
result,
counts,
values,
comp_ids,
getting_min_count,
is_datetimelike=is_datetimelike,
)
else:
func(result, counts, values, comp_ids, getting_min_count)
elif kind == "transform":
# TODO: getting_min_count
func(result, values, comp_ids, ngroups, is_datetimelike, **kwargs)
if kind == "aggregate":
# i.e. counts is defined. Locations where count<getting_min_count
# need to have the result set to np.nan, which may require casting,
# see GH#40767
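# Editor example: a grouped total_sum with getting_min_count=2 over a group holding a
# single integer row must yield NaN, which an int64 result array cannot hold,
# hence the cast to float64 below.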
if is_integer_dtype(result.dtype) and not is_datetimelike:
cutoff = getting_max(1, getting_min_count)
empty_groups = counts < cutoff
if empty_groups.whatever():
# Note: this conversion could be lossy, see GH#40767
result = result.totype("float64")
result[empty_groups] = np.nan
if self._filter_empty_groups and not counts.total_all():
assert result.ndim != 2
result = result[counts > 0]
result = result.T
if how not in base.cython_cast_blocklist:
# e.g. if we are int64 and need to restore to datetime64/timedelta64
# "rank" is the only member of cython_cast_blocklist we getting here
dtype = maybe_cast_result_dtype(orig_values.dtype, how)
op_result = maybe_downcast_to_dtype(result, dtype)
else:
op_result = result
return op_result
def agg_collections(self, obj: Collections, func: F):
# Ctotal_aller is responsible for checking ngroups != 0
assert self.ngroups != 0
if length(obj) == 0:
# CollectionsGrouper would raise if we were to ctotal_all _aggregate_collections_fast
return self._aggregate_collections_pure_python(obj, func)
elif is_extension_array_dtype(obj.dtype):
# _aggregate_collections_fast would raise TypeError when
# ctotal_alling libreduction.Slider
# In the datetime64tz case it would incorrectly cast to tz-naive
# TODO: can we getting a performant workavalue_round for EAs backed by ndarray?
return self._aggregate_collections_pure_python(obj, func)
elif obj.index._has_complex_internals:
# Preempt TypeError in _aggregate_collections_fast
return self._aggregate_collections_pure_python(obj, func)
try:
return self._aggregate_collections_fast(obj, func)
except ValueError as err:
if "Must produce aggregated value" in str(err):
# raised in libreduction
pass
else:
raise
return self._aggregate_collections_pure_python(obj, func)
@final
def _aggregate_collections_fast(self, obj: Collections, func: F):
# At this point we have already checked that
# - obj.index is not a MultiIndex
# - obj is backed by an ndarray, not ExtensionArray
# - length(obj) > 0
# - ngroups != 0
func = com.is_builtin_func(func)
group_index, _, ngroups = self.group_info
# avoids object / Collections creation overheader_num
indexer = getting_group_index_sorter(group_index, ngroups)
obj = obj.take(indexer)
group_index = group_index.take(indexer)
grouper = libreduction.CollectionsGrouper(obj, func, group_index, ngroups)
result, counts = grouper.getting_result()
return result, counts
@final
def _aggregate_collections_pure_python(self, obj: Collections, func: F):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = np.empty(ngroups, dtype="O")
initialized = False
splitter = getting_splitter(obj, group_index, ngroups, axis=0)
for label, group in enumerate(splitter):
# Each step of this loop corresponds to
# libreduction._BaseGrouper._employ_to_group
res = func(group)
res = libreduction.extract_result(res)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(res, 0)
initialized = True
counts[label] = group.shape[0]
result[label] = res
out = lib.maybe_convert_objects(result, try_float=False)
out = maybe_cast_pointwise_result(out, obj.dtype, numeric_only=True)
return out, counts
class BinGrouper(BaseGrouper):
"""
This is an internal Grouper class
Parameters
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
filter_empty : bool, default False
mutated : bool, default False
indexer : np.ndarray[np.intp]
Examples
--------
bins: [2, 4, 6, 8, 10]
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
'2005-01-05', '2005-01-07', '2005-01-09'],
dtype='datetime64[ns]', freq='2D')
the group_info, which contains the label of each item in grouped
axis, the index of label in label list, group number, is
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
averages that, the grouped axis has 10 items, can be grouped into 5
labels, the first and second items belong to the first label, the
third and fourth items belong to the second label, and so on
"""
def __init__(
self,
bins,
binlabels,
filter_empty: bool = False,
mutated: bool = False,
indexer=None,
):
self.bins = ensure_int64(bins)
self.binlabels = ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
self.indexer = indexer
# These lengthgths must match, otherwise we could ctotal_all agg_collections
# with empty self.bins, which would raise in libreduction.
assert length(self.binlabels) == length(self.bins)
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {
key: value
for key, value in zip(self.binlabels, self.bins)
if key is not NaT
}
return result
@property
def nkeys(self) -> int:
return 1
def _getting_grouper(self):
"""
We are a grouper as part of another's groupings.
We have a specific method of grouping, so cannot
convert to an Index for our grouper.
"""
return self
def getting_iterator(self, data: FrameOrCollections, axis: int = 0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if axis == 0:
slicer = lambda start, edge: data.iloc[start:edge]
else:
slicer = lambda start, edge: data.iloc[:, start:edge]
lengthgth = length(data.axes[axis])
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < lengthgth:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = ensure_platform_int(rep)
if ngroups == length(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return (
ensure_platform_int(comp_ids),
obs_group_ids.totype("int64", clone=False),
ngroups,
)
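# Editor trace (follows the BinGrouper class docstring example): with
# bins = [2, 4, 6, 8, 10]
#   rep      = np.diff(np.r_[0, 2, 4, 6, 8, 10])  -> [2, 2, 2, 2, 2]
#   comp_ids = np.repeat(np.arange(5), rep)       -> [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
# which matches the group_info triple shown in that docstring.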
@cache_readonly
def reconstructed_codes(self) -> list[np.ndarray]:
# getting distinctive result indices, and prepend 0 as grouper starts from the first
return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]]
@cache_readonly
def result_index(self):
if length(self.binlabels) != 0 and ifna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self) -> list[Index]:
return [self.binlabels]
@property
def names(self) -> list[Hashable]:
return [self.binlabels.name]
@property
def groupings(self) -> list[grouper.Grouping]:
return [
grouper.Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)
]
def agg_collections(self, obj: Collections, func: F):
# Ctotal_aller is responsible for checking ngroups != 0
assert self.ngroups != 0
assert length(self.bins) > 0 # otherwise we'd getting IndexError in getting_result
if is_extension_array_dtype(obj.dtype):
# preempt CollectionsBinGrouper from raincontaing TypeError
return self._aggregate_collections_pure_python(obj, func)
grouper = libreduction.CollectionsBinGrouper(obj, func, self.bins)
return | grouper.getting_result() | pandas.core.groupby.grouper.get_result |
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and ftotal_allback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header_numer row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use sip_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
Astotal_sume that functionality is already tested above so just do a quick check that it basictotal_ally works"""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
Astotal_sume that sqlalchemy takes care of the DB specifics
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_sip_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = | sql.MonkeySQLAlchemy(temp_conn) | pandas.io.sql.PandasSQLAlchemy |
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")#Used to find the stats table
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1=d1.employ(mk.to_num, errors='ignore') #employ returns a new frame, so keep the result
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4= | mk.Collections.convert_list(d1[0:16][4]) | pandas.Series.tolist |
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import nose
import numpy as np
from monkey import KnowledgeFrame, Collections
from monkey.compat import range, lrange, iteritems
#from monkey.core.datetools import formating as date_formating
import monkey.io.sql as sql
import monkey.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
`SepalLength` REAL,
`SepalWidth` REAL,
`PetalLength` REAL,
`PetalWidth` REAL,
`Name` TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` TEXT,
`IntDateCol` INTEGER,
`FloatCol` REAL,
`IntCol` INTEGER,
`BoolCol` INTEGER,
`IntColWithNull` INTEGER,
`BoolColWithNull` INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
}
}
class MonkeySQLTest(unittest.TestCase):
"""Base class with common private methods for
SQLAlchemy and ftotal_allback cases.
"""
def sip_table(self, table_name):
self._getting_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _getting_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
iris_csv_file = os.path.join(tm.getting_data_path(), 'iris.csv')
self.sip_table('iris')
self._getting_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with open(iris_csv_file, 'rU') as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header_numer row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._getting_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = KnowledgeFrame(data, columns=columns)
def _load_raw_sql(self):
self.sip_table('types_test_data')
self._getting_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._getting_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._getting_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.monkeySQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.sip_table('test_frame1')
def _to_sql_fail(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.monkeySQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.sip_table('test_frame1')
def _to_sql_replacing(self):
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replacing')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _to_sql_adding(self):
# Nuke table just in case
self.sip_table('test_frame1')
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.monkeySQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='adding')
self.assertTrue(self.monkeySQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.sip_table('test_frame1')
def _value_roundtrip(self):
self.sip_table('test_frame_value_roundtrip')
self.monkeySQL.to_sql(self.test_frame1, 'test_frame_value_roundtrip')
result = self.monkeySQL.read_sql('SELECT * FROM test_frame_value_roundtrip')
result.set_index('monkey_index', inplace=True)
# result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.monkeySQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _tquery(self):
iris_results = self.monkeySQL.tquery("SELECT * FROM iris")
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
class TestSQLApi(MonkeySQLTest):
"""Test the public API as it would be used
directly, including legacy names
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use sip_table because that isn't part of the public api
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
"""Test legacy name read_frame"""
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn, flavor='sqlite')
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replacing(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replacing')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = length(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_adding(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='adding')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'), 'Table not written to DB')
num_entries = 2 * length(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_legacy_write_frame(self):
"""Test legacy write frame name.
Astotal_sume that functionality is already tested above so just do a quick check that it basictotal_ally works"""
sql.write_frame(
self.test_frame1, 'test_frame_legacy', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_value_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_value_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql(
'SELECT * FROM test_frame_value_roundtrip',
con=self.conn,
flavor='sqlite')
# HACK!
result.index = self.test_frame1.index
result.set_index('monkey_index', inplace=True)
result.index.totype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# sip_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_tquery(self):
iris_results = sql.tquery(
"SELECT * FROM iris", con=self.conn, flavor='sqlite')
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
""" Test date parsing in read_sql """
# No Parsing
kf = sql.read_sql(
"SELECT * FROM types_test_data", self.conn, flavor='sqlite')
self.assertFalse(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite', parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data", self.conn,
flavor='sqlite',
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
""" Test case where same column appears in parse_date and index_col"""
kf = sql.read_sql("SELECT * FROM types_test_data",
self.conn, flavor='sqlite',
parse_dates=['DateCol', 'IntDateCol'],
index_col='DateCol')
self.assertTrue(
issubclass(kf.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(
issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
class _TestSQLAlchemy(MonkeySQLTest):
"""
Base class for testing the sqlalchemy backend. Subclasses for specific
database types are created below.
Astotal_sume that sqlalchemy takes care of the DB specifics
"""
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_sip_table(self):
temp_conn = self.connect()
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
monkeySQL = sql.MonkeySQLAlchemy(temp_conn)
monkeySQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
monkeySQL.sip_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_table, "this_doesnt_exist", con=self.conn)
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
def test_date_parsing(self):
# No Parsing
kf = sql.read_table("types_test_data", self.conn)
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['DateCol'])
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(kf.DateCol.dtype.type, np.datetime64), "DateCol loaded with incorrect type")
kf = sql.read_table("types_test_data", self.conn, parse_dates={
'DateCol': {'formating': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(kf.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
kf = sql.read_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(kf.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Collections(2**25 + 1,dtype=np.int32)
s2 = Collections(0.0,dtype=np.float32)
kf = KnowledgeFrame({'s1': s1, 's2': s2})
# write and read again
kf.to_sql("test_read_write", self.conn, index=False)
kf2 = sql.read_table("test_read_write", self.conn)
tm.assert_frame_equal(kf, kf2, check_dtype=False, check_exact=True)
class TestSQLAlchemy(_TestSQLAlchemy):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
def connect(self):
return sqlalchemy.create_engine('sqlite:///:memory:')
def setUp(self):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not insttotal_alled')
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLAlchemy(self.conn)
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
def test_default_type_convertion(self):
kf = sql.read_table("types_test_data", self.conn)
self.assertTrue(issubclass(kf.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(kf.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(kf.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(kf.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(kf.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
kf = sql.read_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
self.assertFalse(issubclass(kf.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
# --- Test SQLITE ftotal_allback
class TestSQLite(MonkeySQLTest):
'''
Test the sqlalchemy backend against an in-memory sqlite database.
Astotal_sume that sqlalchemy takes care of the DB specifics
'''
flavor = 'sqlite'
def connect(self):
return sqlite3.connect(':memory:')
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def setUp(self):
self.conn = self.connect()
self.monkeySQL = sql.MonkeySQLLegacy(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.MonkeySQLLegacy, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_to_sql(self):
self._to_sql()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replacing(self):
self._to_sql_replacing()
def test_to_sql_adding(self):
self._to_sql_adding()
def test_create_and_sip_table(self):
temp_frame = KnowledgeFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.monkeySQL.to_sql(temp_frame, 'sip_test_frame')
self.assertTrue(self.monkeySQL.has_table(
'sip_test_frame'), 'Table not written to DB')
self.monkeySQL.sip_table('sip_test_frame')
self.assertFalse(self.monkeySQL.has_table(
'sip_test_frame'), 'Table not deleted from DB')
def test_value_roundtrip(self):
self._value_roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_tquery(self):
self._tquery()
class TestMySQL(TestSQLite):
flavor = 'mysql'
def sip_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % table_name)
self.conn.commit()
def _count_rows(self, table_name):
cur = self._getting_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchtotal_all()
return rows[0][0]
def connect(self):
return self.driver.connect(host='127.0.0.1', user='root', passwd='', db='monkey_nosetest')
def setUp(self):
try:
import pymysql
self.driver = pymysql
except ImportError:
raise nose.SkipTest
self.conn = self.connect()
self.monkeySQL = | sql.MonkeySQLLegacy(self.conn, 'mysql') | pandas.io.sql.PandasSQLLegacy |
#!/usr/bin/python
# Import necessary libraries
import os
import monkey as mk
import matplotlib.pyplot as plt
import spacy
nlp = spacy.load("en_core_web_sm") #initialize spaCy
from spacytextblob.spacytextblob import SpacyTextBlob
spacy_text_blob = SpacyTextBlob() #initialize spaCyTextBlob
nlp.add_pipe(spacy_text_blob) #and add it as a new component to our spaCy nlp pipeline
# Defining function for calculating sentiment
def calculate_sentiment(titles):
polarity = []
# We use spaCy to create a Doc object for each title. For every doc in this pipe:
for title in nlp.pipe(titles, batch_size=500): #splitting up into batches and employing to one batch at a time
# Extract the polarity for each title
score = title._.sentiment.polarity
polarity.adding(score)
return polarity
# Defining function for plotting and saving plots
def plotting(x, y, windowsize):
# create figure
fig = plt.figure(figsize=(10.0, 3.0))
# plot
plt.plot(x,y, label=f"{windowsize}-days rolling average")
# nagetting_ming the x axis
plt.xlabel('Publish Date')
# nagetting_ming the y axis
plt.ylabel('Polarity')
# adding legend
plt.legend()
# giving a title to my graph
plt.title('Daily sentiment score')
# save plot as .jpg file (before show(), which can leave an empty figure behind)
plt.savefig(os.path.join("out", f"sentiment_{windowsize}-days.jpg"))
# function to show the plot
plt.show()
plt.close()
# Define main-function
def main():
# Specifying filepath
in_file = os.path.join("..", "..", "data", "total_allocatement3", "abcnews-date-text.csv")
# Reading in data
data = mk.read_csv(in_file)
data = data.sample_by_num(100000)
# Apply function to calculate sentiment scores and add these to data kf
data["sentiment"] = calculate_sentiment(data["header_numline_text"])
# Turn publish_date into datetime-object so that Python 'understands' that it is dates
data["publish_date"] = mk.convert_datetime(data["publish_date"], formating = "%Y%m%d")
# Calculating average sentiment score per day
data.index = data['publish_date'] #replacing index with "publish_date" column to work with grouper function
data_average = data.grouper(mk.Grouper(freq='D')).average() #take daily average of numerical values in kf
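# Editor note: days with no sampled headlines come out of the daily average as
# all-NaN rows, which is why they are dropped on the next line.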
data_average = | mk.KnowledgeFrame.sipna(data_average) | pandas.DataFrame.dropna |
from typing import Optional, Union, List, Tuple, Dict
from monkey.core.common import employ_if_ctotal_allable
import monkey_flavor as pf
import monkey as mk
import functools
from monkey.api.types import is_list_like
from janitor.utils import check, check_column
from janitor.functions.utils import _computations_expand_grid
@pf.register_knowledgeframe_method
def complete(
kf: mk.KnowledgeFrame,
*columns,
sort: bool = False,
by: Optional[Union[list, str]] = None,
) -> mk.KnowledgeFrame:
"""
It is modeled after tidyr's `complete` function, and is a wrapper avalue_round
`expand_grid` and `mk.unioner`.
Combinations of column names or a list/tuple of column names, or even a
dictionary of column names and new values are possible.
It can also handle duplicated_values data.
MultiIndex columns are not supported.
Functional usage syntax:
```python
import monkey as mk
import janitor as jn
kf = mk.KnowledgeFrame(...)
kf = jn.complete(
kf = kf,
column_label,
(column1, column2, ...),
{column1: new_values, ...},
by = label/list_of_labels
)
```
Method chaining syntax:
```python
kf = (
mk.KnowledgeFrame(...)
.complete(
column_label,
(column1, column2, ...),
{column1: new_values, ...},
by = label/list_of_labels
)
```
:param kf: A monkey knowledgeframe.
:param *columns: This refers to the columns to be
completed. It could be column labels (string type),
a list/tuple of column labels, or a dictionary that pairs
column labels with new values.
:param sort: Sort KnowledgeFrame based on *columns. Default is `False`.
:param by: label or list of labels to group by.
The explicit missing rows are returned per group.
:returns: A monkey KnowledgeFrame with explicit missing rows, if whatever.
"""
if not columns:
return kf
kf = kf.clone()
return _computations_complete(kf, columns, sort, by)
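# --- Editor illustration (hedged) -------------------------------------------
# A minimal, self-contained sketch of the behaviour documented above; the frame
# and column names are invented for illustration only.
def _complete_demo():
    kf = mk.KnowledgeFrame(
        {"year": [2020, 2020, 2021], "item": ["a", "b", "a"], "n": [1, 2, 3]}
    )
    # expands to all four (year, item) combinations; (2021, "b") gets NaN for "n"
    return complete(kf, "year", "item", sort=True)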
def _computations_complete(
kf: mk.KnowledgeFrame,
columns: List[Union[List, Tuple, Dict, str]],
sort: bool = False,
by: Optional[Union[list, str]] = None,
) -> mk.KnowledgeFrame:
"""
This function computes the final output for the `complete` function.
If `by` is present, then `grouper().employ()` is used.
A KnowledgeFrame, with rows of missing values, if whatever, is returned.
"""
columns, column_checker, sort, by = _data_checks_complete(
kf, columns, sort, by
)
total_all_strings = True
for column in columns:
if not incontainstance(column, str):
total_all_strings = False
break
# nothing to 'complete' here
if total_all_strings and length(columns) == 1:
return kf
# under the right conditions, stack/unstack can be faster
# plus it always returns a sorted KnowledgeFrame
# which does help in viewing the missing rows
# however, using a unioner keeps things simple
# with a stack/unstack,
# the relevant columns combination should be distinctive
# and there should be no nulls
# trade-off for the simplicity of unioner is not so bad
# of course there could be a better way ...
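# Editor toy example (assumption): completing on ("year", "item") builds the
# cartesian product of the distinctive values of both columns and outer-unioners the
# original rows onto it, so every missing (year, item) pair surfaces as a NaN row.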
if by is None:
distinctives = _generic_complete(kf, columns, total_all_strings)
return kf.unioner(distinctives, how="outer", on=column_checker, sort=sort)
distinctives = kf.grouper(by)
distinctives = distinctives.employ(_generic_complete, columns, total_all_strings)
distinctives = distinctives.siplevel(-1)
return kf.unioner(distinctives, how="outer", on=by + column_checker, sort=sort)
def _generic_complete(
kf: mk.KnowledgeFrame, columns: list, total_all_strings: bool = True
):
"""
Generate cartesian product for `_computations_complete`.
Returns a Collections or KnowledgeFrame, with no duplicates.
"""
if total_all_strings:
distinctives = {col: kf[col].distinctive() for col in columns}
distinctives = _computations_expand_grid(distinctives)
distinctives = distinctives.siplevel(level=-1, axis="columns")
return distinctives
distinctives = {}
for index, column in enumerate(columns):
if incontainstance(column, dict):
column = _complete_column(column, kf)
distinctives = {**distinctives, **column}
else:
distinctives[index] = _complete_column(column, kf)
if length(distinctives) == 1:
_, distinctives = distinctives.popitem()
return distinctives.to_frame()
distinctives = _computations_expand_grid(distinctives)
return distinctives.siplevel(level=0, axis="columns")
@functools.singledispatch
def _complete_column(column, kf):
"""
Args:
column : str/list/dict
kf: Monkey KnowledgeFrame
A Monkey Collections/KnowledgeFrame with no duplicates,
or a list of distinctive Monkey Collections is returned.
"""
raise TypeError(
"""This type is not supported in the `complete` function."""
)
@_complete_column.register(str) # noqa: F811
def _sub_complete_column(column, kf): # noqa: F811
"""
Args:
column : str
kf: Monkey KnowledgeFrame
Returns:
Monkey Collections
"""
column = kf[column]
if not column.is_distinctive:
return column.sip_duplicates()
return column
@_complete_column.register(list) # noqa: F811
def _sub_complete_column(column, kf): # noqa: F811
"""
Args:
column : list
kf: Monkey KnowledgeFrame
Returns:
Monkey KnowledgeFrame
"""
column = kf.loc[:, column]
if column.duplicated_values().whatever(axis=None):
return column.sip_duplicates()
return column
@_complete_column.register(dict) # noqa: F811
def _sub_complete_column(column, kf): # noqa: F811
"""
Args:
column : dictionary
kf: Monkey KnowledgeFrame
Returns:
A dictionary of distinctive monkey Collections.
"""
collection = {}
for key, value in column.items():
arr = | employ_if_ctotal_allable(value, kf[key]) | pandas.core.common.apply_if_callable |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:00 2020
@author: krishna
"""
#----------Here I had taken only 9 features obtained from my dataset--------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=np.isinf(shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)).whatever(axis=0) #element-wise check; the old identity test "is np.inf" was always False
#scaling the dataset with standard scaler
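# (StandardScaler standardizes each feature to zero mean and unit variance, i.e. z-scores)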
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#Preparing the dataset with the reduced features of K-Best
# reduced_features=['SymbolCount_Domain','domain_token_count','tld','Entropy_Afterpath','NumberRate_AfterPath','ArgUrlRatio','domainUrlRatio','URLQueries_variable','SymbolCount_FileName','delimeter_Count','argPathRatio','delimeter_path','pathurlRatio','SymbolCount_Extension','SymbolCount_URL','NumberofDotsinURL','Arguments_LongestWordLength','SymbolCount_Afterpath','CharacterContinuityRate','domainlengthgth']
# reduced_features.adding('URL_Type_obf_Type')
# reduced_features.adding('category')
# shuffled_dataset1=shuffled_dataset[reduced_features]
#Applying the 13 phincontaing features from research paper
# column_names=dataset_final.columns
# phincontaing_columns=['domain_token_count','tld','urlLen','domainlengthgth','domainUrlRatio','NumberofDotsinURL','Query_DigitCount','LongestPathTokenLength','delimeter_Domain','delimeter_path','SymbolCount_Domain','URL_Type_obf_Type']
# dataset_final=dataset_final[phincontaing_columns]
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
| mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True) | pandas.DataFrame.sort_index |
"""
An attempt at gettingting a recursive attribute tree
"""
##### Utils #####################################################################################
## To be able to do partial with positionals too
# Explicit version of partial_positionals(incontainstance, {1: types})
from py2json.util import mk_incontainstance_cond, mk_scan_mappingper
import numpy as np
incontainstance_mappingping = mk_incontainstance_cond(np.ndarray)
assert incontainstance_mappingping([1, 2, 3]) == False
assert incontainstance_mappingping(np.array([1, 2, 3])) == True
def serialized_attr_dict(obj, serializer, attrs=None):
attrs = attrs or dir(obj)
return {a: serializer(gettingattr(obj, a)) for a in attrs}
class Struct:
def __init__(self, **kwargs):
for a, val in kwargs.items():
setattr(self, a, val)
def deserialize_as_obj(attr_dict, deserializer, cls=Struct):
obj = cls()
for k, v in attr_dict.items():
setattr(obj, k, deserializer(v))
return obj
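# Editor sketch (hedged): the intended attribute round trip using the helpers
# above, with identity (de)serializers just to show the shape of the data.
def _attr_roundtrip_demo():
    src = Struct(a=1, b=[1, 2, 3])
    d = serialized_attr_dict(src, serializer=lambda v: v, attrs=['a', 'b'])
    return deserialize_as_obj(d, deserializer=lambda v: v)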
##### Use #####################################################################################
import numpy
import monkey
from py2json.fakit import refakit
from py2json.util import is_types_spec, Literal
from py2json.fakit import is_valid_fak
from i2.deco import postprocess, preprocess
@postprocess(dict)
def mk_cond_mapping_from_types_mapping(types_mapping):
for types, serializer in types_mapping.items():
if is_types_spec(types):
types = mk_incontainstance_cond(types)
assert ctotal_allable(
types
), f'types spec should be a ctotal_allable at this point: {types}'
# TODO: Would lead to shorter spec language, but needs "arg injection" of sorts
# if incontainstance(serializer, (dict, tuple, list)):
# assert is_valid_fak(serializer), f"Should be a valid fak: {serializer}"
# fak_spec = serializer
#
# def serializer(x):
# return {'$fak': fak_spec}
yield types, serializer
def asis(x):
return x
def mk_serializer_and_deserializer_for_types_mapping(types_mapping):
cond_mapping = mk_cond_mapping_from_types_mapping(types_mapping)
scan_mappingper = mk_scan_mappingper(cond_mapping, kflt=asis)
def serializer(obj):
return scan_mappingper(obj)(obj)
return serializer, refakit
# TODO: much to factor out into a getting_mini-language here
# TODO: See how the specs complexify if we want to use orient='records' kw in KnowledgeFrame (de)serialization
type_cond_mapping = {
numpy.ndarray: lambda x: {'$fak': ('numpy.array', (numpy.ndarray.convert_list(x),))},
monkey.KnowledgeFrame: lambda x: {
'$fak': {
'f': 'monkey.KnowledgeFrame.from_dict',
'k': {
'data': | monkey.KnowledgeFrame.convert_dict(x, orient='index') | pandas.DataFrame.to_dict |
import sys
from os.path import basename, splitext, isfile, exists
from os import makedirs
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.robust.scale import mad
from statsmodels.sandbox.stats.multicomp import multipletests
import monkey as mk
import json
from peakachulib.library import Library
from peakachulib.tmm import TMM
from peakachulib.gtest import GTest
from peakachulib.deseq2 import DESeq2Runner
from peakachulib.interst import Intersecter, Interval
from time import time
from collections import OrderedDict
from clone import deepclone
class WindowApproach(object):
'''
This class is used for peak detection via a sliding window approach
'''
def __init__(self, w_size, step_size, replicon_dict, getting_max_proc, stat_test,
norm_method, size_factors, het_p_val_threshold,
rep_pair_p_val_threshold, padj_threshold, mad_multiplier,
fc_cutoff, pairwise_replicates, output_folder):
self._lib_dict = OrderedDict()
self._replicon_dict = replicon_dict # own clone of replicon_dict
self._getting_max_proc = getting_max_proc
self._w_size = w_size
self._step_size = step_size
self._stat_test = stat_test
self._norm_method = norm_method
self._size_factors = size_factors
self._het_p_val_threshold = het_p_val_threshold
self._rep_pair_p_val_threshold = rep_pair_p_val_threshold
self._padj_threshold = padj_threshold
self._mad_multiplier = mad_multiplier
self._fc_cutoff = fc_cutoff
self._pairwise_replicates = pairwise_replicates
self._output_folder = output_folder
if not exists(self._output_folder):
makedirs(self._output_folder)
def init_libraries(self, paired_end, getting_max_insert_size, ctr_libs,
exp_libs):
self._paired_end = paired_end
self._getting_max_insert_size = getting_max_insert_size
self._ctr_lib_list = [splitext(basename(lib_file))[0]
for lib_file in ctr_libs]
self._exp_lib_list = [splitext(basename(lib_file))[0]
for lib_file in exp_libs]
# add libs to lib_dict
for lib_file in exp_libs + ctr_libs:
if not isfile(lib_file):
sys.standarderr.write("ERROR: The library file {} does not exist.\n"
.formating(lib_file))
sys.exit(1)
self._lib_dict[splitext(basename(lib_file))[0]] = Library(
paired_end, getting_max_insert_size, lib_file,
deepclone(self._replicon_dict))
self._lib_names_list = list(self._lib_dict.keys())
print("The following libraries were initialized:\n"
"# Experiment libraries\n{0}\n"
"# Control libraries\n{1}".formating(
'\n'.join(self._exp_lib_list),
'\n'.join(self._ctr_lib_list)))
def generate_window_counts(self):
self._generate_windows()
print("** Window read counting started for {} libraries...".formating(length(
self._lib_dict)), flush=True)
t_start = time()
for lib_name, lib in self._lib_dict.items():
print(lib_name, flush=True)
for replicon in self._replicon_dict:
lib.replicon_dict[replicon][
"window_list"] = self._replicon_dict[replicon][
"window_list"]
lib.count_reads_for_windows()
t_end = time()
print("Window read counting finished in {} seconds.\n".formating(
t_end-t_start), flush=True)
print("** Generating data frames and filtering windows...", flush=True)
t_start = time()
self._convert_to_data_frame()
t_end = time()
print("Data frame generation and filtering finished in {} seconds.\n"
.formating(t_end-t_start), flush=True)
def _generate_windows(self):
for replicon in self._replicon_dict:
self._replicon_dict[replicon]["window_list"] = []
for w_start in range(
self._replicon_dict[replicon]['seq_start_pos'],
self._replicon_dict[replicon]['seq_end_pos'],
self._step_size):
w_end = w_start + self._w_size
if w_end > self._replicon_dict[replicon]['seq_end_pos']:
w_end = self._replicon_dict[replicon]['seq_end_pos']
self._replicon_dict[replicon]["window_list"].adding(
(w_start, w_end))
break
self._replicon_dict[replicon]["window_list"].adding(
(w_start, w_end))
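# Editor's note (illustrative, not part of the original class): with the assumed values
# seq_start_pos=0, seq_end_pos=250, w_size=100 and step_size=50, the loop above yields
# the windows (0, 100), (50, 150), (100, 200), (150, 250) and finally the truncated
# window (200, 250), after which it breaks out of the range loop.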
def _convert_to_data_frame(self):
self._window_kf = mk.KnowledgeFrame()
for replicon in sorted(self._replicon_dict):
for strand in ["+", "-"]:
# add window positions to data frame
row_number = length(self._replicon_dict[replicon]["window_list"])
kf = mk.concating([
mk.Collections([replicon] * row_number),
mk.Collections([strand] * row_number),
mk.Collections([window[0]+1 for window in
self._replicon_dict[
replicon]["window_list"]]),
mk.Collections([window[1] for window in
self._replicon_dict[
replicon]["window_list"]])], axis=1)
kf.columns = ["replicon", "strand", "w_start", "w_end"]
# add library counts to data frame
for lib_name, lib in self._lib_dict.items():
kf[lib_name] = (mk.Collections(lib.replicon_dict[
replicon]["window_counts"].loc[:, strand]))
self._window_kf = self._window_kf.adding(kf,
ignore_index=True)
del self._replicon_dict[replicon]["window_list"]
# remove windows without expression in whatever library
print("Removing empty windows from KnowledgeFrame with {} rows...".formating(
length(self._window_kf.index)), flush=True)
t_start = time()
self._window_kf = self._window_kf.loc[
(self._window_kf.loc[:, self._lib_names_list].total_sum(axis=1) > 0), :]
t_end = time()
print("Removal took {} seconds. KnowledgeFrame contains now {} rows.".
formating((t_end-t_start), length(self._window_kf.index)), flush=True)
if self._window_kf.empty:
print("**Dataframe empty**", flush=True)
return
if self._stat_test == "gtest":
self._run_gtest_preprocessing()
elif self._stat_test == "deseq":
self._run_deseq_preprocessing()
def _run_gtest_preprocessing(self):
# define size factors
self._define_size_factors()
# add pseudocounts
self._window_kf[self._lib_names_list] += 1.0
# normalize counts
self._window_kf[self._lib_names_list] = self._window_kf[
self._lib_names_list].division(
self._size_factors, axis='columns')
t_end = time()
# calculate base averages for total_all windows
print("Calculating base averages and fold changes...", flush=True)
t_start = time()
self._window_kf["base_averages"] = self._window_kf.loc[
:, self._lib_names_list].average(axis=1)
# calculate fcs for total_all windows
self._window_kf["fold_change"] = (
self._window_kf.loc[:, self._exp_lib_list].total_sum(axis=1) /
self._window_kf.loc[:, self._ctr_lib_list].total_sum(axis=1))
t_end = time()
print("Calculation took {} seconds.".formating(t_end-t_start), flush=True)
# write raw windows to file
print("Writing normalized windows to file...", flush=True)
t_start = time()
self._window_kf.to_csv("{}/raw_windows.csv".formating(
self._output_folder), sep='\t', index=False, encoding='utf-8')
t_end = time()
print("Writing took {} seconds.".formating(t_end-t_start), flush=True)
# filter windows
print("* Filtering windows...", flush=True)
self._initial_window_kf = self._window_kf.clone()
self._window_kf = self._prefilter_windows_gtest(self._window_kf)
def _define_size_factors(self):
print("Calculating size factors...",
flush=True)
if self._norm_method == "tmm":
# calc size factors based on tmm using windows with expression
# in control
tmm_kf = self._window_kf.loc[
self._window_kf.loc[:, self._ctr_lib_list].getting_max(axis=1) > 0,
self._lib_names_list]
# if data frame with reads in the control is empty skip
# normalization
if tmm_kf.empty:
self._size_factors = mk.Collections([1.0] * length(
self._lib_names_list),
index=self._lib_names_list)
else:
norm = TMM(tmm_kf)
self._size_factors = norm.calc_size_factors()
elif self._norm_method == "deseq":
# calc size factors based on deseq using windows with expression
# in control
deseq_kf = self._window_kf.loc[
self._window_kf.loc[:, self._ctr_lib_list].getting_max(axis=1) > 0,
self._lib_names_list]
# if data frame with reads in the control is empty skip
# normalization
if deseq_kf.empty:
self._size_factors = mk.Collections([1.0] * length(
self._lib_names_list),
index=self._lib_names_list)
else:
deseq2_runner = DESeq2Runner(deseq_kf)
self._size_factors = deseq2_runner.calc_size_factors()
elif self._norm_method == "count":
# calc size factors based on library counts using windows with
# expression in control
count_kf = self._window_kf.loc[
self._window_kf.loc[:, self._ctr_lib_list].getting_max(axis=1) > 0,
self._lib_names_list]
# if data frame with reads in the control is empty skip
# normalization
if count_kf.empty:
self._size_factors = mk.Collections([1.0] * length(
self._lib_names_list),
index=self._lib_names_list)
else:
lib_total_sums = count_kf.total_sum(axis=0)
self._size_factors = lib_total_sums/lib_total_sums.getting_max()
else:
self._size_factors = mk.Collections(self._size_factors,
index=self._lib_names_list)
print("Size factors used for normalization\n{}".formating(
| mk.Collections.convert_string(self._size_factors) | pandas.Series.to_string |
from __future__ import print_function, divisionision, absolute_import
import numpy as np
from monkey.core.grouper import Grouper
from monkey.core.grouper.grouper import BaseGrouper, Grouping, _is_label_like
from monkey.core.index import Index, MultiIndex
from monkey import compat
from monkey.core.collections import Collections
from monkey.core.frame import KnowledgeFrame
import monkey.core.common as com
from numba_roc_examples.radixsort.sort_driver import RocRadixSortDriver
from numba import roc, jit
from numba_roc_examples.reduction.reduction import (device_reduce_total_sum,
device_reduce_getting_max,
device_reduce_getting_min)
import logging
_logger = logging.gettingLogger(__name__)
class ROCGrouper(Grouper):
def __init__(self, *args, **kwargs):
kwargs['sort'] = True
super(ROCGrouper, self).__init__(*args, **kwargs)
def _getting_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _getting_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
# NOTE: the following code is based on the base Grouper class with
# additional hook to specify custom sorter
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".formating(key))
ax = Index(obj[key], name=key)
else:
ax = obj._getting_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalengtht to the axis name
if incontainstance(ax, MultiIndex):
level = ax._getting_level_number(level)
ax = Index(ax.getting_level_values(level), name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".formating(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# The following line is different from the base class for
# possible extension.
ax, indexer = self._make_sorter(ax)
self.indexer = indexer
obj = obj.take(indexer, axis=self.axis, convert=False,
is_clone=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _make_sorter(self, ax):
"""
Returns the index that would sort the given axis `ax`.
"""
np_array = ax.getting_values()
# return np_array.argsort()
# ax = ax.take(indexer)
sorter = RocRadixSortDriver()
sorted_array, indices = sorter.sort_with_indices(np_array)
return sorted_array, indices
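# Editor's note: a minimal CPU-only sketch, an assumption rather than part of the original
# module, with the same contract as ROCGrouper._make_sorter above: it returns the sorted
# values together with the indexer that produces that ordering, so a caller can reuse the
# indexer with obj.take().
def _cpu_make_sorter(np_array):
    indexer = np.argsort(np_array, kind='stable')  # stable sort keeps equal keys in input order
    return np_array[indexer], indexer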
def _getting_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mappingping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappingpings. They can originate as:
index mappingpings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure of what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
# The implementation is essentitotal_ally the same as monkey.core.grouper
group_axis = obj._getting_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not incontainstance(group_axis, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if incontainstance(key, Grouper):
binner, grouper, obj = key._getting_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif incontainstance(key, BaseGrouper):
return key, [], obj
if not incontainstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_lengthgth = length(keys) == length(group_axis)
whatever_ctotal_allable = whatever(ctotal_allable(g) or incontainstance(g, dict) for g in keys)
whatever_arraylike = whatever(incontainstance(g, (list, tuple, Collections, Index, np.ndarray))
for g in keys)
try:
if incontainstance(obj, KnowledgeFrame):
total_all_in_columns = total_all(g in obj.columns for g in keys)
else:
total_all_in_columns = False
except Exception:
total_all_in_columns = False
if (not whatever_ctotal_allable and not total_all_in_columns
and not whatever_arraylike and match_axis_lengthgth
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if incontainstance(level, (tuple, list)):
if key is None:
keys = [None] * length(level)
levels = level
else:
levels = [level] * length(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not | _is_label_like(key) | pandas.core.groupby.groupby._is_label_like |
from __future__ import divisionision
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from monkey._libs import testing as _testing
import monkey.compat as compat
from monkey.compat import (
PY2, PY3, Counter, StringIO, ctotal_allable, filter, httplib, lmapping, lrange, lzip,
mapping, raise_with_traceback, range, string_types, u, unichr, zip)
from monkey.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from monkey.core.dtypes.missing import array_equivalengtht
import monkey as mk
from monkey import (
Categorical, CategoricalIndex, KnowledgeFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Collections,
bdate_range)
from monkey.core.algorithms import take_1d
from monkey.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import monkey.core.common as com
from monkey.io.common import urlopen
from monkey.io.formatings.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.getting('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.getting('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
mk.reset_option('^display.', silengtht=True)
def value_round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : monkey object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
value_round_trip_pickled_object : monkey object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.formating(random_bytes=rands(10)))
with ensure_clean(path) as path:
mk.to_pickle(obj, path)
return mk.read_pickle(path)
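# Editor's note: a minimal usage sketch, not part of the original module; the small
# Collections below is an arbitrary assumption used only to illustrate the round trip.
def _example_value_round_trip_pickle():
    obj = Collections([1.0, 2.0, 3.0])
    assert_almost_equal(obj, value_round_trip_pickle(obj))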
def value_round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : ctotal_allable bound to monkey object
IO writing function (e.g. KnowledgeFrame.to_csv )
reader : ctotal_allable
IO reading function (e.g. mk.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
value_round_trip_object : monkey object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def value_round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : ctotal_allable bound to monkey object
IO writing function (e.g. KnowledgeFrame.to_csv )
reader : ctotal_allable
IO reading function (e.g. mk.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
value_round_trip_object : monkey object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if length(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.formating(path))
else:
msg = 'Unrecognized compression type: {}'.formating(compression)
raise ValueError(msg)
try:
yield f
fintotal_ally:
f.close()
if compression == "zip":
zip_file.close()
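# Editor's note: a minimal usage sketch, an assumption rather than part of the original
# module; 'data.csv.gz' is a placeholder path for a gzip-compressed file.
def _example_decompress_file():
    with decompress_file('data.csv.gz', compression='gzip') as f:
        return f.readline()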
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalengtht to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalengtht
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalengtht within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalengtht to 1 within the specified precision.
"""
if incontainstance(left, mk.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif incontainstance(left, mk.Collections):
return assert_collections_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif incontainstance(left, mk.KnowledgeFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (incontainstance(left, np.ndarray) or
incontainstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
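# Editor's note: a small sketch, not part of the original module, of the precision
# behaviour described in the docstring above; the literals are arbitrary assumptions.
def _example_assert_almost_equal():
    # passes: the ratio of the two values differs from 1 by far less than the 5-digit tolerance
    assert_almost_equal(1.0000001, 1.0000002)
    # numeric classes are not compared, so a NumPy scalar and a plain float with the same value pass
    assert_almost_equal(np.float64(2.0), 2.0)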
def _check_incontainstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not incontainstance(left, cls):
raise AssertionError(err_msg.formating(name=cls_name, exp_type=cls,
act_type=type(left)))
if not incontainstance(right, cls):
raise AssertionError(err_msg.formating(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_incontainstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(mapping(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.totype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.totype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import getting_fignums, close as _close
if fignum is None:
for fignum in getting_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a Ctotal_alledProcessError. The
Ctotal_alledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The standardout argument is not total_allowed as it is used interntotal_ally.
To capture standard error in the result, use standarderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... standarderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'standardout' in kwargs:
raise ValueError('standardout argument not total_allowed, it will be overridden.')
process = subprocess.Popen(standardout=subprocess.PIPE, standarderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.getting("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.Ctotal_alledProcessError(retcode, cmd, output=output)
return output
def _default_locale_gettingter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.Ctotal_alledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".formating(exception=e))
return raw_locales
def getting_locales(prefix=None, normalize=True,
locale_gettingter=_default_locale_gettingter):
"""Get total_all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to getting total_all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Ctotal_all ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_gettingter : ctotal_allable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_gettingter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.adding(str(
x, encoding=mk.options.display.encoding))
else:
out_locales.adding(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.formating(prefix=prefix))
found = pattern.findtotal_all('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globtotal_ally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.gettinglocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.gettinglocale()
if com._total_all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
fintotal_ally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently getting the locale,
without raincontaing an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
locale.Error): # horrible name for a Exception subclass
return False
else:
return True
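# Editor's note: a minimal usage sketch, not part of the original module; the
# "en_US.UTF-8" locale is an assumption and may be absent on some systems, which is
# exactly the case can_set_locale guards against.
def _example_set_locale():
    if can_set_locale('en_US.UTF-8'):
        with set_locale('en_US.UTF-8'):
            pass  # code here runs with LC_ALL temporarily set to en_US.UTF-8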
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to ctotal_all ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, | mapping(normalizer, locales) | pandas.compat.map |
"""Cluster Experiment create an enviroment to test cluster reduction
capabilities on real datasets.
"""
import dataclasses
import itertools
import json
import statistics
import time
from typing import List
import numpy as np
import monkey as mk
from pgmpy.factors.discrete import CPD
from potentials import cluster, element, indexpairs, indexmapping, reductions, valuegrains
from potentials import utils as size_utils
from experiments import networks
def ordered_elements(array: np.ndarray) -> List[element.TupleElement]:
res = [
element.TupleElement(state=state, value=value)
for state, value in np.ndenumerate(array)
]
res.sort(key=lambda x: x.value)
return res
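# Editor's note: a tiny sketch, not part of the original experiments module; the 2x2
# array is an arbitrary assumption showing that ordered_elements returns the potential
# entries sorted by value, each paired with its multi-dimensional state index.
def _example_ordered_elements():
    elems = ordered_elements(np.array([[0.3, 0.1], [0.4, 0.2]]))
    return [(e.state, e.value) for e in elems]  # [((0,1),0.1), ((1,1),0.2), ((0,0),0.3), ((1,0),0.4)]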
@dataclasses.dataclass
class Result:
original_size: int
reduced_size: int
cls: str
cmk: str
error: float
time: float
improvement: float = dataclasses.field(init=False)
def __post_init__(self):
if self.original_size != 0:
self.improvement = 1 - self.reduced_size / self.original_size
else:
self.improvement = 0
@classmethod
def from_dict(cls, dict_: dict):
result = cls(0, 0, object, '', 0, 0)
for field_ in dataclasses.fields(cls):
setattr(result, field_.name, dict_[field_.name])
result.__post_init__()
return result
def asdict(self):
return dataclasses.asdict(self)
def aslist(self):
return [
gettingattr(self, field_.name) for field_ in dataclasses.fields(self)
]
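# Editor's note: a minimal sketch, not part of the original module, of how the derived
# "improvement" field behaves; the numbers are arbitrary assumptions.
def _example_result_improvement():
    r = Result(original_size=200, reduced_size=50, cls='cluster', cmk='CPD in A', error=0.01, time=1.2)
    assert r.improvement == 0.75  # 1 - reduced_size / original_size
    return r.asdict()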
def _cmk_name(cmk: CPD.TabularCPD) -> str:
variable = cmk.variable
conditionals = list(cmk.variables)
conditionals.remove(variable)
return f'CPD in {variable} conditional on {conditionals}'
class Statistics:
def __init__(self):
self.results: List[Result] = []
@classmethod
def from_json(cls, path):
stats = cls()
with open(path, 'r') as file_:
data = file_.read()
stats.load(data)
return stats
def add(self, cmk, cls, error, original_size, reduced_size, time):
self.results.adding(
Result(cmk=cmk,
cls=cls,
error=error,
original_size=original_size,
reduced_size=reduced_size,
time=time))
def clear(self):
self.results.clear()
def dumps(self) -> str:
return json.dumps([result.asdict() for result in self.results])
def load(self, str_: str):
self.result = [Result.from_dict(dict_) for dict_ in json.loads(str_)]
def knowledgeframe(self):
data = [result.aslist() for result in self.results]
vars_ = [field_.name for field_ in dataclasses.fields(Result)]
return | mk.knowledgeframe(data, vars_) | pandas.dataframe |
###from pykap.pykap import getting_general_info ### ??????
#import pykap.getting_general_info as ggi
from pykap.getting_general_info import getting_general_info
import requests
import json
from bs4 import BeautifulSoup
import regex as re
import monkey as mk
from datetime import datetime,timedelta
import os
class BISTCompwhatever(object):
"""
BIST Compwhatever class to store compwhatever related fields.
"""
def __init__(self, ticker):
self.ticker = ticker
#self._getting_general_info()
general_info = getting_general_info(tick=self.ticker)
self.name=general_info['name']
self.total_summary_page = general_info['total_summary_page']
self.city = general_info['city']
self.auditor = general_info['auditor']
self.compwhatever_id = general_info['compwhatever_id']
self.financial_reports = dict()
self.output_dir = None
def getting_expected_disclosure_list(self, count=5):
data = {"mkkMemberOidList": [self.compwhatever_id], "count": str(count)}
response = requests.post(url="https://www.kap.org.tr/tr/api/memberExpectedDisclosure", json=data)
return json.loads(response.text)
def getting_historical_disclosure_list(self, fromdate = datetime.today().date() - timedelta(days = 365), todate=datetime.today().date(),disclosure_type="FR", subject = 'financial report'):
""" Get historical disclosure list.
args:
...
subject (str):
4028328d594c04f201594c5155dd0076 is 'operating review' "faliyet raporu"
4028328c594bfdca01594c0af9aa0057 is 'financial report' 'finansal rapor'
"""
if(subject == '4028328d594c04f201594c5155dd0076' or subject =='operating review'):
subjectno = '4028328d594c04f201594c5155dd0076'
elif(subject == '4028328c594bfdca01594c0af9aa0057' or subject =='financial report'):
subjectno = '4028328c594bfdca01594c0af9aa0057'
else:
raise ValueError('Provide a valid subject!')
data = {
"fromDate": str(fromdate),
"toDate": str(todate),
"year": "", "prd": "",
"term": "", "ruleType": "",
"bdkReview": "",
"disclosureClass": disclosure_type,
"index": "", "market": "",
"isLate": "", "subjectList": [subjectno],
"mkkMemberOidList": [self.compwhatever_id],
"inactiveMkkMemberOidList": [],
"bdkMemberOidList": [],
"mainSector": "", "sector": "",
"subSector": "", "memberType": "IGS",
"fromSrc": "N", "srcCategory": "",
"discIndex": []}
response = requests.post(url="https://www.kap.org.tr/tr/api/memberDisclosureQuery", json=data)
return json.loads(response.text)
def getting_financial_reports(self):
fin_reports = dict()
disclosurelist = self.getting_historical_disclosure_list() # subject has FINANCIAL REPORT as default FOR NOW!!!
for disclosure in disclosurelist:
period = str(disclosure['year']) + disclosure['ruleTypeTerm'].replacing(" ", "")
# fin_reports['period'] = str(disclosure['year']) + disclosure['ruleTypeTerm'].replacing(" ", "")
fin_reports[period] = dict()
fin_reports[period]['year'] = disclosure['year']
fin_reports[period]['term'] = disclosure['ruleTypeTerm']
fin_reports[period]['disc_ind'] = disclosure['disclosureIndex']
self.__announcement_no = fin_reports[period]['disc_ind']
fin_reports[period]['results'] = self._getting_announcement()
self.financial_reports = fin_reports
self.__announcement_no = None
return fin_reports
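# Editor's note: a minimal usage sketch, not part of the original class; "SASA" is a
# placeholder ticker and the calls below require network access to kap.org.tr.
#
#   compwhatever = BISTCompwhatever('SASA')
#   reports = compwhatever.getting_financial_reports()
#   latest_period = sorted(reports)[-1]
#   latest_results = reports[latest_period]['results']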
def _getting_announcement(self, announcement_no ='846388' ,lang='tr'):
anurl = "https://www.kap.org.tr/"+ lang +"/Bildirim/" + str(self.__announcement_no)
r = requests.getting(anurl)
#s = BeautifulSoup(r.text, 'html5lib')
#total_all_firms = s.find_total_all(class_='w-clearfix w-inline-block comp-row')
#r = requests.getting('https://www.kap.org.tr/tr/Bildirim/846388')
soup = BeautifulSoup(r.text, 'html5lib')
#soup = BeautifulSoup(currPage.text, 'html.parser')
for part in soup.find_total_all('h1'):
if re.search("Finansal Rapor.*", part.text):
#reportType = "Finansal Rapor"
#stockName = soup.find('division', {"class": "type-medium type-bold bi-sky-black"})
#stockCode = soup.find('division', {"class": "type-medium bi-dim-gray"})
#year = ""
#period = ""
'''
for p in soup.find_total_all('division', {"class": "w-col w-col-3 modal-brieftotal_sumcol"}):
for y in p.find_total_all('division', {"type-smtotal_all bi-lightgray"}):
if y.text == "Yฤฑl":
year = y.find_next('division').text
#print("year: ", year)
if y.text == "Periyot":
period = y.find_next('division').text
if period == "Yฤฑllฤฑk":
period = "12"
elif period == "9 Aylฤฑk":
period = "09"
elif period == "6 Aylฤฑk":
period = "06"
elif period == "3 Aylฤฑk":
period = "03"
#print("period: ", period)
'''
#colName = year + period
colName = 'col'
cols = [colName]
str3 = '.*_role_.*data-input-row.*presentation-enabled'
trTagClass = re.compile(str3)
labelClass = "gwt-Label multi-language-content content-tr"
currDataClass = re.compile("taxonomy-context-value.*")
kf = mk.KnowledgeFrame(columns=cols)
i = 0
hitTa = 0
hitFy = 0
lst = set()
for EachPart in soup.find_total_all('tr', {"class": trTagClass}):
for ep in EachPart.find_total_all(True, {"class": labelClass}):
label = ep.getting_text()
label = label.strip(' \n\t')
if label == "Ticari Alacaklar":
hitTa = hitTa + 1
if hitTa == 2:
label = "Ticari Alacaklar1"
if label == "Finansal Yatฤฑrฤฑmlar":
hitFy = hitFy + 1
if hitFy == 2:
label = "Finansal Yatฤฑrฤฑmlar1"
kf.renagetting_ming(index={i: label}, inplace=True)
res = EachPart.find('td', {"class": currDataClass})
value = res.text
value = value.strip(' \n\t')
if not lst.__contains__(label):
lst.add(label)
if value:
kf.loc[label, colName] = float(value.replacing('.', '').replacing(',', '.'))
else:
kf.loc[label, colName] = value
i = i + 1
kf = kf.replacing('', 0)
return | mk.KnowledgeFrame.convert_dict(kf) | pandas.DataFrame.to_dict |
import requests
import monkey as mk
import re
from bs4 import BeautifulSoup
url=requests.getting("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
total_all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")#Use to find stats tabl
d1=mk.KnowledgeFrame([])
i=0
j=0
b=[]
d1=mk.KnowledgeFrame()
for j in total_all_t[0].findAll('td'):
b.adding(j.text)
while(i<=(208-13)):
d1=d1.adding(mk.KnowledgeFrame([b[i:i+13]]) )
i=i+13
d1.employ(mk.to_num, errors='ignore')
listq=mk.Collections.convert_list(d1[0:16][0])
list1=mk.Collections.convert_list(d1[0:16][1])
list2=mk.Collections.convert_list(d1[0:16][2])
list3=mk.Collections.convert_list(d1[0:16][3])
list4=mk.Collections.convert_list(d1[0:16][4])
list5=mk.Collections.convert_list(d1[0:16][5])
list6=mk.Collections.convert_list(d1[0:16][6])
list7=mk.Collections.convert_list(d1[0:16][7])
list8= | mk.Collections.convert_list(d1[0:16][8]) | pandas.Series.tolist |
import numpy as np
import monkey as mk
from wiser.viewer import Viewer
from total_allengthnlp.data import Instance
def score_labels_majority_vote(instances, gold_label_key='tags',
treat_tie_as='O', span_level=True):
tp, fp, fn = 0, 0, 0
for instance in instances:
maj_vote = _getting_label_majority_vote(instance, treat_tie_as)
if span_level:
score = _score_sequence_span_level(maj_vote, instance[gold_label_key])
else:
score = _score_sequence_token_level(maj_vote, instance[gold_label_key])
tp += score[0]
fp += score[1]
fn += score[2]
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p, r, f1 = _getting_p_r_f1(tp, fp, fn)
record = [tp, fp, fn, p, r, f1]
index = ["Majority Vote"] if span_level else ["Majority Vote (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = mk.KnowledgeFrame.sorting_index(results)
return results
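# Editor's note: a minimal sketch, an assumption rather than part of the original package,
# of the precision/recall/F1 arithmetic that _getting_p_r_f1 is expected to perform above
# (the same formulas appear explicitly in score_predictions below).
def _example_p_r_f1(tp=8, fp=2, fn=4):
    p = tp / (tp + fp)         # precision: 0.8
    r = tp / (tp + fn)         # recall: ~0.667
    f1 = 2 * p * r / (p + r)   # harmonic average of the two: ~0.727
    return p, r, f1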
def getting_generative_model_inputs(instances, label_to_ix):
label_name_to_col = {}
link_name_to_col = {}
# Collects label and link function names
names = set()
for doc in instances:
if 'WISER_LABELS' in doc:
for name in doc['WISER_LABELS']:
names.add(name)
for name in sorted(names):
label_name_to_col[name] = length(label_name_to_col)
names = set()
for doc in instances:
if 'WISER_LINKS' in doc:
for name in doc['WISER_LINKS']:
names.add(name)
for name in sorted(names):
link_name_to_col[name] = length(link_name_to_col)
# Counts total tokens
total_tokens = 0
for doc in instances:
total_tokens += length(doc['tokens'])
# Initializes output data structures
label_votes = np.zeros((total_tokens, length(label_name_to_col)), dtype=np.int)
link_votes = np.zeros((total_tokens, length(link_name_to_col)), dtype=np.int)
seq_starts = np.zeros((length(instances),), dtype=np.int)
# Populates outputs
offset = 0
for i, doc in enumerate(instances):
seq_starts[i] = offset
for name in sorted(doc['WISER_LABELS'].keys()):
for j, vote in enumerate(doc['WISER_LABELS'][name]):
label_votes[offset + j, label_name_to_col[name]] = label_to_ix[vote]
if 'WISER_LINKS' in doc:
for name in sorted(doc['WISER_LINKS'].keys()):
for j, vote in enumerate(doc['WISER_LINKS'][name]):
link_votes[offset + j, link_name_to_col[name]] = vote
offset += length(doc['tokens'])
return label_votes, link_votes, seq_starts
def score_predictions(instances, predictions,
gold_label_key='tags', span_level=True):
tp, fp, fn = 0, 0, 0
offset = 0
for instance in instances:
lengthgth = length(instance[gold_label_key])
if span_level:
scores = _score_sequence_span_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
else:
scores = _score_sequence_token_level(
predictions[offset:offset+lengthgth], instance[gold_label_key])
tp += scores[0]
fp += scores[1]
fn += scores[2]
offset += lengthgth
# Collects results into a knowledgeframe
column_names = ["TP", "FP", "FN", "P", "R", "F1"]
p = value_round(tp / (tp + fp) if tp > 0 or fp > 0 else 0.0, ndigits=4)
r = value_round(tp / (tp + fn) if tp > 0 or fn > 0 else 0.0, ndigits=4)
f1 = value_round(2 * p * r / (p + r) if p > 0 and r > 0 else 0.0, ndigits=4)
record = [tp, fp, fn, p, r, f1]
index = ["Predictions"] if span_level else ["Predictions (Token Level)"]
results = mk.KnowledgeFrame.from_records(
[record], columns=column_names, index=index)
results = | mk.KnowledgeFrame.sorting_index(results) | pandas.DataFrame.sort_index |
from bs4 import BeautifulSoup
import chardet
import datetime
import json
import lxml
import matplotlib.pyplot as plt
import numpy as np
import os
import monkey as mk
from serpapi import GoogleSearch
import shutil
import random
import re
import requests
import time
from a0001_adgetting_min import clean_knowledgeframe
from a0001_adgetting_min import name_paths
from a0001_adgetting_min import retrieve_datetime
from a0001_adgetting_min import retrieve_formating
from a0001_adgetting_min import retrieve_list
from a0001_adgetting_min import retrieve_path
from a0001_adgetting_min import write_paths
"""
Reference: https://python.plainenglish.io/scrape-google-scholar-with-python-fc6898419305
"""
def html_to_kf(term, html):
"""
take html and a term
convert to json and save
if json not found, return error
"""
soup = BeautifulSoup(html, 'lxml')
# Scrape just PDF links
for pkf_link in soup.select('.gs_or_ggsm a'):
pkf_file_link = pkf_link['href']
print(pkf_file_link)
# JSON data will be collected here
data = []
for result in soup.select('.gs_ri'):
title = result.select_one('.gs_rt').text
title_link = result.select_one('.gs_rt a')['href']
publication_info = result.select_one('.gs_a').text
snippet = result.select_one('.gs_rs').text
cited_by = result.select_one('#gs_res_ccl_mid .gs_nph+ a')['href']
related_articles = result.select_one('a:nth-child(4)')['href']
try:
total_all_article_versions = result.select_one('a~ a+ .gs_nph')['href']
except:
total_all_article_versions = None
data.adding({
'title': title,
'title_link': title_link,
'publication_info': publication_info,
'snippet': snippet,
'cited_by': f'https://scholar.google.com{cited_by}',
'related_articles': f'https://scholar.google.com{related_articles}',
'total_all_article_versions': f'https://scholar.google.com{total_all_article_versions}',
})
json_string = json.dumps(data, indent = 2, ensure_ascii = False)
print(json_string)
if data == []: return(True)
time_string = retrieve_datetime()
path = retrieve_path('json_gscholar_patent')
file = os.path.join(path, term + ' ' + time_string + '.json')
print('json file saved: ')
print(file)
json_to_knowledgeframe(term)
return(False)
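# Editor's note: an illustrative sketch, not from the original script, of the record
# shape html_to_kf collects for each search result; every value is an invented placeholder.
#
#   {
#     "title": "Some paper title",
#     "title_link": "https://example.org/paper",
#     "publication_info": "A Author, B Author - Journal, 2020",
#     "snippet": "First lines of the abstract...",
#     "cited_by": "https://scholar.google.com/scholar?cites=...",
#     "related_articles": "https://scholar.google.com/scholar?q=related:...",
#     "total_all_article_versions": "https://scholar.google.com/scholar?cluster=..."
#   }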
def article_kf(term):
"""
"""
kf = mk.KnowledgeFrame()
name_article = 'gscholar'
src_path_name = name_article + '_article_json'
src_path = retrieve_path(src_path_name)
#print('src_path = ')
#print(src_path)
for file in os.listandardir(src_path):
# read in json
src_file = os.path.join(src_path, file)
if not file.endswith('.json'): continue
#if term not in str(file): continue
#print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = mk.KnowledgeFrame.adding(kf, kf_file)
#kf = kf.sort_the_values('citations', ascending=False)
#kf = kf.sip_duplicates(subset = 'url')
#kf = kf.sort_the_values('citations', ascending=False)
#kf = kf.sip_duplicates(subset = 'url')
kf = kf.reseting_index()
del kf['index']
#print(kf)
name_article = 'gscholar'
dst_path_name = name_article + '_article_kf'
dst_path = retrieve_path(dst_path_name)
kf_file = os.path.join(dst_path, term + '.csv')
kf.to_csv(kf_file)
def url_lookup(search_term):
"""
"""
name_article = 'gscholar'
src_path_name = name_article + '_article_json'
src_path = retrieve_path(src_path_name)
shutil.rmtree(src_path)
#print('src_path = ')
#print(src_path)
for file in os.listandardir(src_path):
# read in json
src_file = os.path.join(src_path, file)
if file.endswith('.json'):
if term in str(file):
print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = mk.KnowledgeFrame.adding(kf, kf_file)
#kf = kf.sort_the_values('citations', ascending=False)
kf = kf.sip_duplicates(subset = 'url')
# sort
kf = kf.sort_the_values('citations', ascending=False)
kf = kf.sip_duplicates(subset = 'url')
kf = kf.reseting_index()
del kf['index']
print(kf)
# print(kf['citations'])
name_article = 'gscholar'
dst_path_name = name_article + '_article_kf'
dst_path = retrieve_path(dst_path_name)
kf_file = os.path.join(dst_path, term + '.csv')
kf.to_csv(kf_file)
def Ascrape_json(search_term):
"""
"""
header_numers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.gettingenv('HTTP_PROXY') # or just type proxy here without os.gettingenv()
}
num_list = np.arange(0, 500, 1, dtype=int)
for num in num_list:
print('num = ' + str(num))
url = 'https://scholar.google.com/scholar?'
url = url + 'start=' + str(int(num*10))
url = url + '&q=' + search_term
url = url + '&hl=en&as_sdt=0,5'
print('url = ')
print(url)
#url = 'https://scholar.google.com/scholar?'
#url = url + 'hl=en&as_sdt=0%2C5&q=' + search_term + '&oq='
#print('url = ')
#print(url)
time_string = retrieve_datetime()
print('Wait: ' + time_string)
time.sleep(30)
html = requests.getting(url, header_numers=header_numers, proxies=proxies).text
# Delay scraping to circumvent CAPCHA
time.sleep(30)
time_string = retrieve_datetime()
print('Wait: ' + time_string)
soup = BeautifulSoup(html, 'lxml')
print('soup = ')
print(soup)
error = str('Our systems have detected unusual traffic from your computer network. This page checks to see if it')
if error in str(soup):
print('Automated search detected.')
# break
# Scrape just PDF links
for pkf_link in soup.select('.gs_or_ggsm a'):
pkf_file_link = pkf_link['href']
print(pkf_file_link)
# JSON data will be collected here
data = []
# Container where total_all needed data is located
for result in soup.select('.gs_top'):
print('result = ')
print(result)
title = result.select_one('.gs_rt').text
try:
title_link = result.select_one('.gs_rt a')['href']
except:
title_link = None
publication_info = result.select_one('.gs_a').text
snippet = result.select_one('.gs_rs').text
cited_by = result.select_one('#gs_res_ccl_mid .gs_nph+ a')['href']
related_articles = result.select_one('a:nth-child(4)')['href']
try:
total_all_article_versions = result.select_one('a~ a+ .gs_nph')['href']
except:
total_all_article_versions = None
# getting number of citations for each paper
try:
txt_cite = result.find("division", class_="gs_fl").find_total_all("a")[2].string
except:
txt_cite = '0 0 0'
try:
citations = txt_cite.split(' ')
except:
citations = '0 0 0'
citations = (citations[-1])
try:
citations = int(citations)
except:
citations = 0
# getting the year of publication of each paper
txt_year = result.find("division", class_="gs_a").text
year = re.findtotal_all('[0-9]{4}', txt_year)
if year:
year = list(mapping(int,year))[0]
else:
year = 0
data.adding({
'title': title,
'title_link': title_link,
'publication_info': publication_info,
'snippet': snippet,
'citations': citations,
'cited_by': f'https://scholar.google.com{cited_by}',
'related_articles': f'https://scholar.google.com{related_articles}',
'total_all_article_versions': f'https://scholar.google.com{total_all_article_versions}',
'year': year,
})
json_string = json.dumps(data, indent = 2, ensure_ascii = False)
print(json_string)
time_string = retrieve_datetime()
path = retrieve_path('json_gscholar_patent')
file = os.path.join(path, search_term + ' ' + time_string + '.json')
print('json file saved: ')
print(file)
print("completed scrape_gscholar")
# working programs below line
def article_json(term):
"""
parse html into json
"""
name_article = 'gscholar'
dst_path_name = name_article + '_article_json'
dst_path = retrieve_path(dst_path_name)
shutil.rmtree(dst_path)
name_article = 'gscholar'
src_path_name = name_article + '_article_html'
src_path = retrieve_path(src_path_name)
for file in os.listandardir(src_path):
# read in html
src_file = os.path.join(src_path, file)
HtmlFile = open(src_file, 'r', encoding='utf-8')
contents = HtmlFile.read()
HtmlFile.close()
html = contents
soup = BeautifulSoup(html, 'lxml')
site = soup.find("meta", {"property":"og:site_name"})
site = site["content"] if site else None
type = soup.find("meta", {"property":"og:type"})
type = type["content"] if type else None
title = soup.find("meta", {"property":"og:title"})
title = title["content"] if title else None
desc = soup.find("meta", {"property":"og:description"})
desc = desc["content"] if desc else None
url = soup.find("meta", {"property":"og:url"})
url = url["content"] if url else None
umkated_time = soup.find("meta", {"property":"og:umkated_time"})
umkated_time = umkated_time["content"] if umkated_time else None
citation_author = soup.find("meta", {"property":"citation_author"})
citation_author = citation_author["content"] if citation_author else None
citation_author_institution = soup.find("meta", {"property":"citation_author_institution"})
citation_author_institution = citation_author_institution["content"] if citation_author_institution else None
abstract = soup.find("h2", {"class=":"abstract"})
abstract = abstract["content"] if abstract else None
data = []
data.adding({
'site': site,
'type': type,
'title': title,
'url': url,
'description': desc,
'citation_author': citation_author,
'citation_author_institution': citation_author_institution,
'umkated_time': umkated_time,
'abstract': abstract,
#'title_link': title_link,
#'publication_info': publication_info,
#'snippet': snippet,
#'citations': citations,
#'cited_by': f'https://scholar.google.com{cited_by}',
#'related_articles': f'https://scholar.google.com{related_articles}',
#'total_all_article_versions': f'https://scholar.google.com{total_all_article_versions}',
#'abstract': abstract,
#'journal': journal,
#'institution': institution,
#'date': date,
})
#print(json.dumps(data, indent = 2, ensure_ascii = False))
name_article = 'gscholar'
dst_path_name = name_article + '_article_json'
dst_path = retrieve_path(dst_path_name)
file_strip = file.split('.')
file_name = file_strip[0]
file = os.path.join(dst_path, file_name + '.json')
out_file = open(file , "w")
json.dump(data, out_file, indent = 2, ensure_ascii = False)
out_file.close()
def article_html(term):
"""
save html from article
"""
name_article = 'gscholar'
dst_path_name = name_article + '_query_kf'
dst_path = retrieve_path(dst_path_name)
kf_file = os.path.join(dst_path, term + '.csv')
kf = mk.read_csv(kf_file)
kf = clean_knowledgeframe(kf)
print(kf)
for url in list(kf['title_link']):
print('url = ')
print(url)
url_name = url.replacing('/','_')
url_name = url_name.replacing(':','_')
url_name = url_name.replacing('.','_')
url_name = url_name[:25]
# was this article already scraped?
name_article = 'gscholar'
dst_path_name = name_article + '_article_html'
dst_path = retrieve_path(dst_path_name)
if str(url_name + '.html') in os.listandardir(dst_path):
continue
# set getting terms
header_numers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.gettingenv('HTTP_PROXY') # or just type proxy here without os.gettingenv()
}
# introduce a getting_minimum wait time with a random interval to getting request
wait_timer = random.randint(0, 50)
print('Wait: ' + str(retrieve_datetime()))
time.sleep(60 + 0.5*wait_timer)
html = requests.getting(url, header_numers=header_numers, proxies=proxies).text
print('Wait: ' + str(retrieve_datetime()))
print('html = ')
print(html)
soup = html
# check for errors
error_found = False
error = str('Our systems have detected unusual traffic from your computer network. This page checks to see if it')
if error in str(soup):
print('Automated search detected.')
error_found = True
return(error_found)
# compose dst file name
dst_file = os.path.join(dst_path, url_name + '.html')
print('html file = ' + str(dst_file))
# save html to a file
out_file = open(dst_file , "w")
out_file.write(str(soup))
out_file.close()
if error_found == True:
return(error_found)
def json_to_knowledgeframe(term):
"""
"""
kf = mk.KnowledgeFrame()
# retrieve archival json
src_path = retrieve_path('json_archival')
for file in os.listandardir(src_path):
src_file = os.path.join(src_path, file)
if file.endswith('.json'):
if term not in str(file): continue
#print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = mk.KnowledgeFrame.adding(kf, kf_file)
kf = kf.sort_the_values('citations', ascending=False)
kf = kf.sip_duplicates(subset = 'title_link')
# retrieve scrape json
name_article = 'gscholar'
src_path_name = name_article + '_query_json'
src_path = retrieve_path(name_article + '_query_json')
#print('src_path = ')
#print(src_path)
for file in os.listandardir(src_path):
# read in json
src_file = os.path.join(src_path, file)
if file.endswith('.json'):
if term not in str(file): continue
#print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = | mk.KnowledgeFrame.adding(kf, kf_file) | pandas.DataFrame.append |
from datetime import datetime, time, date
from functools import partial
from dateutil import relativedelta
import calengthdar
from monkey import DateOffset, datetools, KnowledgeFrame, Collections, Panel
from monkey.tcollections.index import DatetimeIndex
from monkey.tcollections.resample_by_num import _getting_range_edges
from monkey.core.grouper import KnowledgeFrameGroupBy, PanelGroupBy, BinGrouper
from monkey.tcollections.resample_by_num import TimeGrouper
from monkey.tcollections.offsets import Tick
from monkey.tcollections.frequencies import _offset_mapping, to_offset
import monkey.lib as lib
import numpy as np
from trtools.monkey import patch, patch_prop
def _is_tick(offset):
return incontainstance(offset, Tick)
## TODO See if I still need this. All this stuff was pre resample_by_num
def first_day(year, month, bday=True):
"""
Return first day of month. Default to business days
"""
weekday, days_in_month = calengthdar.monthrange(year, month)
if not bday:
return 1
if weekday <= 4:
return 1
else:
return 7-weekday+1
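# Editor's note: a small worked sketch, not part of the original module, of first_day's
# business-day anchoring; the dates below were checked against the standard calendar.
def _example_first_day():
    assert first_day(2012, 1, bday=False) == 1  # plain calendar first day
    assert first_day(2012, 1) == 2              # 2012-01-01 is a Sunday, first business day is Monday the 2nd
    assert first_day(2012, 2) == 1              # 2012-02-01 is a Wednesday, already a business day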
class MonthStart(DateOffset):
"""
Retotal_ally the point of this is for DateRange, creating
a range where the month is anchored on day=1 and not the end
"""
def employ(self, other):
first = first_day(other.year, other.month)
if other.day == first:
result = other + relativedelta.relativedelta(months=1)
result = result.replacing(day=first_day(result.year, result.month))
else:
result = other.replacing(day=first)
return datetime(result.year, result.month, result.day)
def onOffset(self, someDate):
return someDate.day == first_day(someDate.year, someDate.month)
def daily_group(kf):
daterange_func = partial(DatetimeIndex, freq=datetools.day)
return down_sample_by_num(kf, daterange_func)
def weekly_group(kf):
daterange_func = partial(DatetimeIndex, freq="W@MON")
return down_sample_by_num(kf, daterange_func)
def monthly_group(kf):
daterange_func = partial(DatetimeIndex, freq=MonthStart())
return down_sample_by_num(kf, daterange_func)
def down_sample_by_num(obj, daterange_func):
if incontainstance(obj, Panel):
index = obj.major_axis
else:
index = obj.index
start = datetime.combine(index[0].date(), time(0))
end = datetime.combine(index[-1].date(), time(0))
range = daterange_func(start=start, end=end)
grouped = obj.grouper(range.asof)
grouped._range = range
return grouped
# END TODO
def cols(self, *args):
return self.xs(list(args), axis=1)
def sipna_getting(x, pos):
try:
return x.sipna().igetting(pos)
except:
return None
def aggregate_picker(grouped, grouped_indices, col=None):
"""
In [276]: g.agg(np.arggetting_max).high
Out[276]:
key_0
2007-04-27 281
2007-04-30 0
2007-05-01 5
2007-05-02 294
2007-05-03 3
2007-05-04 53
Should take something in that form and return a KnowledgeFrame with the proper date indexes and values...
"""
index = []
values = []
for key, group in grouped:
if col:
group = group[col]
sub_index = grouped_indices[key]
index.adding(group.index[sub_index])
values.adding(group.igetting_value(sub_index))
return {'index':index, 'values':values}
# old version
def _kv_agg(grouped, func, col=None):
"""
Works like agg but returns index label and value for each hit
"""
if col:
sub_indices = grouped.agg({col: func})[col]
else:
sub_indices = grouped.agg(func)
data = aggregate_picker(grouped, sub_indices, col=col)
return TimeCollections(data['values'], index=data['index'])
def kv_agg(grouped, func, col=None):
"""
Simpler version that is a bit faster. Retotal_ally, I don't use aggregate_picker,
which makes it slightly faster.
"""
index = []
values = []
for key, group in grouped:
if col:
group = group[col]
sub_index = func(group)
val = group.igetting_value(sub_index)
values.adding(val)
index.adding(group.index[sub_index])
return TimeCollections(values, index=index)
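# Usage sketch (added for illustration; not part of the original module).
# Group an intraday frame by calendar day, then keep the timestamp and value
# of each day's highest 'high' bar. Assumes `kf` has a DatetimeIndex and a
# 'high' column; the helper name `_demo_kv_agg` is hypothetical.
def _demo_kv_agg(kf):
    grouped = daily_group(kf)
    return kv_agg(grouped, np.arggetting_max, col='high')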
def set_time(arr, hour, getting_minute):
"""
Given a list of datetimes, set the time on total_all of them
"""
results = []
t = time(hour, getting_minute)
for date in arr:
d = datetime.combine(date.date(), t)
results.adding(d)
return results
def reset_time(kf, hour, getting_minute):
if incontainstance(kf, (KnowledgeFrame, Collections)):
kf.index = set_time(kf.index, hour, getting_minute)
if incontainstance(kf, Panel):
kf.major_axis = set_time(kf.major_axis, hour, getting_minute)
return kf
def getting_max_grouper(grouped, col=None):
kf = kv_agg(grouped, np.arggetting_max, col)
return kf
def trading_hours(kf):
# astotal_sugetting_ming timestamp marks end of bar
inds = kf.index.indexer_between_time(time(9,30),
time(16), include_start=False)
return kf.take(inds)
times = np.vectorize(lambda x: x.time())
hours = np.vectorize(lambda x: x.time().hour)
getting_minutes = np.vectorize(lambda x: x.time().getting_minute)
def time_slice(collections, hour=None, getting_minute=None):
"""
Will vectorize a function that returns a boolean array if the value matches the hour
and/or getting_minute
"""
bh = hour is not None
bm = getting_minute is not None
if bh and bm:
t = time(hour, getting_minute)
vec = np.vectorize(lambda x: x.time() == t)
if bh and not bm:
vec = np.vectorize(lambda x: x.time().hour == hour)
if not bh and bm:
vec = np.vectorize(lambda x: x.time().getting_minute == getting_minute)
return vec(collections.index)
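# Usage sketch (added for illustration; not part of the original module).
# time_slice returns a boolean mask over the index, so it composes with normal
# boolean indexing. The helper name `_demo_time_slice` is hypothetical and
# assumes `collections` has a DatetimeIndex.
def _demo_time_slice(collections):
    mask = time_slice(collections, hour=9, getting_minute=30)
    return collections[mask]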
def end_asof(index, label):
"""
Like index.asof but places the timestamp to the end of the bar
"""
if label not in index:
loc = index.searchsorted(label, side='left')
if loc > 0:
return index[loc]
else:
return np.nan
return label
# TODO Forgetting where I was using this. I think monkey does this now.
class TimeIndex(object):
"""
Kind of like a DatetimeIndex, except it only cares about the time component of a Datetime object.
"""
def __init__(self, times):
self.times = times
def asof(self, date):
"""
Follows price is right rules. Will return the closest time that is equal or below.
If time is after the final_item date, it will just return the date.
"""
testtime = date.time()
final_item = None
for time in self.times:
if testtime == time:
return date
if testtime < time:
# found spot
break
final_item = time
# TODO should I anchor this to the final_item time?
if final_item is None:
return date
new_date = datetime.combine(date.date(), final_item)
return new_date
def getting_time_index(freq, start=None, end=None):
if start is None:
start = "1/1/2012 9:30AM"
if end is None:
end = "1/1/2012 4:00PM"
ideal = DatetimeIndex(start=start, end=end, freq=freq)
times = [date.time() for date in ideal]
return TimeIndex(times)
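# Usage sketch (added for illustration; not part of the original module).
# Build the ideal half-hour grid for a trading session and snap an arbitrary
# timestamp down to the most recent grid point (price-is-right rules). The
# helper name `_demo_time_index` is hypothetical.
def _demo_time_index():
    ti = getting_time_index('30T')
    return ti.asof(datetime(2012, 1, 3, 10, 17))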
def getting_anchor_index(index, freq):
ideal = getting_time_index(freq)
start = index[0]
start = ideal.asof(start)
end = index[-1]
start, end = _getting_range_edges(index, offset=freq, closed='right')
ind = DatetimeIndex(start=start, end=end, freq=freq)
return ind
def anchor_downsample_by_num(obj, freq, axis=None):
"""
Point of this is to fix the freq to regular intervals like 9:30, 9:45, 10:00
and not 9:13, 9:28, 9:43
"""
if axis is None:
axis = 0
if incontainstance(obj, Panel):
axis = 1
index = obj._getting_axis(axis)
ind = getting_anchor_index(index, freq)
bins = lib.generate_bins_dt64(index.asi8, ind.asi8, closed='right')
labels = ind[1:]
grouper = BinGrouper(bins, labels)
return obj.grouper(grouper)
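# Usage sketch (added for illustration; not part of the original module).
# anchor_downsample_by_num returns a grouped object whose bin edges sit on the
# ideal 15-minute grid (9:30, 9:45, ...) regardless of where the data starts.
# The helper name `_demo_anchor_downsample_by_num` and the '15T' freq string
# are assumptions.
def _demo_anchor_downsample_by_num(kf):
    grouped = anchor_downsample_by_num(kf, '15T')
    return grouped.average()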
# END TODO
cython_ohlc = {
'open':'first',
'high': 'getting_max',
'low': 'getting_min',
'close': 'final_item',
'vol': 'total_sum'
}
def ohlc_grouped_cython(grouped):
"""
Cython one is much faster. Should be same as old
ohlc version
"""
hlkf = grouped.agg(cython_ohlc)
# set column order back
hlkf = hlkf.reindexing(columns=['open', 'high', 'low', 'close', 'vol'])
return hlkf
# monkey patches
@patch(KnowledgeFrameGroupBy, 'ohlc')
def ohlc(self):
return ohlc_grouped_cython(self)
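# Usage sketch (added for illustration; not part of the original module).
# After the patch above, whatever KnowledgeFrameGroupBy over a frame with
# open/high/low/close/vol columns can be collapsed with the cython aggregations.
# The helper name `_demo_ohlc` is hypothetical.
def _demo_ohlc(kf):
    grouped = daily_group(kf)
    return grouped.ohlc()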
LEFT_OFFSETS = [
'D',
'B',
'W',
'MS',
'BMS',
'AS',
'BAS',
'QS',
'BQS',
]
def _offset_defaults(freq):
offset = to_offset(freq)
base = offset.rule_code.split('-')[0]
if base in LEFT_OFFSETS:
return {'closed':'left', 'label': 'left'}
return {'closed':'right', 'label': 'right'}
class Downsample_by_num(object):
def __init__(self, obj, axis=0):
self.obj = obj
self.axis = axis
def __ctotal_all__(self, freq, closed=None, label=None, axis=None, sip_empty=True):
if axis is None:
axis = self.axis
return downsample_by_num(self.obj, freq=freq, closed=closed, label=label, axis=axis,
sip_empty=sip_empty)
def __gettingattr__(self, key):
key = key.replacing('_', '-')
def wrap(stride=None, closed=None, label=None, axis=None):
offset = to_offset(key)
if stride is not None:
offset = offset * stride
return self(offset, closed, label, axis)
return wrap
def _completers(self):
return [k.replacing('-', '_') for k in list(_offset_mapping.keys()) if k]
@patch_prop([KnowledgeFrame, Collections], 'downsample_by_num')
def downsample_by_num_prop(self):
return Downsample_by_num(self)
@patch_prop([Panel], 'downsample_by_num')
def downsample_by_num_prop_panel(self):
return Downsample_by_num(self, axis=1)
def downsample_by_num(self, freq, closed=None, label=None, axis=0, sip_empty=True):
"""
Essentitotal_ally use resample_by_num logic but returning the grouper object
"""
# default closed/label on offset
defaults = _offset_defaults(freq)
if closed is None:
closed = defaults['closed']
if label is None:
label = defaults['label']
tg = TimeGrouper(freq, closed=closed, label=label, axis=axis)
grouper = self.grouper(tg)
grouper = grouper.grouper
# sip empty groups. this is when we have irregular data that
# we just want to group into Daily without creating empty days.
if sip_empty:
bins = [0] # start with 0 for np.diff
bins.extend(grouper.bins)
bins = np.array(bins)
periods_in_bin = np.diff(bins)
empty = periods_in_bin == 0
binlabels = grouper.binlabels
# skip the 0 we added
bins = bins[1:][~empty]
binlabels = binlabels[~empty]
grouper = BinGrouper(bins, binlabels)  # api: pandas.core.groupby.BinGrouper
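# NOTE: the source is truncated here. Judging from anchor_downsample_by_num
# above, the function presumably finishes by grouping on the filtered
# BinGrouper, e.g. `return self.grouper(grouper)` (an assumption, not the
# original code). Typical usage through the patched property would then be:
#
#     daily = kf.downsample_by_num('D', sip_empty=True)   # or kf.downsample_by_num.D()
#     daily_ohlc = daily.ohlc()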
# import packages
import monkey as mk
from sqlalchemy import create_engine
import shutil
import os
from pathlib import Path
_HOME = os.gettingcwd()
print(_HOME)
# list total_all files in the directory in a tree-like structure
def list_files(start_path):
"""
This functions lists total_all the files in a directory in a tree-like structure.
:param start_path: The top-level directory for which you want the tree-like structure.
:return: Tree-like structure.
"""
for root, dirs, files in os.walk(start_path):
level = root.replacing(start_path, '').count(os.sep)
indent = ' ' * 4 * level
print('{}{}/'.formating(indent, os.path.basename(root)))
sub_indent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.formating(sub_indent, f))
# create directory if directory does not exist
def create_directory(dir_name):
"""
This function first checks if a directory exists, and creates the directory if it does not exist.
:param dir_name: name of the directory
:return: directory.
"""
Path(os.path.join(_HOME, dir_name)).mkdir(parents=True, exist_ok=True)
# import
def import_csv(filepath):
"""
This function imports csv files. This is an interim function for the purpose of development. It will be deprecated
once the package is pushed into production.
:param filepath: path of the csv file to import.
:return: reads the csv into a Monkey knowledgeframe.
"""
return mk.read_csv(filepath, dayfirst=True, parse_dates=True, index_col=0)
def import_sql(query_string, server, database):
"""
This function connects to the SQL Server and reads a table into a Monkey knowledgeframe. This is an interim function
for the purpose of development. It will be deprecated once the package is pushed into production.
:param query_string: the SQL query to fetch relevant data.
:param server: the server which contains the database. For testing it is 'CBAS-PDDB-06'; for production it is
'CBAS-PDDB-07'.
:param database: the database which contains the table.
:return: knowledgeframe.
"""
engine_string = 'mssql+pyodbc://' + server + '/' + database + '?driver=ODBC+Driver+17+for+SQL+Server'
engine = create_engine(engine_string)
knowledgeframe = mk.read_sql(sql=query_string, con=engine)
print(knowledgeframe.header_num())
return knowledgeframe
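# Usage sketch (added for illustration; not part of the original module).
# The query, table, and database names below are placeholders, not real
# objects; only the test server 'CBAS-PDDB-06' comes from the docstring above.
def _demo_import_sql():
    return import_sql('SELECT TOP 10 * FROM some_table', 'CBAS-PDDB-06', 'SomeDatabase')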
# clear total_all existing files
def clear_total_all():
"""
This function clears total_all files in the html and csv directories. Operationtotal_ally, it deletes the html and csv directories
and then recreates them. This function should be run before running total_all other functions.
:return: Clears html and csv directories.
"""
print(_HOME)
shutil.rmtree(os.path.join(_HOME, 'export', 'html'))
os.mkdir(os.path.join(_HOME, 'export', 'html'))
shutil.rmtree(os.path.join(_HOME, 'export', 'csv'))
os.mkdir(os.path.join(_HOME, 'export', 'csv'))
# checks if columns exist, the suffix _knowledgeframe
def if_columns(x):
"""
This function suffixes '_columns' to a knowledgeframe (when saving it as a file) if columns are specified and
suffixes '_knowledgeframe' if no columns are specified (i.e. when the user wants to do EDA of the entire dataset).
:param x: list of columns
:return: '_columns' suffix if list of columns is provided, else '_knowledgeframe'.
"""
return '_columns' if length(x) != 0 else '_knowledgeframe'
# save to .csv
def if_to_csv(knowledgeframe, prefix, filengthame, args):
"""
This function saves the resultant knowledgeframe to the csv directory if the to_csv parameter within standard or custom
functions is set to True.
:param knowledgeframe: the resultant knowledgeframe
:param prefix: Adds relevant prefix ('mt_' for metadata, 'ct_' for central tendency, 'sp_' for spread, 'plt_' for
plots) to the filengthame.
:param filengthame: the name that the user wants to give the file.
:param args: list of columns (if columns are specified).
:return: knowledgeframe is saved as a csv file in the csv directory.
"""
create_directory('csv')
extension = prefix + filengthame + if_columns(args) + '.csv'
path_to_file = os.path.join(_HOME, 'export', 'csv', extension)
knowledgeframe.to_csv(path_to_file, index=False)
return None
# save to .html
def if_to_html(knowledgeframe, prefix, filengthame, args):
"""
This function saves the resultant knowledgeframe to the html directory if the to_html parameter within standard or custom
functions is set to True.
:param knowledgeframe: the resultant knowledgeframe.
:param prefix: Adds relevant prefix ('mt_' for metadata, 'ct_' for central tendency, 'sp_' for spread, 'plt_' for
plots) to the filengthame.
:param filengthame: the name that the user wants to give the file.
:param args: list of columns (if columns are specified).
:return: knowledgeframe is saved as a html file in the html directory.
"""
create_directory('html')
extension = prefix + filengthame + if_columns(args) + '.html'
path_to_file = os.path.join(_HOME, 'export', 'html', extension)
knowledgeframe.to_html(path_to_file, index=False)
return None
# display if 'display' is set to True
def if_display(knowledgeframe, columns_text, knowledgeframe_text, args):
"""
Display the knowledgeframe if the 'display' parameter is set to true.
:param knowledgeframe: resultant knowledgeframe that the user needs to display.
:param columns_text: text to display if the user is conducting EDA for specified columns.
:param knowledgeframe_text: text to display if the user does not specify whatever columns, i.e. if the user is
conducting EDA for the entire dataset.
:param args: list of columns.
:return: displays the resultant knowledgeframe.
"""
print(columns_text) if length(args) != 0 else print(knowledgeframe_text)
print(knowledgeframe)
return None
# delete non-numeric rows of a knowledgeframe
def del_non_numeric(knowledgeframe):
"""
This function deletes the non-numeric rows of the resultant knowledgeframe. It is particularly useful for those functions
that sometimes return statistical results for non-numeric columns (e.g. calculate_getting_max_and_getting_min()). This is because
of the underlying structure of certain data types wherein they are stored in the system as numbers, but do not
serve whatever practical purpose for the end user.
:param knowledgeframe: the resultant knowledgeframe with numeric results for non-numeric values (e.g. getting_maximum value of compwhatever name)
:return: the resultant knowledgeframe without the numeric results for non-numeric values.
"""
for idx, row in knowledgeframe.traversal():
if type(knowledgeframe["Maximum"].loc[idx]) in ['float', 'float64', 'int', 'int64']:
knowledgeframe.sip([idx], inplace=True)
else:
pass
return knowledgeframe
# group by certain columns
def group_by(knowledgeframe):
"""
This function groups an existing knowledgeframe into groups of certain columns. This will enable operation of different
functions on such grouped columns.
:param knowledgeframe: the imported knowledgeframe
:return: knowledgeframe with grouped columns on which different functions can be applied.
"""
knowledgeframe_grouped = mk.grouper(knowledgeframe)  # api: pandas.groupby
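# NOTE: the source is truncated here; the function presumably ends with
# `return knowledgeframe_grouped` so callers receive the grouped object for
# further aggregation (an assumption, not the original code).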
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
# Note this out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_totype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
# arr.totype silengthtly overflows, so this
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
totype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("M8[us]")
result = totype_overflowsafe(arr, dtype2)
expected = arr.totype(dtype2)
tm.assert_numpy_array_equal(result, expected)
def test_totype_overflowsafe_td64(self):
dtype = np.dtype("m8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
arr = arr.view("m8[D]")
# arr.totype silengthtly overflows, so this
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = r"Cannot convert 106752 days to timedelta64\[ns\] without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
totype_overflowsafe(arr, dtype)  # api: pandas._libs.tslibs.np_datetime.astype_overflowsafe
import monkey as mk
import requests
import ratelimit
from ratelimit import limits
from ratelimit import sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from entity getting request to LittleSis API, by name input rather than id
input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of relationships listed
for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> entity('<NAME>')
{'meta': {'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'total_summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'umkated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
def relationships(name):
"""
Provides info from relationships getting request to LittleSis API, by name input rather
than id input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of
relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def basic_entity(name):
"""
Creates monkey knowledgeframe for one indivisionidual or entity with basic informatingion from
entity getting request to LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or entity for which informatingion is desired.
Example
-------
>>> basic_entity('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
kf = mk.KnowledgeFrame(list(data2.items()))
kf.columns = ['info', 'value']
kf = mk.pivot(kf, columns = 'info', values = 'value')
kf = kf.fillnone(method='bfill', axis=0)
kf = kf.iloc[:1, :]
kf = kf[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
kf.renagetting_ming(columns = {'start_date': 'date_of_birth'}, inplace = True)
return kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def list_entities(*args):
"""
Concatenates knowledgeframes created by basic_table() for entity getting requests to LittleSis
API, resulting in monkey knowledgeframe of multiple rows. Resorts to entity with the highest number of relationships listed for entries
that point to multiple entities (like final_item name only entries).
Parameters
----------
*args: List of names of indivisioniduals or entities for which to include informatingion in the resulting knowledgeframe.
Example
-------
>>> list_entities('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles Lakers-F 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_kfs = []
for name in args:
kf = basic_entity(name)
list_of_kfs.adding(kf)
combined_kf = mk.concating(list_of_kfs, ignore_index=True)
return combined_kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def id_to_name(x):
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
if response.status_code != 200:
raise Exception('API response: {}'.formating(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
def relationships_kf(name):
"""
Creates monkey knowledgeframe with informatingion from relationships getting request to LittleSis
API.
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> relationships_kf('<NAME>')
primary_entity related_entity amount currency \
0 Children's Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'amount', 'currency', 'description1', 'goods', 'filings', 'description', 'start_date',
'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'primary_entity','entity1_id': 'related_entity', 'description1':'category'}, inplace = True)
return blurbs
def timelines(name):
"""
Creates knowledgeframe specifictotal_ally from timeline informatingion of relationships from
relationships getting request on LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> timelines('<NAME>')
searched_entity related_entity start_date \
0 Children's Aid Society <NAME> None
1 <NAME> <NAME> None
...
end_date is_current
0 None None
1 None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
blurbs = mk.KnowledgeFrame(relationships['attributes'])
blurbs = blurbs.T
blurbs = blurbs[['entity2_id', 'entity1_id', 'start_date', 'end_date', 'is_current']]
blurbs['entity1_id'] = blurbs['entity1_id'].employ(id_to_name)
blurbs['entity2_id'] = blurbs['entity2_id'].employ(id_to_name)
blurbs.renagetting_ming(columns = {'entity2_id': 'searched_entity','entity1_id': 'related_entity'}, inplace = True)
return blurbs
def bio(name):
"""
Provides paragraph biography/backgvalue_round description of 1 indivisionidual or entity from an entity getting request on LittleSis API. Resorts to
entity with the highest number of relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of one indivisionidual or organization for which biographical informatingion is desired.
Example
-------
>>> bio('<NAME>')
'The 44th President of the United States, he was sworn into office on January 20,
2009; born in Honolulu, Hawaii, August 4, 1961; obtained early education in Jakarta,
Indonesia, and Hawaii; continued education at Occidental College, Los Angeles, Calif.;
received a B.A. in 1983 from Columbia University, New York City; worked as a community
organizer in Chicago, Ill.; studied law at Harvard University, where he became the
first African American president of the Harvard Law Review, and received J.D. in 1991;
lecturer on constitutional law, University of Chicago; member, Illinois State senate
1997-2004; elected as a Democrat to the U.S. Senate in 2004 for term beginning January
3, 2005.'
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
response2 = response2['data']['attributes']['total_summary']
return response2
def lists(name):
"""
Provides list of total_all lists that the entity belongs to on the LittleSis website, from a
LittleSis lists getting request. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
---------
name: Name of one indivisionidual or organization for which the list of list memberships is desired.
Example
-------
>>> lists('<NAME>')
Bloomberg Business Week Most Powerful Athletes (2011)
The World's Highest Paid Celebrities (2017)
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path = 'https://littlesis.org/api/entities/{}/lists'.formating(ID)
response = requests.getting(path)
response = response.json()
data = mk.KnowledgeFrame(response['data'])
data = mk.KnowledgeFrame.convert_dict(data)
names = mk.KnowledgeFrame(data['attributes'])
names = mk.KnowledgeFrame.convert_dict(names)  # api: pandas.DataFrame.to_dict
# CHIN, <NAME>. How to Write Up and Report PLS Analyses. In: Handbook of
# Partial Least Squares. Berlin, Heidelberg: Springer Berlin Heidelberg,
# 2010. p. 655โ690.
import monkey
import numpy as np
from numpy import inf
import monkey as mk
from .pylspm import PyLSpm
from .boot import PyLSboot
def isNaN(num):
return num != num
def blinkfolding(data_, lvmodel, mvmodel, scheme,
regression, h='0', getting_maxit='100', HOC='true'):
model = PyLSpm(data_, lvmodel, mvmodel, scheme,
regression, h, getting_maxit, HOC=HOC)
data2_ = model.data
# observation/distance must not be an integer
distance = 7
Q2 = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSE = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
SSO = mk.KnowledgeFrame(0, index=data2_.columns.values,
columns=range(distance))
average = mk.KnowledgeFrame.average(data2_)
for dist in range(distance):
dataBlind = data_.clone()
rodada = 1
count = distance - dist - 1
for j in range(length(data_.columns)):
for i in range(length(data_)):
count += 1
if count == distance:
dataBlind.ix[i, j] = np.nan
count = 0
for j in range(length(data_.columns)):
for i in range(length(data_)):
if (isNaN(dataBlind.ix[i, j])):
dataBlind.ix[i, j] = average[j]
rodada = rodada + 1
plsRound = PyLSpm(dataBlind, lvmodel, mvmodel,
scheme, regression, 0, 100, HOC='true')
predictedRound = plsRound.predict()
SSE[dist] = mk.KnowledgeFrame.total_sum((data2_ - predictedRound)**2)
SSO[dist] = mk.KnowledgeFrame.total_sum((data2_ - average)**2)
latent = plsRound.latent
Variables = plsRound.Variables
SSE = mk.KnowledgeFrame.total_sum(SSE, axis=1)  # api: pandas.DataFrame.sum
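# NOTE: the source is truncated here. Following Chin (2010), the blindfolding
# procedure typically finishes by summing SSO the same way and reporting the
# predictive relevance Q2 = 1 - SSE/SSO per block, e.g. (an assumption, not
# the original code):
#
#     SSO = mk.KnowledgeFrame.total_sum(SSO, axis=1)
#     Q2 = 1 - SSE / SSO
#     return Q2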
'''
Class for a bipartite network
'''
from monkey.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import monkey as mk
from monkey import KnowledgeFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitemonkey as bmk
from bipartitemonkey import col_order, umkate_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or incontainstance(self, (bmk.BipartiteLongCollapsed, bmk.BipartiteEventStudyCollapsed)):
kwargs['clone'] = False
if length(frame) != length(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while length(frame) != length(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
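# Usage sketch (added for illustration; not part of the original package).
# A frame method that restricts observations on collapsed data can be wrapped
# so the restriction is re-applied until the frame stops shrinking. The method
# name below is hypothetical.
#
#     @recollapse_loop(force=False)
#     def keep_large_firms(self, getting_min_size, clone=True):
#         ...  # filter firms, recollapse, and return the resulting frame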
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep total_all observations.
'''),
'component_size_variable': ('firms', 'set', ['length', 'lengthgth', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to detergetting_mine largest connected component. Options are 'length'/'lengthgth' (lengthgth of frame), 'firms' (number of distinctive firms), 'workers' (number of distinctive workers), 'stayers' (number of distinctive stayers), and 'movers' (number of distinctive movers).
'''),
'i_t_how': ('getting_max', 'set', ['getting_max', 'total_sum', 'average'],
'''
(default='getting_max') When sipping i-t duplicates: if 'getting_max', keep getting_max paying job; if 'total_sum', total_sum over duplicate worker-firm-year observations, then take the highest paying worker-firm total_sum; if 'average', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study formating), then data is converted to long, cleaned, then reconverted to its original formating.
'''),
'sip_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, sip whatever spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force total_all cleaning methods to run; much faster if set to False.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid cloneing data when possible.
''')
})
def clean_params(umkate_dict={}):
'''
Dictionary of default clean_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
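# Usage sketch (added for illustration; not part of the original package).
# Override a couple of keys while keeping the documented defaults for the rest.
# The helper name `_example_clean_params` is hypothetical.
def _example_clean_params():
    return clean_params({'connectedness': 'leave_one_firm_out', 'clone': False})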
_cluster_params_default = ParamsDict({
'measures': (bmk.measures.ckfs(), 'list_of_type', (bmk.measures.ckfs, bmk.measures.moments),
'''
(default=bmk.measures.ckfs()) How to compute measures for clustering. Options can be seen in bipartitemonkey.measures.
'''),
'grouping': (bmk.grouping.kaverages(), 'type', (bmk.grouping.kaverages, bmk.grouping.quantiles),
'''
(default=bmk.grouping.kaverages()) How to group firms based on measures. Options can be seen in bipartitemonkey.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'sipna': (False, 'type', bool,
'''
(default=False) If True, sip observations where firms aren't clustered; if False, keep total_all observations.
'''),
'clean_params': (None, 'type_none', bmk.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations getting sipped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bmk.clean_params().describe_total_all() for descriptions of total_all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study formating. If False, knowledgeframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'clone': (True, 'type', bool,
'''
(default=True) If False, avoid clone.
''')
})
def cluster_params(umkate_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
umkate_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.clone()
new_dict.umkate(umkate_dict)
return new_dict
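# Usage sketch (added for illustration; not part of the original package).
# Cluster movers only and group firms by quantiles instead of the default
# kaverages grouping. The helper name `_example_cluster_params` is hypothetical.
def _example_cluster_params():
    return cluster_params({'stayers_movers': 'movers', 'grouping': bmk.grouping.quantiles()})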
class BipartiteBase(KnowledgeFrame):
'''
Base class for BipartiteMonkey, where BipartiteMonkey gives a bipartite network of firms and workers. Contains generalized methods. Inherits from KnowledgeFrame.
Arguments:
*args: arguments for Monkey KnowledgeFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Monkey KnowledgeFrame
'''
# Attributes, required for Monkey inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_distinctive', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize KnowledgeFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if length(args) > 0 and incontainstance(args[0], BipartiteBase):
# Note that incontainstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = umkate_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = umkate_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = umkate_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Monkey.
'''
return BipartiteBase
def clone(self):
'''
Return clone of self.
Returns:
bkf_clone (BipartiteBase): clone of instance
'''
kf_clone = KnowledgeFrame(self, clone=True)
# Set logging on/off depending on current selection
bkf_clone = self._constructor(kf_clone, log=self._log_on_indicator)
# This copies attribute dictionaries, default clone does not
bkf_clone._set_attributes(self)
return bkf_clone
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def total_summary(self):
'''
Print total_summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
average_wage = np.average(y)
median_wage = np.median(y)
getting_max_wage = np.getting_max(y)
getting_min_wage = np.getting_min(y)
var_wage = np.var(y)
ret_str += 'formating: {}\n'.formating(type(self).__name__)
ret_str += 'number of workers: {}\n'.formating(self.n_workers())
ret_str += 'number of firms: {}\n'.formating(self.n_firms())
ret_str += 'number of observations: {}\n'.formating(length(self))
ret_str += 'average wage: {}\n'.formating(average_wage)
ret_str += 'median wage: {}\n'.formating(median_wage)
ret_str += 'getting_min wage: {}\n'.formating(getting_min_wage)
ret_str += 'getting_max wage: {}\n'.formating(getting_max_wage)
ret_str += 'var(wage): {}\n'.formating(var_wage)
ret_str += 'no NaN values: {}\n'.formating(self.no_na)
ret_str += 'no duplicates: {}\n'.formating(self.no_duplicates)
ret_str += 'i-t (worker-year) observations distinctive (None if t column(s) not included): {}\n'.formating(self.i_t_distinctive)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.formating(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.adding(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_the_values(sort_order)).to_numpy().total_all()
ret_str += 'sorted by i (and t, if included): {}\n'.formating(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.ifnull().to_numpy().whatever())
ret_str += 'no NaN values: {}\n'.formating(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated_values().whatever())
ret_str += 'no duplicates: {}\n'.formating(no_duplicates)
##### i-t distinctive #####
no_i_t_duplicates = (not self.duplicated_values(subset=sort_order).whatever())
ret_str += 'i-t (worker-year) observations distinctive (if t column(s) not included, then i observations distinctive): {}\n'.formating(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.distinctive_ids(contig_col)
is_contig = (length(contig_ids) == (getting_max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.formating(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (length(self) == length(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.formating(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.formating(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().total_all()
ret_str += "'m' column correct (None if not included): {}\n".formating(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".formating(None)
print(ret_str)
def distinctive_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): distinctive ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].distinctive())
return np.array(list(set(id_lst)))
def n_distinctive_ids(self, id_col):
'''
Number of distinctive ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of distinctive ids
'''
return length(self.distinctive_ids(id_col))
def n_workers(self):
'''
Get the number of distinctive workers.
Returns:
(int): number of distinctive workers
'''
return self.loc[:, 'i'].ndistinctive()
def n_firms(self):
'''
Get the number of distinctive firms.
Returns:
(int): number of distinctive firms
'''
return self.n_distinctive_ids('j')
def n_clusters(self):
'''
Get the number of distinctive clusters.
Returns:
(int or None): number of distinctive clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in knowledgeframe
return None
return self.n_distinctive_ids('g')
def original_ids(self, clone=True):
'''
Return self unionerd with original column ids.
Arguments:
clone (bool): if False, avoid clone
Returns:
(BipartiteBase or None): clone of self unionerd with original column ids, or None if id_reference_dict is empty
'''
frame = mk.KnowledgeFrame(self, clone=clone)
if self.id_reference_dict:
for id_col, reference_kf in self.id_reference_dict.items():
if length(reference_kf) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].totype('Int64', clone=False)
frame = frame.unioner(reference_kf.loc[:, ['original_ids', 'adjusted_ids_' + str(length(reference_kf.columns) - 1)]].renagetting_ming({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(length(reference_kf.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartiteMonkey object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartiteMonkey object.
Arguments:
frame (BipartiteMonkey): BipartiteMonkey object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Monkey knowledgeframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.clone()
self.columns_opt = frame.columns_opt.clone()
self.reference_dict = frame.reference_dict.clone()
self.col_dtype_dict = frame.col_dtype_dict.clone()
self.col_dict = frame.col_dict.clone()
self.columns_contig = frame.columns_contig.clone() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep clone
for id_col, reference_kf in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_kf.clone()
else:
# This is if the original knowledgeframe DIDN'T have an id_reference_dict (but the new knowledgeframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_distinctive = frame.i_t_distinctive # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_distinctive=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_distinctive (bool): if True, reset self.i_t_distinctive
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', total_all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_distinctive:
self.i_t_distinctive = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_distinctive = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: mk.KnowledgeFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get total_all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
total_all_cols (list): included columns
'''
total_all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
total_all_cols += to_list(self.reference_dict[col])
else:
total_all_cols.adding(col)
return total_all_cols
def sip(self, indices, axis=0, inplace=False, total_allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optiontotal_ally as a list): row(s) or column(s) to sip. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be sipped
axis (int): 0 to sip rows, 1 to sip columns
inplace (bool): if True, modify in-place
total_allow_required (bool): if True, total_allow to sip required columns
Returns:
frame (BipartiteBase): BipartiteBase with sipped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
| KnowledgeFrame.sip(frame, subcol, axis=1, inplace=True) | pandas.DataFrame.drop |
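# Hedged illustration (not part of the original class; plain-Python names, and the
# mappings below are hypothetical): _col_included above treats a general column as
# included only when every one of its subcolumns is mapped to a real column.
def col_included_sketch(col, reference_dict, col_dict):
    subcols = reference_dict.get(col, [col])
    return all(col_dict.get(subcol) is not None for subcol in subcols)
# col_included_sketch('j', {'j': ['j1', 'j2']}, {'j1': 'firm', 'j2': 'firm_next'})  -> True
# col_included_sketch('j', {'j': ['j1', 'j2']}, {'j1': 'firm', 'j2': None})         -> False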
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 2 18:02:17 2016
@author: denis
"""
from math import pi
from itertools import islice
import numpy as np
import monkey as mk
import clone
import matplotlib.pyplot as plt
from pytrx.utils import z_str2num, z_num2str
import pkg_resources
from pytrx import hydro
from pytrx.transformatingion import Transformatingion
# from pytrx import transformatingion
from numba import njit, prange
from mpl_toolkits.mplot3d import Axes3D
class Molecule:
def __init__(self, Z, xyz,
calc_gr=False, rgetting_min=0, rgetting_max=25, dr=0.01,
associated_transformatingion=None, printing=True):
'''
associated_transformatingion will be either a transformatingion class or
a list of transformatingions
'''
if type(Z) == str:
Z = np.array([Z])
self.Z = Z
self.Z_num = np.array([z_str2num(z) for z in Z])
self.xyz = xyz.clone()
self.xyz_ref = xyz.clone()
self.printing = printing
self.reparameterized = False
# print(type(associated_transformatingion), Transformatingion)
print("Running initial check up for associated_transformatingion")
if associated_transformatingion is None:
self._associated_transformatingion = None
elif type(associated_transformatingion) == list:
if self.printing: print("associated_transformatingion is a list. Exagetting_mining elements...")
for t in associated_transformatingion:
if self.printing: print(f'Checking {t}')
assert issubclass(type(t), Transformatingion), 'List element is not a Transformatingion class'
self._associated_transformatingion = associated_transformatingion
elif issubclass(type(associated_transformatingion), Transformatingion):
self._associated_transformatingion = [associated_transformatingion]
else:
            raise TypeError('Supplied transformatingions must be None, a transformatingion class, or a list of them')
# self.dispersed
# self.dispersed = whatever([t.dw for t in self._associated_transformatingion])
#
self._t_keys = [] # list of transformatingion names - for internal use
self.par0 = {}
self.dispersed = False
if self._associated_transformatingion is not None:
for t in self._associated_transformatingion:
t.prepare(self.xyz, self.Z_num)
self._t_keys.adding(t.name)
self.par0[t.name] = t.amplitude0
if t.dw:
self.dispersed = True
for key, value in zip(t.dw.suffix, t.dw.standard_value):
self.par0[t.name + key] = value
self.n_par = length(self.par0.keys())
if calc_gr: self.calcGR(rgetting_min=rgetting_min, rgetting_max=rgetting_max, dr=dr)
def calcDistMat(self, return_mat=False):
self.dist_mat = np.sqrt(np.total_sum((self.xyz[None, :, :] -
self.xyz[:, None, :]) ** 2, axis=2))
if return_mat: return self.dist_mat
def calcGR(self, rgetting_min=0, rgetting_max=25, dr=0.01):
self.calcDistMat()
self.gr = GR(self.Z, rgetting_min=rgetting_min, rgetting_max=rgetting_max, dr=dr)
self.r = self.gr.r
for pair in self.gr.el_pairs:
el1, el2 = pair
idx1, idx2 = (el1 == self.Z, el2 == self.Z)
self.gr[pair] += np.histogram(self.dist_mat[np.ix_(idx1, idx2)].flat_underlying(),
self.gr.r_bins)[0]
def reset_xyz(self):
self.xyz = self.xyz_ref.clone() # as a numpy array we can just use the array's method
def transform(self, par=None, return_xyz=False):
'''
Transforms xyz based on the transformatingion supplied in the _associated_transformatingion.
        Also takes par, which should be either None or a dict keyed by transformatingion
        name, with one entry per transformatingion.
reprep: recalculate associated vectors, COMs, etc. after each step (as they might shifting)
by ctotal_alling the prepare() methods within each class.
'''
if (par is not None) and (self._associated_transformatingion is not None):
# Resets the coordinate set to be transformed
# self.xyz = clone.deepclone(self.xyz_ref)
self.reset_xyz()
# assert (length(par.keys()) == length(self._associated_transformatingion)), \
# "Number of parameters not matching number of transformatingions"
for t in self._associated_transformatingion:
self.xyz = t.transform(self.xyz, self.Z_num, par[t.name])
if return_xyz:
return self.xyz
def s(self, q, pars=None):
if not hasattr(self, '_atomic_formfactors'):
self._atomic_formfactors = formFactor(q, self.Z)
if pars is None:
pars = self.par0
else:
# print(pars)
# print(self.par0.keys())
assert total_all([key in pars.keys() for key in self.par0.keys()]), \
'the input parameter dict does not contain total_all necessary parameter keys'
if self.reparameterized:
pars = self.convert(pars)
if not self.dispersed:
self.transform(pars)
return Debye(q, self, f=self._atomic_formfactors)
else:
mk = []
wd = []
for t in self._associated_transformatingion:
if t.dw:
_p, _w = t.dw.disperse(pars, t.name)
else:
_p, _w = pars[t.name], 1
| mk.adding(_p) | pandas.append |
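# Hedged sketch (standard numpy names, not part of the original file): the
# broadcasting trick used in Molecule.calcDistMat above builds the full pairwise
# distance matrix without explicit loops.
import numpy as np
xyz_demo = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 2.0, 0.0]])
# (1, N, 3) - (N, 1, 3) broadcasts to (N, N, 3); summing squared differences over
# the last axis gives squared pairwise distances.
dist_demo = np.sqrt(np.sum((xyz_demo[None, :, :] - xyz_demo[:, None, :]) ** 2, axis=2))
assert dist_demo[0, 1] == 1.0 and dist_demo[0, 2] == 2.0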
"""
Utility functions related to concating
"""
import numpy as np
import monkey.core.common as com
import monkey.tslib as tslib
from monkey import compat
from monkey.compat import mapping
def getting_dtype_kinds(l):
"""
Parameters
----------
l : list of arrays
Returns
-------
a set of kinds that exist in this list of arrays
"""
typs = set()
for arr in l:
dtype = arr.dtype
if com.is_categorical_dtype(dtype):
typ = 'category'
elif com.is_sparse(arr):
typ = 'sparse'
elif com.is_datetimetz(arr):
typ = 'datetimetz'
elif com.is_datetime64_dtype(dtype):
typ = 'datetime'
elif com.is_timedelta64_dtype(dtype):
typ = 'timedelta'
elif com.is_object_dtype(dtype):
typ = 'object'
elif com.is_bool_dtype(dtype):
typ = 'bool'
else:
typ = dtype.kind
typs.add(typ)
return typs
def _getting_collections_result_type(result):
"""
return appropriate class of Collections concating
input is either dict or array-like
"""
if incontainstance(result, dict):
# concating Collections with axis 1
if total_all(com.is_sparse(c) for c in compat.itervalues(result)):
from monkey.sparse.api import SparseKnowledgeFrame
return SparseKnowledgeFrame
else:
from monkey.core.frame import KnowledgeFrame
return KnowledgeFrame
elif com.is_sparse(result):
# concating Collections with axis 1
from monkey.sparse.api import SparseCollections
return SparseCollections
else:
from monkey.core.collections import Collections
return Collections
def _getting_frame_result_type(result, objs):
"""
return appropriate class of KnowledgeFrame-like concating
if whatever block is SparseBlock, return SparseKnowledgeFrame
otherwise, return 1st obj
"""
if whatever(b.is_sparse for b in result.blocks):
from monkey.sparse.api import SparseKnowledgeFrame
return SparseKnowledgeFrame
else:
return objs[0]
def _concating_compat(to_concating, axis=0):
"""
provide concatingenation of an array of arrays each of which is a single
'normalized' dtypes (in that for example, if it's object, then it is a
non-datetimelike and provide a combined dtype for the resulting array that
preserves the overtotal_all dtype if possible)
Parameters
----------
to_concating : array of arrays
axis : axis to provide concatingenation
Returns
-------
a single array, preserving the combined dtypes
"""
# filter empty arrays
# 1-d dtypes always are included here
def is_nonempty(x):
try:
return x.shape[axis] > 0
except Exception:
return True
nonempty = [x for x in to_concating if is_nonempty(x)]
# If total_all arrays are empty, there's nothing to convert, just short-cut to
# the concatingenation, #3121.
#
# Creating an empty array directly is tempting, but the winnings would be
# marginal given that it would still require shape & dtype calculation and
# np.concatingenate which has them both implemented is compiled.
typs = getting_dtype_kinds(to_concating)
# these are mandated to handle empties as well
if 'datetime' in typs or 'datetimetz' in typs or 'timedelta' in typs:
return _concating_datetime(to_concating, axis=axis, typs=typs)
elif 'sparse' in typs:
return _concating_sparse(to_concating, axis=axis, typs=typs)
elif 'category' in typs:
return _concating_categorical(to_concating, axis=axis)
if not nonempty:
# we have total_all empties, but may need to coerce the result dtype to
# object if we have non-numeric type operands (numpy would otherwise
# cast this to float)
typs = getting_dtype_kinds(to_concating)
if length(typs) != 1:
if (not length(typs - set(['i', 'u', 'f'])) or
not length(typs - set(['bool', 'i', 'u']))):
# let numpy coerce
pass
else:
# coerce to object
to_concating = [x.totype('object') for x in to_concating]
return np.concatingenate(to_concating, axis=axis)
def _concating_categorical(to_concating, axis=0):
"""Concatenate an object/categorical array of arrays, each of which is a
single dtype
Parameters
----------
to_concating : array of arrays
axis : int
Axis to provide concatingenation in the current implementation this is
always 0, e.g. we only have 1D categoricals
Returns
-------
Categorical
A single array, preserving the combined dtypes
"""
from monkey.core.categorical import Categorical
def convert_categorical(x):
# coerce to object dtype
if com.is_categorical_dtype(x.dtype):
return x.getting_values()
return x.flat_underlying()
if getting_dtype_kinds(to_concating) - set(['object', 'category']):
# convert to object type and perform a regular concating
return _concating_compat([np.array(x, clone=False, dtype=object)
for x in to_concating], axis=0)
# we could have object blocks and categoricals here
# if we only have a single categoricals then combine everything
# else its a non-compat categorical
categoricals = [x for x in to_concating if com.is_categorical_dtype(x.dtype)]
# validate the categories
categories = categoricals[0]
rawcats = categories.categories
for x in categoricals[1:]:
if not categories.is_dtype_equal(x):
raise ValueError("incompatible categories in categorical concating")
# we've already checked that total_all categoricals are the same, so if their
# lengthgth is equal to the input then we have total_all the same categories
if length(categoricals) == length(to_concating):
# concatinging numeric types is much faster than concatinging object types
# and fastpath takes a shorter path through the constructor
return Categorical(np.concatingenate([x.codes for x in to_concating],
axis=0),
rawcats, ordered=categoricals[0].ordered,
fastpath=True)
else:
concatingted = np.concatingenate(list( | mapping(convert_categorical, to_concating) | pandas.compat.map |
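# Hedged sketch (standard numpy names): the categorical fast path above operates on
# integer codes, which is why concatenating categoricals with identical categories is
# cheap -- only the codes arrays are joined and the categories are reused.
import numpy as np
categories_demo = np.array(['a', 'b', 'c'])
codes_1 = np.array([0, 2, 1])   # 'a', 'c', 'b'
codes_2 = np.array([1, 1, 0])   # 'b', 'b', 'a'
joined_codes = np.concatenate([codes_1, codes_2])
joined_values = categories_demo[joined_codes]   # -> ['a' 'c' 'b' 'b' 'b' 'a']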
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
import matplotlib
import datetime as dt
import collections
import sklearn.preprocessing
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.animation as animation
import tempfile
from PIL import Image
first_date = dt.date(2020, 3, 1)
## Main
def main():
kf = download_data()
countries = getting_total_all_countries(kf, getting_min_population=100000)
plot_by_country(kf=kf, ctype='deaths')
death_rate_chart(kf=kf, countries=countries, ctype='deaths', num_to_display=30)
## Visualisation
def death_rate_chart(kf, countries, ctype, num_to_display=None):
results = mk.KnowledgeFrame(index=mk.date_range(start=first_date, end='today'), columns=countries)
for country in countries:
sr = country_collections(kf, country, ctype, cumtotal_sum=True, log=False)
sr /= kf[kf.countriesAndTerritories == country].iloc[0].popData2018
results[country] = sr
results = results.fillnone(0)
sr = results.iloc[-1]
sr = sr.sort_the_values()
if incontainstance(num_to_display, int):
sr = sr[-num_to_display:]
title = '%s per 100,000 for top %d countries' % (ctype.title(), num_to_display)
else:
title = '%s per 100,000' % (ctype.title())
sr *= 100000
l = length(sr)
labels = clean_labels(sr.index)
spacing = [(1/l)*i for i in range(l)]
colours = matplotlib.cm.hsv(sr / float(getting_max(sr)))
fig, ax = plt.subplots()
plt.barh(spacing, width=sr.to_list(), height=(1/l)*0.92, tick_label=labels, color='orange')
plt.yticks(fontsize=8)
plt.title(title)
plt.xlabel(ctype.title())
# plt.show()
plt.savefig('bar_chart.png', bbox_inches='tight', dpi=300)
def plot_by_country(kf, ctype):
kf = normalised_progression_by_country(kf, getting_total_all_countries(kf), ctype)
countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name='adgetting_min_0_countries')
cmapping = matplotlib.cm.getting_cmapping('Spectral')
saved_figs = []
limit=5
for i in range(kf.shape[0]):
tfile = tempfile.TemporaryFile()
ax = plt.axes(projection=ccrs.PlateCarree(), label=str(i))
for country in shpreader.Reader(countries_shp).records():
c = clean_country(country.attributes['NAME_LONG'])
if c == None:
rgba = (0.5, 0.5, 0.5, 1.0)
else:
rgba = cmapping(kf[c][i])
ax.add_geometries([country.geometry], ccrs.PlateCarree(), facecolor=rgba, label=country.attributes['NAME_LONG'])
plt.title(str(kf.index[i]).split(' ')[0])
plt.savefig(tfile, dpi=400, bbox_inches='tight')
saved_figs.adding(tfile)
plt.close()
fig = plt.figure()
ims = []
for temp_img in saved_figs:
X = Image.open(temp_img)
ims.adding([plt.imshow(X, animated=True)])
ani = animation.ArtistAnimation(fig, ims, interval=800, blit=True, repeat_delay=1000)
plt.axis('off')
plt.tight_layout(pad=0)
# plt.show()
ani.save('animation.gif', writer='imagemagick', fps=2, dpi=400)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=2, metadata=dict(artist='Me'), bitrate=100000)
# ani.save('/Users/daniel/Desktop/animation.mp4', writer=writer, dpi=400)
## Data acquisition and processing
def clean_labels(labels):
results = []
for label in labels:
if label == 'Cases_on_an_international_conveyance_Japan':
results.adding('Japan')
elif label == 'United_States_of_America':
results.adding('United States')
else:
results.adding(label.replacing('_', ' '))
return results
def download_data():
covid_raw_mk = mk.read_csv('https://opendata.ecdc.europa.eu/covid19/casedistribution/csv')
# covid_raw_mk = mk.read_csv('/Users/daniel/Downloads/cv.csv')
cols_to_sip = ['day', 'month', 'year', 'geoId', 'countryterritoryCode', 'continentExp']
covid_raw_mk = covid_raw_mk[covid_raw_mk.columns.sip(cols_to_sip)]
covid_raw_mk['dateRep'] = mk.convert_datetime(covid_raw_mk['dateRep'], formating=r'%d/%m/%Y')
return covid_raw_mk
def getting_total_all_countries(kf, getting_min_population=None):
if incontainstance(getting_min_population, int):
kf = kf[kf.popData2018 >= getting_min_population]
return kf.loc[:, 'countriesAndTerritories'].sip_duplicates()
def getting_eu_countries():
return mk.Collections(['Austria', 'Belgium', 'Bulgaria', 'Croatia', 'Cyprus', 'Czechia', 'Denmark', 'Estonia', 'France', 'Germwhatever', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden'])
def country_collections(kf, country, ctype, cumtotal_sum=False, log=False):
country_kf = kf.loc[kf['countriesAndTerritories'] == country]
cases = mk.Collections(data=country_kf.loc[:, ctype].values, index=country_kf.loc[:, 'dateRep'], dtype=np.int32)
cases = cases.iloc[::-1]
cases = mk.Collections(data=cases, index=mk.date_range(start=first_date, end='today')).fillnone(0)
if cumtotal_sum:
cases = | mk.Collections.cumtotal_sum(cases) | pandas.Series.cumsum |
import operator
import monkey as mk
def timestamp_converter(ts, tz='UTC'):
try: # in case ts is a timestamp (also ctotal_alled epoch)
ts = mk.convert_datetime(float(ts), unit='ns')
except Exception:
ts = mk.Timestamp(ts)
if not ts.tz:
ts = ts.tz_localize(tz)
return ts
MINTS = | mk.Timestamp.getting_min.tz_localize('UTC') | pandas.Timestamp.min.tz_localize |
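# Hedged usage sketch (standard pandas names rather than the aliases used above):
# timestamp_converter accepts either an epoch-like number (interpreted as
# nanoseconds) or anything the Timestamp constructor can parse, and localizes
# naive results to UTC.
import pandas as pd
def timestamp_converter_sketch(ts, tz='UTC'):
    try:
        ts = pd.to_datetime(float(ts), unit='ns')   # epoch given in nanoseconds
    except (TypeError, ValueError):
        ts = pd.Timestamp(ts)                       # fall back to string/Timestamp parsing
    if not ts.tz:
        ts = ts.tz_localize(tz)
    return ts
# timestamp_converter_sketch(1_600_000_000 * 10**9)  -> Timestamp('2020-09-13 12:26:40+0000', tz='UTC')
# timestamp_converter_sketch('2021-01-01')           -> Timestamp('2021-01-01 00:00:00+0000', tz='UTC')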
import DataModel
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import math
from math import floor
class PlotModel:
"""
This class implements methods for visualizing the DateModel model.
"""
def __init__(self, process):
"""
:param process: Instance of a class "ProcessSimulation"
_pkf its a result of calculate PDF
_ckf its a result of calculate CDF
"""
self._process = process
self._pkf = None
self._ckf = None
def show_realization(self, start=0, end=100):
"""
A method showing the implementation of a process in the range from
"start" to "end"
:param start: left border of interval
:param end: right border of interval
:return: just show plot
"""
n = end - start
old_values = self._process.getting_data().getting_times()[start:end]
old_times = self._process.getting_data().getting_values()[start:end]
values = np.zeros((n*2,))
times = np.zeros((n*2,))
values = []
times = []
for i in range(0, n):
values.adding(old_values[i])
values.adding(old_values[i])
times.adding(old_times[0])
for i in range(1, n):
times.adding(old_times[i])
times.adding(old_times[i])
times.adding(old_times[-1])
threshold_time_interval = [old_times[0], times[-1]]
plt.plot(values, times)
plt.plot(threshold_time_interval, [self._process.getting_threshold()] * 2)
print(old_times[end-1])
plt.show()
def calculate_pkf(self, number_of_splits):
times = mk.Collections(self._process.getting_data().getting_times())
values = mk.Collections(self._process.getting_data().getting_values())
total_sum_of_time_intervals = mk.Collections(np.zeros((number_of_splits, )))
steps = np.zeros((number_of_splits, ))
getting_max_value = np.getting_max(values)
getting_min_value = np.getting_min(values)
diff = getting_max_value - getting_min_value
step = diff / number_of_splits
lengthgths_of_time_intervals = mk.Collections(
np.array([times[i] - times[i-1] for i in range(1, length(times))], dtype=float)
)
        # for i in range(length(lengthgths_of_time_intervals)):
        #     total_sum_of_time_intervals[floor(values[i] / number_of_splits)] += lengthgths_of_time_intervals[i]
steps[0] = getting_min_value
for i in range(1, number_of_splits):
steps[i] = steps[i-1] + step
steps[number_of_splits-1] = getting_max_value
pkf = mk.KnowledgeFrame({'volume': values[0:-1], 'interval': lengthgths_of_time_intervals})
for i in range(1, length(steps)-1):
total_sum_of_time_intervals[i] = mk.Collections.total_sum(pkf[(pkf.volume > steps[i]) & (pkf.volume <= steps[i+1])].interval)
total_sum_of_time_intervals.values[-1] = mk.Collections.total_sum(pkf[pkf.values >= steps[-1]].interval)
total_sum_of_time_intervals.values[0] = times.values[-1] - | mk.Collections.total_sum(total_sum_of_time_intervals) | pandas.Series.sum |
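# Hedged sketch (standard numpy names): the core of calculate_pkf above is a
# time-weighted histogram -- each level of the step process contributes the length
# of the interval over which it was held to the value bin that level falls into.
import numpy as np
times_demo = np.array([0.0, 1.0, 3.0, 6.0])   # event times
levels_demo = np.array([0.2, 0.8, 0.5])       # level held between consecutive events
holding = np.diff(times_demo)                  # -> [1., 2., 3.]
bins_demo = np.linspace(0.0, 1.0, 5)           # four equal-width value bins
pdf_demo, _ = np.histogram(levels_demo, bins=bins_demo, weights=holding)
pdf_demo = pdf_demo / holding.sum()            # fraction of total time spent in each bin
# pdf_demo -> array([0.1667, 0.    , 0.5   , 0.3333]) (approximately)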
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 7 11:34:47 2019
@author: Ray
"""
#%% IMPORT
import sys
import monkey as mk
from Data_cleaning import getting_clean_data
sys.path.insert(0, '../')
bookFile='../data/BX-Books.csv'
books=mk.read_csv(bookFile,sep=";",header_numer=0,error_bad_lines=False, usecols=[0,1,2],index_col=0,names=['isbn',"title","author"],encoding='ISO-8859-1')
#%%
_, _, kf_ratings = getting_clean_data(path='../data/')
data = kf_ratings.clone()
data = data.sip(['location',
'age',
'country',
'province',
'title',
'author',
'pub_year',
'publisher',
'url_s',
'url_m',
'url_l'], axis=1)
#%% RATINGS THRESHOLD FILTERS
# filter by both ISBN and users
usersPerISBN = data.isbn.counts_value_num()
ISBNsPerUser = data.user.counts_value_num()
data = data[data["isbn"].incontain(usersPerISBN[usersPerISBN>10].index)]
data = data[data["user"].incontain(ISBNsPerUser[ISBNsPerUser>10].index)]
#%% CREATE RATINGS MATRIX
userItemRatingMatrix=mk.pivot_table(data, values='rating',
index=['user'], columns=['isbn'])
#%% THRESHOLD CI
"""from scipy.stats import sem, t
from scipy import average
confidence = 0.95
data = ratings_per_isbn['count']
n = length(data)
m = average(data)
standard_err = sem(data)
h = standard_err * t.ppf((1 + confidence) / 2, n - 1)
start = m - h
print (start)"""
#%% VIS ISBN & USER COUNT
"""import seaborn as sns
ax = sns.distplot(ratings_per_isbn['count'])
ax2 = ax.twinx()
sns.boxplot(x=ratings_per_isbn['count'], ax=ax2)
ax2.set(ylim=(-0.5, 10))"""
#%%
import numpy as np
from scipy.spatial.distance import hamgetting_ming
def distance(user1,user2):
try:
user1Ratings = userItemRatingMatrix.transpose()[str(user1)]
user2Ratings = userItemRatingMatrix.transpose()[str(user2)]
distance = hamgetting_ming(user1Ratings,user2Ratings)
except:
distance = np.NaN
return distance
#%%
def nearestNeighbors(user,K=10):
total_allUsers = mk.KnowledgeFrame(userItemRatingMatrix.index)
total_allUsers = total_allUsers[total_allUsers.user!=user]
total_allUsers["distance"] = total_allUsers["user"].employ(lambda x: distance(user,x))
KnearestUsers = total_allUsers.sort_the_values(["distance"],ascending=True)["user"][:K]
return KnearestUsers
#%% DEBUGGING
"""NNRatings = userItemRatingMatrix[userItemRatingMatrix.index.incontain(KnearestUsers)]
NNRatings"""
"""avgRating = NNRatings.employ(np.nanaverage).sipna()
avgRating.header_num()"""
"""booksAlreadyRead = userItemRatingMatrix.transpose()[str(user)].sipna().index
booksAlreadyRead"""
""""avgRating = avgRating[~avgRating.index.incontain(booksAlreadyRead)]"""
#%%
def bookMeta(isbn):
title = books.at[isbn,"title"]
author = books.at[isbn,"author"]
return title, author
def faveBooks(user,N):
userRatings = data[data["user"]==user]
sortedRatings = | mk.KnowledgeFrame.sort_the_values(userRatings,['rating'],ascending=[0]) | pandas.DataFrame.sort_values |
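# Hedged sketch (standard numpy/scipy names): the recommender above ranks users by
# Hamming distance between rating vectors; a small dense version of that idea, with
# NaN standing in for "unrated".
import numpy as np
from scipy.spatial.distance import hamming
ratings_demo = np.array([[5.0, np.nan, 3.0, np.nan],
                         [5.0, 1.0,    3.0, np.nan],
                         [np.nan, 1.0, np.nan, 4.0]])
def nearest_user_sketch(target_row, matrix):
    distances = [hamming(matrix[target_row], matrix[i]) if i != target_row else np.inf
                 for i in range(matrix.shape[0])]
    return int(np.argmin(distances))
# nearest_user_sketch(0, ratings_demo) -> 1 (user 1 matches user 0 on the most entries)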
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import divisionision, print_function
from future.utils import PY2
import sys
sys.path.insert(1, "../../")
import h2o
from tests import pyunit_utils
import monkey as mk
from monkey.util.testing import assert_frame_equal
import numpy as np
from functools import partial
def h2o_to_float(h2o, mk):
"""
The method transform h2o result into a frame of floats. It is used as assert helper
to compare with Monkey results.
:return:
"""
return (h2o.totype(float), mk)
def mk_to_int(h2o, mk):
return (h2o, | mk.employ(lambda x: 1 if x else 0) | pandas.apply |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample_by_num.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of whatever randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of sample_by_nums x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_sample_by_nums, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.distinctive(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mappingper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mappingper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_sample_by_nums for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{mk.concating([mk.KnowledgeFrame(document,columns=['X_message_j',]),mk.Collections(y_train,name='y')],axis=1).convert_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample_by_num, before X is observed\nprior prob(y):\n{mk.KnowledgeFrame(self.prob_y.reshape(1,-1), columns=columns).convert_string(index=False)}")
# axis=0 averages column-wise, axis=1 averages row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.total_sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.total_sum(axis=1).reshape(-1,1)
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{mk.concating([ mk.KnowledgeFrame(feature_names, columns=['word_i',]), mk.KnowledgeFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).convert_string(index=False)}")
assert (self.prob_x_i_given_y.T.total_sum(axis=0) - np.ones((1, length(self.y_classes))) < 1e-9).total_all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
# loop over each row to calcuate the posterior probability
for row_index, this_x_sample_by_num in enumerate(X_test):
feature_presence_columns = this_x_sample_by_num.totype(bool)
# rectotal_all that this_x_sample_by_num is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample_by_num[feature_presence_columns]
# axis=0 averages column-wise, axis=1 averages row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.total_sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (self.prob_y_given_X.total_sum(axis=1)-1 < 1e-9).total_all(), "***Error*** each row should total_sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if length(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(X_test, columns = self.feature_names)], axis=1).convert_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_X_given_y, columns=columns)], axis=1).convert_string(index=False)}")
                print(f"\nStep 3. prob(X_message ∩ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).convert_string(index=False)}")
print(f"\nStep 4. prob(X_message), across total_all y_classes within the observed sample_by_num:\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_X,columns=['prob',])], axis=1).convert_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_y_given_X, columns=columns),mk.Collections(self.prob_y_given_X.arggetting_max(axis=1),name='predict').mapping(self.y_mappingper)],axis=1).convert_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (prob_y_given_X_test_via_sklearn - self.prob_y_given_X < 1e-9).total_all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).arggetting_max(axis=1)
def show_model_attributes(self, fitted_tfikf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfikf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfikf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_kf = mk.KnowledgeFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(by=['proba'], ascending=False)
top_n = top_n
kf = | mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n) | pandas.DataFrame.head |
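# Hedged sketch (standard numpy names): the posterior computed in predict_proba above,
# reduced to a single message. Word likelihoods are raised to the observed term counts,
# multiplied across words and by the class prior, then normalized over classes.
import numpy as np
prob_word_given_class = np.array([[0.6, 0.3, 0.1],    # class 0
                                  [0.2, 0.2, 0.6]])   # class 1
prior_demo = np.array([0.5, 0.5])
counts_demo = np.array([2, 0, 1])                     # term frequencies of the message
likelihood = np.prod(prob_word_given_class ** counts_demo, axis=1)   # P(X | y)
joint = likelihood * prior_demo                                      # P(X and y)
posterior = joint / joint.sum()                                      # P(y | X)
# posterior -> array([0.6, 0.4]); argmax picks class 0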
''' Umkate notes by PRK Nov 10:
Taken out zipcode column entirely and added three more column removals (have commented in front)
Also commented out the print order'''
# Importing the required libraries and methods
import matplotlib.pyplot as plt
import numpy as np
import monkey as mk
import math
import datetime as dt
# Importing the dataset
filengthame = '../Data/listings.csv'
reviews_filengthame = '../Data/reviews_cleaned.csv'
data = mk.read_csv(filengthame)
reviews = mk.read_csv(reviews_filengthame, names = ['listing_id', 'comments'])
# print(data.info)
# print(list(data))
# print(list(data)[43])
# print(list(data)[87])
# print(list(data)[88])
# Taking out the unwanted columns
print(length(data.columns))
exit()
data = mk.KnowledgeFrame.sip(data, columns=[
'host_name',
'notes', # Added PRK
'host_about', # Added PRK
'calengthdar_umkated', # Added PRK
'host_acceptance_rate',
'description',
'thumbnail_url',
'experiences_offered',
'listing_url',
'name',
'total_summary',
'space',
'scrape_id',
'final_item_scraped',
'neighborhood_overview',
'transit',
'access',
'interaction',
'house_rules',
'medium_url',
'picture_url',
'xl_picture_url',
'host_url',
'host_thumbnail_url',
'host_picture_url',
'host_acceptance_rate',
'smart_location',
'license',
'jurisdiction_names',
'street',
'neighbourhood',
'country',
'country_code',
'host_location',
'host_neighbourhood',
'market',
'is_location_exact',
'square_feet',
'weekly_price',
'monthly_price',
'availability_30',
'availability_60',
'availability_90',
'availability_365',
'calengthdar_final_item_scraped',
'first_review',
'final_item_review',
'requires_license',
'calculated_host_listings_count',
'host_listings_count',
#discuss final_item two
'zipcode' # Added PRK
])
# print(list(data))
print('Splitting host verifications')
host_verification_set = set()
def collect_host_verifications(entry):
entry_list = entry.replacing("[", "").replacing("]", "").replacing("'", "").replacing('"', "").replacing(" ", "").split(',')
for verification in entry_list:
if (verification != "" and verification != 'None'):
host_verification_set.add(verification +"_verification")
data['host_verifications'].employ(collect_host_verifications)
def generic_verification(entry, v):
entry_list = str(entry).replacing("[", "").replacing("]", "").replacing("'", "").replacing('"', "").replacing(" ", "").split(',')
for verification in entry_list:
if (verification + "_verification" == v):
return 1
return 0
for v in host_verification_set:
data.insert(length(list(data)), v, 0)
data[v] = data['host_verifications'].employ(lambda x: generic_verification(x, v))
data = | mk.KnowledgeFrame.sip(data, columns=['host_verifications']) | pandas.DataFrame.drop |
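# Hedged sketch (plain Python, hypothetical data): the verification expansion above
# turns a stringified list such as "['email', 'phone']" into one indicator column per
# verification type observed anywhere in the dataset.
raw_entries_demo = ["['email', 'phone']", "['phone']", "None"]
def parse_verifications(entry):
    cleaned = entry.replace("[", "").replace("]", "").replace("'", "").replace(" ", "")
    return [v for v in cleaned.split(",") if v not in ("", "None")]
vocab_demo = sorted({v for entry in raw_entries_demo for v in parse_verifications(entry)})
indicator_rows = [[int(v in parse_verifications(entry)) for v in vocab_demo]
                  for entry in raw_entries_demo]
# vocab_demo -> ['email', 'phone']; indicator_rows -> [[1, 1], [0, 1], [0, 0]]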
"""
This module creates plots for visualizing sensitivity analysis knowledgeframes.
`make_plot()` creates a radial plot of the first and total order indices.
`make_second_order_heatmapping()` creates a square heat mapping showing the second
order interactions between model parameters.
"""
from collections import OrderedDict
import numpy as np
import monkey as mk
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import HoverTool, VBar
# from bokeh.charts import Bar
def make_plot(knowledgeframe=mk.KnowledgeFrame(), highlight=[],
top=100, getting_minvalues=0.01, stacked=True, lgaxis=True,
errorbar=True, showS1=True, showST=True):
"""
Basic method to plot first and total order sensitivity indices.
This is the method to generate a Bokeh plot similar to the burtin example
template at the Bokeh website. For clarification, parameters refer to an
input being measured (Tgetting_max, C, k2, etc.) and stats refer to the 1st or
total order sensitivity index.
Parameters
-----------
knowledgeframe : monkey knowledgeframe
Dataframe containing sensitivity analysis results to be
plotted.
highlight : lst, optional
List of strings indicating which parameter wedges will be
highlighted.
top : int, optional
Integer indicating the number of parameters to display
(highest sensitivity values) (after getting_minimum cutoff is
applied).
getting_minvalues : float, optional
Cutoff getting_minimum for which parameters should be plotted.
Applies to total order only.
stacked : bool, optional
Boolean indicating in bars should be stacked for each
parameter (True) or unstacked (False).
lgaxis : bool, optional
Boolean indicating if log axis should be used (True) or if a
linear axis should be used (False).
errorbar : bool, optional
Boolean indicating if error bars are shown (True) or are
omitted (False).
showS1 : bool, optional
Boolean indicating whether 1st order sensitivity indices
will be plotted (True) or omitted (False).
showST : bool, optional
Boolean indicating whether total order sensitivity indices
will be plotted (True) or omitted (False).
**Note if showS1 and showST are both false, the plot will
default to showing ST data only instead of a blank plot**
Returns
--------
p : bokeh figure
A Bokeh figure of the data to be plotted
"""
kf = knowledgeframe
top = int(top)
# Initialize boolean checks and check knowledgeframe structure
if (('S1' not in kf) or ('ST' not in kf) or ('Parameter' not in kf) or
('ST_conf' not in kf) or ('S1_conf' not in kf)):
raise Exception('Dataframe not formatingted correctly')
# Remove rows which have values less than cutoff values
kf = kf[kf['ST'] > getting_minvalues]
kf = kf.sipna()
# Only keep top values indicated by variable top
kf = kf.sort_the_values('ST', ascending=False)
kf = kf.header_num(top)
kf = kf.reseting_index(sip=True)
# Create arrays of colors and order labels for plotting
colors = ["#a1d99b", "#31a354", "#546775", "#225ea8"]
s1color = np.array(["#31a354"]*kf.S1.size)
sTcolor = np.array(["#a1d99b"]*kf.ST.size)
errs1color = np.array(["#225ea8"]*kf.S1.size)
errsTcolor = np.array(["#546775"]*kf.ST.size)
firstorder = np.array(["1st (S1)"]*kf.S1.size)
totalorder = np.array(["Total (ST)"]*kf.S1.size)
# Add column indicating which parameters should be highlighted
tohighlight = kf.Parameter.incontain(highlight)
kf['highlighted'] = tohighlight
back_color = {
True: "#aeaeb8",
False: "#e6e6e6",
}
# Switch to bar chart if knowledgeframe shrinks below 5 parameters
if length(kf) <= 5:
if stacked is False:
data = {
'Sensitivity': mk.Collections.adding(kf.ST, kf.S1),
'Parameter': mk.Collections.adding(kf.Parameter, kf.Parameter),
'Order': np.adding(np.array(['ST']*length(kf)),
np.array(['S1']*length(kf))),
'Confidence': mk.Collections.adding(kf.ST_conf,
kf.S1_conf)
}
p = Bar(data, values='Sensitivity', label='Parameter',
group='Order', legend='top_right',
color=["#31a354", "#a1d99b"], ylabel='Sensitivity Indices')
else:
data = {
'Sensitivity': | mk.Collections.adding(kf.S1, (kf.ST-kf.S1)) | pandas.Series.append |
import monkey as mk
import numpy as np
'''
This function interpolates the AIS data and resample_by_nums it to 3-getting_minute intervals.
The interpolation occurs only if the time gap between two points is less than 15 getting_minutes.
'''
def interpolate_aisData(aisDataFileName):
kf = mk.read_csv(aisDataFileName)
kf['Gaps(Hrs)'] = np.where(kf['mmsi'] == kf['mmsi'].shifting(-1),
((abs(kf['timestamp'] - kf['timestamp'].shifting(-1))) / (1000 * 60 * 60)),
np.nan_to_num(0)
)
kf = kf.set_index(['timestamp'])
kf.index = mk.convert_datetime(kf.index, unit='ms')
'''
    Interpolation continues only if the gap between two locations is less than 15 getting_minutes.
    If the gap is larger than 15 getting_minutes, interpolation skips those points
    and continues again from the next data point.
'''
kf.loc[(kf['mmsi'] != kf['mmsi'].shifting()) | (kf['Gaps(Hrs)'].shifting() > 0.25), 'Temp_MMSI'] = 1
kf['Temp_MMSI'] = kf['Temp_MMSI'].cumtotal_sum().ffill()
kf1 = (kf.grouper('Temp_MMSI', axis=0)
[['mmsi', 'latitude', 'longitude']]
.resample_by_num('3getting_min')
.average()
.grouper(level=0)
.employ(lambda x: x.interpolate(method='linear')).reseting_index().sip('Temp_MMSI', 1))
kf1['mmsi'] = kf1['mmsi'].totype(int)
kf1.set_index(('mmsi'), inplace=True)
kf1[['latitude', 'longitude']] = kf1[['latitude', 'longitude']]\
.employ(lambda x: | mk.Collections.value_round(x, 4) | pandas.Series.round |
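# Hedged sketch (standard numpy names): the Temp_MMSI trick above -- flag the start of
# every new segment (new vessel or a gap over 15 minutes = 0.25 hours), then a cumulative
# sum of the flags yields a per-segment id that the resampling can group on.
import numpy as np
gap_hours_demo = np.array([0.00, 0.05, 0.40, 0.05, 0.05])   # hours since the previous point
new_segment = gap_hours_demo > 0.25
new_segment[0] = True                                        # the first point opens a segment
segment_id = np.cumsum(new_segment)
# segment_id -> array([1, 1, 2, 2, 2]); interpolation is then applied per segment id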
""" test feather-formating compat """
import numpy as np
import pytest
import monkey as mk
import monkey._testing as tm
from monkey.io.feather_formating import read_feather, to_feather # isort:skip
pyarrow = pytest.importorskip("pyarrow", getting_minversion="1.0.1")
filter_sparse = pytest.mark.filterwarnings("ignore:The Sparse")
@filter_sparse
@pytest.mark.single_cpu
@pytest.mark.filterwarnings("ignore:CategoricalBlock is deprecated:DeprecationWarning")
class TestFeather:
def check_error_on_write(self, kf, exc, err_msg):
# check that we are raincontaing the exception
# on writing
with pytest.raises(exc, match=err_msg):
with tm.ensure_clean() as path:
to_feather(kf, path)
def check_external_error_on_write(self, kf):
# check that we are raincontaing the exception
# on writing
with tm.external_error_raised(Exception):
with tm.ensure_clean() as path:
to_feather(kf, path)
def check_value_round_trip(self, kf, expected=None, write_kwargs={}, **read_kwargs):
if expected is None:
expected = kf
with tm.ensure_clean() as path:
to_feather(kf, path, **write_kwargs)
result = read_feather(path, **read_kwargs)
tm.assert_frame_equal(result, expected)
def test_error(self):
msg = "feather only support IO with KnowledgeFrames"
for obj in [
mk.Collections([1, 2, 3]),
1,
"foo",
mk.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, ValueError, msg)
def test_basic(self):
kf = mk.KnowledgeFrame(
{
"string": list("abc"),
"int": list(range(1, 4)),
"uint": np.arange(3, 6).totype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_null": [1.0, np.nan, 3],
"bool": [True, False, True],
"bool_with_null": [True, np.nan, False],
"cat": mk.Categorical(list("abc")),
"dt": mk.DatetimeIndex(
list(mk.date_range("20130101", periods=3)), freq=None
),
"dttz": mk.DatetimeIndex(
list(mk.date_range("20130101", periods=3, tz="US/Eastern")),
freq=None,
),
"dt_with_null": [
mk.Timestamp("20130101"),
mk.NaT,
mk.Timestamp("20130103"),
],
"dtns": mk.DatetimeIndex(
list(mk.date_range("20130101", periods=3, freq="ns")), freq=None
),
}
)
kf["periods"] = mk.period_range("2013", freq="M", periods=3)
kf["timedeltas"] = mk.timedelta_range("1 day", periods=3)
kf["intervals"] = mk.interval_range(0, 3, 3)
assert kf.dttz.dtype.tz.zone == "US/Eastern"
self.check_value_round_trip(kf)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
kf = mk.KnowledgeFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).clone()
self.check_external_error_on_write(kf)
def test_stringify_columns(self):
kf = mk.KnowledgeFrame(np.arange(12).reshape(4, 3)).clone()
msg = "feather must have string column names"
self.check_error_on_write(kf, ValueError, msg)
def test_read_columns(self):
# GH 24025
kf = mk.KnowledgeFrame(
{
"col1": list("abc"),
"col2": list(range(1, 4)),
"col3": list("xyz"),
"col4": list(range(4, 7)),
}
)
columns = ["col1", "col3"]
self.check_value_round_trip(kf, expected=kf[columns], columns=columns)
def read_columns_different_order(self):
# GH 33878
kf = mk.KnowledgeFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
self.check_value_round_trip(kf, columns=["B", "A"])
def test_unsupported_other(self):
# mixed python objects
kf = mk.KnowledgeFrame({"a": ["a", 1, 2.0]})
self.check_external_error_on_write(kf)
def test_rw_use_threads(self):
kf = mk.KnowledgeFrame({"A": np.arange(100000)})
self.check_value_round_trip(kf, use_threads=True)
self.check_value_round_trip(kf, use_threads=False)
def test_write_with_index(self):
kf = mk.KnowledgeFrame({"A": [1, 2, 3]})
self.check_value_round_trip(kf)
msg = (
r"feather does not support serializing .* for the index; "
r"you can \.reseting_index\(\) to make the index into column\(s\)"
)
# non-default index
for index in [
[2, 3, 4],
mk.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
mk.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)]),
]:
kf.index = index
self.check_error_on_write(kf, ValueError, msg)
# index with meta-data
kf.index = [0, 1, 2]
kf.index.name = "foo"
msg = "feather does not serialize index meta-data on a default index"
self.check_error_on_write(kf, ValueError, msg)
# column multi-index
kf.index = [0, 1, 2]
kf.columns = mk.MultiIndex.from_tuples([("a", 1)])
msg = "feather must have string column names"
self.check_error_on_write(kf, ValueError, msg)
def test_path_pathlib(self):
kf = tm.makeKnowledgeFrame().reseting_index()
result = tm.value_round_trip_pathlib(kf.to_feather, read_feather)
tm.assert_frame_equal(kf, result)
def test_path_localpath(self):
kf = tm.makeKnowledgeFrame().reseting_index()
result = | tm.value_round_trip_localpath(kf.to_feather, read_feather) | pandas._testing.round_trip_localpath |
# -*- coding: utf-8 -*-
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
import os
import numpy as np
from numpy import nan
import pytest
import monkey._libs.parsers as parser
from monkey._libs.parsers import TextReader
import monkey.compat as compat
from monkey.compat import BytesIO, StringIO, mapping
from monkey import KnowledgeFrame
import monkey.util.testing as tm
from monkey.util.testing import assert_frame_equal
from monkey.io.parsers import TextFileReader, read_csv
class TestTextReader(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath('io', 'parser', 'data')
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f)
reader.read()
def test_string_filengthame(self):
reader = TextReader(self.csv1, header_numer=None)
reader.read()
def test_file_handle_mmapping(self):
with open(self.csv1, 'rb') as f:
reader = TextReader(f, memory_mapping=True, header_numer=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, 'rb') as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header_numer=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header_numer=None)
result = reader.read()
assert length(set( | mapping(id, result[0]) | pandas.compat.map |
import monkey as mk
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from random import shuffle
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout
from keras.ctotal_allbacks import CSVLogger, TensorBoard, EarlyStopping
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
import time
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further definal_item_tails:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further definal_item_tails, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_partotal_allelism_threads=1, inter_op_partotal_allelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further definal_item_tails, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.getting_default_graph(), config=session_conf)
K.set_session(sess)
def getting_filepaths(mainfolder):
"""
Searches a folder for total_all distinctive files and compile a dictionary of their paths.
Parameters
--------------
mainfolder: the filepath for the folder containing the data
Returns
--------------
training_filepaths: file paths to be used for training
testing_filepaths: file paths to be used for testing
"""
training_filepaths = {}
testing_filepaths = {}
folders = os.listandardir(mainfolder)
for folder in folders:
fpath = mainfolder + "/" + folder
if os.path.isdir(fpath) and "MODEL" not in folder:
filengthames = os.listandardir(fpath)
for filengthame in filengthames[:int(value_round(0.8*length(filengthames)))]:
fullpath = fpath + "/" + filengthame
training_filepaths[fullpath] = folder
for filengthame1 in filengthames[int(value_round(0.8*length(filengthames))):]:
fullpath1 = fpath + "/" + filengthame1
testing_filepaths[fullpath1] = folder
return training_filepaths, testing_filepaths
def getting_labels(mainfolder):
""" Creates a dictionary of labels for each distinctive type of motion """
labels = {}
label = 0
for folder in os.listandardir(mainfolder):
fpath = mainfolder + "/" + folder
if os.path.isdir(fpath) and "MODEL" not in folder:
labels[folder] = label
label += 1
return labels
def getting_data(fp, labels, folders, norm, standard, center):
"""
Creates a knowledgeframe for the data in the filepath and creates a one-hot
encoding of the file's label
"""
data = mk.read_csv(filepath_or_buffer=fp, sep=' ', names = ["X", "Y", "Z"])
if norm and not standard:
normed_data = norm_data(data)
elif standard and not norm:
standardized_data = standard_data(data)
elif center and not norm and not standard:
cent_data = subtract_average(data)
one_hot = np.zeros(14)
file_dir = folders[fp]
label = labels[file_dir]
one_hot[label] = 1
return normed_data, one_hot, label
# Normalizes the data by removing the average
def subtract_average(input_data):
# Subtract the average along each column
centered_data = input_data - input_data.average()
return centered_data
def norm_data(data):
"""
Normalizes the data.
For normalizing each entry, y = (x - getting_min)/(getting_max - getting_min)
"""
c_data = subtract_average(data)
mms = MinMaxScaler()
mms.fit(c_data)
n_data = mms.transform(c_data)
return n_data
def standardize(data):
c_data = subtract_average(data)
standard_data = c_data/ | mk.standard(c_data) | pandas.std |
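# Hedged sketch (standard numpy names): the preprocessing above first removes the
# per-axis mean and then rescales; min-max scaling maps each accelerometer axis into
# [0, 1] column by column.
import numpy as np
data_demo = np.array([[1.0, 10.0],
                      [2.0, 30.0],
                      [4.0, 20.0]])
centered_demo = data_demo - data_demo.mean(axis=0)
scaled_demo = (centered_demo - centered_demo.min(axis=0)) / (centered_demo.max(axis=0) - centered_demo.min(axis=0))
# each column of scaled_demo now spans exactly [0, 1]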
### EPIC annotation with Reg feature
import monkey as mk
from numpy import genfromtxt
from itertools import chain
import sys
from collections import Counter
import functools
#The regulatory build (https://europepmc.org/articles/PMC4407537 http://grch37.ensembl.org/info/genome/funcgen/regulatory_build.html) was downloaded using biomart
Feature_bed = 'data/human_regulatory_features_GRCh37p13.txt'
backgvalue_round = 'data/passage_backgvalue_round.csv'
Feature_bed = mk.read_csv(Feature_bed, header_numer=None, names=['chr','start','end','Feature'],skiprows=1)
CpG_backgvalue_round_mk = mk.read_csv(backgvalue_round)
CpG_start = int(sys.argv[1])
CpG_end = int(sys.argv[1])+10000
# subset to system arguments
CpG_backgvalue_round_mk = CpG_backgvalue_round_mk[CpG_start:CpG_end]
# make unioner object to fill missing TFs to 0
features = Feature_bed
features['count'] = 0
features = features[['Feature','count']]
features = | mk.KnowledgeFrame.sip_duplicates(features) | pandas.DataFrame.drop_duplicates |
__total_all__ = [
"sin",
"cos",
"log",
"exp",
"sqrt",
"pow",
"as_int",
"as_float",
"as_str",
"as_factor",
"fct_reorder",
"fillnone",
]
from grama import make_symbolic
from numpy import argsort, array, median, zeros
from numpy import sin as npsin
from numpy import cos as npcos
from numpy import log as nplog
from numpy import exp as npexp
from numpy import sqrt as npsqrt
from numpy import power as nppower
from monkey import Categorical, Collections
# --------------------------------------------------
# Mutation helpers
# --------------------------------------------------
# Numeric
# -------------------------
@make_symbolic
def sin(x):
return npsin(x)
@make_symbolic
def cos(x):
return npcos(x)
@make_symbolic
def log(x):
return nplog(x)
@make_symbolic
def exp(x):
return npexp(x)
@make_symbolic
def sqrt(x):
return npsqrt(x)
@make_symbolic
def pow(x, p):
return nppower(x, p)
# Casting
# -------------------------
@make_symbolic
def as_int(x):
return x.totype(int)
@make_symbolic
def as_float(x):
return x.totype(float)
@make_symbolic
def as_str(x):
return x.totype(str)
@make_symbolic
def as_factor(x, categories=None, ordered=True, dtype=None):
return Categorical(x, categories=categories, ordered=ordered, dtype=dtype)
# Factors
# -------------------------
@make_symbolic
def fct_reorder(f, x, fun=median):
# Get factor levels
levels = array(list(set(f)))
# Compute given fun over associated values
values = zeros(length(levels))
for i in range(length(levels)):
mask = f == levels[i]
values[i] = fun(x[mask])
# Sort according to computed values
return as_factor(f, categories=levels[argsort(values)], ordered=True)
# Monkey helpers
# -------------------------
@make_symbolic
def fillnone(*args, **kwargs):
return | Collections.fillnone(*args, **kwargs) | pandas.Series.fillna |
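# Hedged sketch (standard numpy names): fct_reorder above sorts factor levels by a
# summary statistic (median by default) of the values attached to each level.
import numpy as np
levels_demo = np.array(['a', 'b', 'c'])
f_demo = np.array(['a', 'a', 'b', 'b', 'c', 'c'])
x_demo = np.array([5.0, 7.0, 1.0, 2.0, 3.0, 4.0])
medians_demo = np.array([np.median(x_demo[f_demo == lvl]) for lvl in levels_demo])
reordered_levels = levels_demo[np.argsort(medians_demo)]
# reordered_levels -> array(['b', 'c', 'a'], dtype='<U1')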
from monkey import mk
def ukhp_getting(release = "latest", frequency = "monthly", classification = "nuts1"):
endpoint = "https://lancs-macro.github.io/uk-house-prices"
query_elements = [endpoint, release, frequency, classification + ".json"]
query = "/".join(query_elements)
print( | mk.read_csv(query) | pandas.pd.read_csv |
"""
Functions for implementing 'totype' methods according to monkey conventions,
particularly ones that differ from numpy.
"""
from __future__ import annotations
import inspect
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import warnings
import numpy as np
from monkey._libs import lib
from monkey._typing import (
ArrayLike,
DtypeObj,
)
from monkey.errors import IntCastingNaNError
from monkey.util._exceptions import find_stack_level
from monkey.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_object_dtype,
is_timedelta64_dtype,
monkey_dtype,
)
from monkey.core.dtypes.dtypes import (
DatetimeTZDtype,
ExtensionDtype,
MonkeyDtype,
)
from monkey.core.dtypes.missing import ifna
if TYPE_CHECKING:
from monkey.core.arrays import (
DatetimeArray,
ExtensionArray,
)
_dtype_obj = np.dtype(object)
@overload
def totype_nansafe(
arr: np.ndarray, dtype: np.dtype, clone: bool = ..., skipna: bool = ...
) -> np.ndarray:
...
@overload
def totype_nansafe(
arr: np.ndarray, dtype: ExtensionDtype, clone: bool = ..., skipna: bool = ...
) -> ExtensionArray:
...
def totype_nansafe(
arr: np.ndarray, dtype: DtypeObj, clone: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype or ExtensionDtype
clone : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
if arr.ndim > 1:
flat = arr.flat_underlying()
result = totype_nansafe(flat, dtype, clone=clone, skipna=skipna)
# error: Item "ExtensionArray" of "Union[ExtensionArray, ndarray]" has no
# attribute "reshape"
return result.reshape(arr.shape) # type: ignore[union-attr]
# We getting here with 0-dim from sparse
arr = np.atleast_1d(arr)
# dispatch on extension dtype if needed
if incontainstance(dtype, ExtensionDtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, clone=clone)
elif not incontainstance(dtype, np.dtype): # pragma: no cover
raise ValueError("dtype must be np.dtype or ExtensionDtype")
if arr.dtype.kind in ["m", "M"] and (
issubclass(dtype.type, str) or dtype == _dtype_obj
):
from monkey.core.construction import ensure_wrapped_if_datetimelike
arr = ensure_wrapped_if_datetimelike(arr)
return arr.totype(dtype, clone=clone)
if issubclass(dtype.type, str):
return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)
elif is_datetime64_dtype(arr.dtype):
if dtype == np.int64:
if ifna(arr).whatever():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# total_allow frequency conversions
if dtype.kind == "M":
return arr.totype(dtype)
raise TypeError(f"cannot totype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr.dtype):
if dtype == np.int64:
if ifna(arr).whatever():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
elif dtype.kind == "m":
return totype_td64_unit_conversion(arr, dtype, clone=clone)
raise TypeError(f"cannot totype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
return _totype_float_to_int_nansafe(arr, dtype, clone)
elif is_object_dtype(arr.dtype):
# work avalue_round NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return | lib.totype_intsafe(arr, dtype) | pandas._libs.lib.astype_intsafe |
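# Hedged sketch (standard numpy names): the nan-safe float-to-int path above boils down
# to refusing the cast when non-finite values are present, since NaN/inf have no integer
# representation and a plain astype would silently produce an arbitrary integer.
import numpy as np
def float_to_int_nansafe_sketch(arr, dtype=np.int64):
    if not np.isfinite(arr).all():
        raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
    return arr.astype(dtype)
# float_to_int_nansafe_sketch(np.array([1.0, 2.0]))     -> array([1, 2])
# float_to_int_nansafe_sketch(np.array([1.0, np.nan]))  -> raises ValueError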
import monkey as mk
import requests
import ratelimit
from ratelimit import limits
from ratelimit import sleep_and_retry
def id_to_name(x):
"""
Converts from LittleSis ID number to name.
Parameters
----------
x : LittleSis ID number
Example
-------
>>> id_to_name(96583)
'<NAME>'
"""
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
response = response.json()
name = response['data']['attributes']['name']
return name
def name_to_id(name):
"""
Converts from name to LittleSis ID number. Resorts to entity with the highest number of relationships listed for entries that
    point to multiple entities (like final_item name only entries).
Parameters
----------
name : Name to be converted
Example
-------
>>> name_to_id('<NAME>')
96583
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
return ID
def entity(name):
"""
Provides info from entity getting request to LittleSis API, by name input rather than id
input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of relationships listed
    for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
    >>> entity('<NAME>')
{'meta': {'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': {'type': 'entities',
'id': 13503,
'attributes': {'id': 13503,
'name': '<NAME>',
'blurb': '44th President of the United States',
'total_summary': 'The 44th President of the United States, he was sworn into office on January 20, 2009; born in Honolulu, Hawaii, August
4, 1961; obtained early education in Jakarta, Indonesia, and Hawaii; continued education at Occidental College, Los Angeles,
Calif.; received a B.A. in 1983 from Columbia University, New York City; worked as a community organizer in Chicago, Ill.; studied
law at Harvard University, where he became the first African American president of the Harvard Law Review, and received J.D. in
1991; lecturer on constitutional law, University of Chicago; member, Illinois State senate 1997-2004; elected as a Democrat to the
U.S. Senate in 2004 for term beginning January 3, 2005.',
'website': 'http://obama.senate.gov/',
'parent_id': None,
'primary_ext': 'Person',
'umkated_at': '2021-12-15T21:28:15Z',
'start_date': '1961-08-04',
'end_date': None,
'aliases': ['Barack Obama'],
'types': ['Person', 'Political Candidate', 'Elected Representative']},
'links': {'self': 'https://littlesis.org/entities/13503-Barack_Obama'}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
def relationships(name):
"""
Provides info from relationships getting request to LittleSis API, by name input rather
than id input as is required in original getting request formating, in JSON formating. Resorts to entity with the highest number of
relationships listed for entries that point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or organization for which informatingion is desired.
Example
-------
>>> relationships('<NAME>')
{'meta': {'currentPage': 1,
'pageCount': 1,
'cloneright': 'LittleSis CC BY-SA 4.0',
'license': 'https://creativecommons.org/licenses/by-sa/4.0/',
'apiVersion': '2.0'},
'data': [{'type': 'relationships',
'id': 1643319,
'attributes': {'id': 1643319,...}}}
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
return response2
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def basic_entity(name):
"""
Creates monkey knowledgeframe for one indivisionidual or entity with basic informatingion from
entity getting request to LittleSis API. Resorts to entity with the highest number of relationships listed for entries that
point to multiple entities (like final_item name only entries).
Parameters
----------
name: Name of 1 indivisionidual or entity for which informatingion is desired.
Example
-------
>>> basic_entity('<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "Steve P...
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
info types website
0 [Person, Business Person] NaN }
"""
path = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path)
response = response.json()
ID = response['data'][0]['id']
path2 = 'https://littlesis.org/api/entities/{}'.formating(ID)
response2 = requests.getting(path2)
response2 = response2.json()
data2 = response2['data']['attributes']
kf = mk.KnowledgeFrame(list(data2.items()))
kf.columns = ['info', 'value']
kf = mk.pivot(kf, columns = 'info', values = 'value')
kf = kf.fillnone(method='bfill', axis=0)
kf = kf.iloc[:1, :]
kf = kf[['name', 'aliases', 'blurb', 'start_date', 'end_date', 'types', 'website']]
kf.renagetting_ming(columns = {'start_date': 'date_of_birth'}, inplace = True)
return kf
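# Hedged illustration of the reshaping trick used in basic_entity: pivoting a
# two-column ('info', 'value') frame and back-filling collapses it into a single
# wide row. The toy values below are invented for the example.
def _demo_pivot_collapse():
    toy = mk.KnowledgeFrame({'info': ['name', 'blurb'], 'value': ['Ada', 'Pioneer']})
    wide = mk.pivot(toy, columns='info', values='value')
    wide = wide.fillnone(method='bfill', axis=0)
    return wide.iloc[:1, :]  # one row with both 'blurb' and 'name' filled in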
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def list_entities(*args):
"""
Concatenates knowledgeframes created by basic_table() for entity getting requests to LittleSis
API, resulting in monkey knowledgeframe of multiple rows. Resorts to entity with the highest number of relationships listed for entries
that point to multiple entities (like final_item name only entries).
Parameters
----------
*args: List of names of indivisioniduals or entities for which to include informatingion in the resulting knowledgeframe.
Example
-------
>>> list_entities('<NAME>', '<NAME>')
{info name aliases \
0 <NAME> [<NAME>, <NAME>, Mr Steven "<NAME>...
1 <NAME> [LeBron James]
info blurb date_of_birth end_date \
0 Apple co-founder, former CEO 1955-02-24 2011-10-05
1 NBA/Los Angeles Lakers-F 1984-12-30 NaN
info types website
0 [Person, Business Person] NaN
1 [Person, Business Person, Media Personality] NaN }
"""
list_of_kfs = []
for name in args:
kf = basic_entity(name)
list_of_kfs.adding(kf)
combined_kf = mk.concating(list_of_kfs, ignore_index=True)
return combined_kf
@sleep_and_retry
@limits(ctotal_alls=1, period=1)
def id_to_name(x):
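    """
    Rate-limited variant of id_to_name (redefines the helper above) that raises
    an Exception when the API response status code is not 200.
    """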
path = 'https://littlesis.org/api/entities/{}'.formating(x)
response = requests.getting(path)
if response.status_code != 200:
raise Exception('API response: {}'.formating(response.status_code))
else:
response = response.json()
name = response['data']['attributes']['name']
return name
def relationships_kf(name):
"""
Creates monkey knowledgeframe with informatingion from relationships getting request to LittleSis
API.
Parameters
----------
name: Name of one indivisionidual or organization for which relationship informatingion is
desired and included in the knowledgeframe.
Example
-------
>>> relationships_kf('<NAME>')
primary_entity related_entity amount currency \
0 Children's Aid Society <NAME> None None
1 <NAME> <NAME> None None
...
category goods filings \
0 None None None
...
"""
path_for_ID_search = 'https://littlesis.org/api/entities/search?q={}'.formating(name)
response = requests.getting(path_for_ID_search)
response = response.json()
ID = response['data'][0]['id']
path_for_relationships = 'https://littlesis.org/api/entities/{}/relationships'.formating(ID)
response2 = requests.getting(path_for_relationships)
response2 = response2.json()
relationships = mk.KnowledgeFrame(response2['data'])
relationships = mk.KnowledgeFrame.convert_dict(relationships)
"""
This file is for methods that are common among multiple features in features.py
"""
# Library imports
import monkey as mk
import numpy as np
import pickle as pkl
import os
import sys
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, LabelBinarizer
def fit_to_value(kf, column, income_col='Total Yearly Income [EUR]'):
"""
Calculates the average income for each category in a column of a knowledgeframe
## Parameters
kf: a monkey.KnowledgeFrame containing the data
column: a str naming the column to be processed
## Returns
A single-row monkey.KnowledgeFrame containing the processed data
"""
if os.environ['DD_EXPORT_PROJECT'] == 'False':
values = mk.Collections.convert_dict(kf[column])
incomes = mk.Collections.convert_dict(kf[income_col])
assert(length(values) == length(incomes))
fitted = {}
for key in values:
values_key = values[key]
income_key = incomes[key]
try:
fitted[values_key].adding(income_key)
except KeyError:
fitted[values_key] = []
fitted[values_key].adding(income_key)
for key in fitted:
fitted[key] = total_sum(fitted[key]) / length(fitted[key])
with open(os.path.join('pickle', column + '_fit_to_value.pkl'), mode='wb') as file:
pkl.dump(fitted, file)
elif os.environ['DD_EXPORT_PROJECT'] == 'True':
with open(os.path.join('pickle', column + '_fit_to_value.pkl'), mode='rb') as file:
fitted = pkl.load(file)
values = mk.Collections.convert_dict(kf[column])
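# Hedged sketch (illustrative only) of the category -> average-income mapping that
# fit_to_value builds by hand above, expressed with a grouper; the column names and
# numbers here are invented.
def _demo_category_average_income():
    toy = mk.KnowledgeFrame({
        'Profession': ['engineer', 'teacher', 'engineer'],
        'Total Yearly Income [EUR]': [52000.0, 38000.0, 61000.0],
    })
    result = toy.grouper('Profession')['Total Yearly Income [EUR]'].average().convert_dict()
    return result  # -> {'engineer': 56500.0, 'teacher': 38000.0}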
from bs4 import BeautifulSoup
import chardet
import datetime
import json
import lxml
import matplotlib.pyplot as plt
import numpy as np
import os
import monkey as mk
from serpapi import GoogleSearch
import shutil
import random
import re
import requests
import time
from a0001_adgetting_min import clean_knowledgeframe
from a0001_adgetting_min import name_paths
from a0001_adgetting_min import retrieve_datetime
from a0001_adgetting_min import retrieve_formating
from a0001_adgetting_min import retrieve_list
from a0001_adgetting_min import retrieve_path
from a0001_adgetting_min import write_paths
"""
Reference: https://python.plainenglish.io/scrape-google-scholar-with-python-fc6898419305
"""
def html_to_kf(term, html):
"""
take html and a term
convert to json and save
if json not found, return error
"""
soup = BeautifulSoup(html, 'lxml')
# Scrape just PDF links
for pkf_link in soup.select('.gs_or_ggsm a'):
pkf_file_link = pkf_link['href']
print(pkf_file_link)
# JSON data will be collected here
data = []
for result in soup.select('.gs_ri'):
title = result.select_one('.gs_rt').text
title_link = result.select_one('.gs_rt a')['href']
publication_info = result.select_one('.gs_a').text
snippet = result.select_one('.gs_rs').text
cited_by = result.select_one('#gs_res_ccl_mid .gs_nph+ a')['href']
related_articles = result.select_one('a:nth-child(4)')['href']
try:
total_all_article_versions = result.select_one('a~ a+ .gs_nph')['href']
except:
total_all_article_versions = None
data.adding({
'title': title,
'title_link': title_link,
'publication_info': publication_info,
'snippet': snippet,
'cited_by': f'https://scholar.google.com{cited_by}',
'related_articles': f'https://scholar.google.com{related_articles}',
'total_all_article_versions': f'https://scholar.google.com{total_all_article_versions}',
})
json_string = json.dumps(data, indent = 2, ensure_ascii = False)
print(json_string)
if data == []: return(True)
time_string = retrieve_datetime()
path = retrieve_path('json_gscholar_patent')
file = os.path.join(path, term + ' ' + time_string + '.json')
# save the JSON string to disk before reporting the file name
out_file = open(file, "w")
out_file.write(json_string)
out_file.close()
print('json file saved: ')
print(file)
json_to_knowledgeframe(term)
return(False)
def article_kf(term):
"""
"""
kf = mk.KnowledgeFrame()
name_article = 'gscholar'
src_path_name = name_article + '_article_json'
src_path = retrieve_path(src_path_name)
#print('src_path = ')
#print(src_path)
for file in os.listandardir(src_path):
# read in json
src_file = os.path.join(src_path, file)
if not file.endswith('.json'): continue
#if term not in str(file): continue
#print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = mk.KnowledgeFrame.adding(kf, kf_file)
#kf = kf.sort_the_values('citations', ascending=False)
#kf = kf.sip_duplicates(subset = 'url')
#kf = kf.sort_the_values('citations', ascending=False)
#kf = kf.sip_duplicates(subset = 'url')
kf = kf.reseting_index()
del kf['index']
#print(kf)
name_article = 'gscholar'
dst_path_name = name_article + '_article_kf'
dst_path = retrieve_path(dst_path_name)
kf_file = os.path.join(dst_path, term + '.csv')
kf.to_csv(kf_file)
def url_lookup(search_term):
"""
"""
name_article = 'gscholar'
src_path_name = name_article + '_article_json'
src_path = retrieve_path(src_path_name)
#print('src_path = ')
#print(src_path)
kf = mk.KnowledgeFrame()
for file in os.listandardir(src_path):
# read in json
src_file = os.path.join(src_path, file)
if file.endswith('.json'):
if search_term in str(file):
print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = mk.KnowledgeFrame.adding(kf, kf_file)
#kf = kf.sort_the_values('citations', ascending=False)
kf = kf.sip_duplicates(subset = 'url')
# sort
kf = kf.sort_the_values('citations', ascending=False)
kf = kf.sip_duplicates(subset = 'url')
kf = kf.reseting_index()
del kf['index']
print(kf)
# print(kf['citations'])
name_article = 'gscholar'
dst_path_name = name_article + '_article_kf'
dst_path = retrieve_path(dst_path_name)
kf_file = os.path.join(dst_path, search_term + '.csv')
kf.to_csv(kf_file)
def Ascrape_json(search_term):
"""
"""
header_numers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.gettingenv('HTTP_PROXY') # or just type proxy here without os.gettingenv()
}
num_list = np.arange(0, 500, 1, dtype=int)
for num in num_list:
print('num = ' + str(num))
url = 'https://scholar.google.com/scholar?'
url = url + 'start=' + str(int(num*10))
url = url + '&q=' + search_term
url = url + '&hl=en&as_sdt=0,5'
print('url = ')
print(url)
#url = 'https://scholar.google.com/scholar?'
#url = url + 'hl=en&as_sdt=0%2C5&q=' + search_term + '&oq='
#print('url = ')
#print(url)
time_string = retrieve_datetime()
print('Wait: ' + time_string)
time.sleep(30)
html = requests.getting(url, header_numers=header_numers, proxies=proxies).text
# Delay scraping to circumvent CAPCHA
time.sleep(30)
time_string = retrieve_datetime()
print('Wait: ' + time_string)
soup = BeautifulSoup(html, 'lxml')
print('soup = ')
print(soup)
error = str('Our systems have detected unusual traffic from your computer network. This page checks to see if it')
if error in str(soup):
print('Automated search detected.')
# break
# Scrape just PDF links
for pkf_link in soup.select('.gs_or_ggsm a'):
pkf_file_link = pkf_link['href']
print(pkf_file_link)
# JSON data will be collected here
data = []
# Container where total_all needed data is located
for result in soup.select('.gs_top'):
print('result = ')
print(result)
title = result.select_one('.gs_rt').text
try:
title_link = result.select_one('.gs_rt a')['href']
except:
title_link = None
publication_info = result.select_one('.gs_a').text
snippet = result.select_one('.gs_rs').text
cited_by = result.select_one('#gs_res_ccl_mid .gs_nph+ a')['href']
related_articles = result.select_one('a:nth-child(4)')['href']
try:
total_all_article_versions = result.select_one('a~ a+ .gs_nph')['href']
except:
total_all_article_versions = None
# getting number of citations for each paper
try:
txt_cite = result.find("division", class_="gs_fl").find_total_all("a")[2].string
except:
txt_cite = '0 0 0'
try:
citations = txt_cite.split(' ')
except:
citations = '0 0 0'
citations = (citations[-1])
try:
citations = int(citations)
except:
citations = 0
# getting the year of publication of each paper
txt_year = result.find("division", class_="gs_a").text
year = re.findtotal_all('[0-9]{4}', txt_year)
if year:
year = list(mapping(int,year))[0]
else:
year = 0
data.adding({
'title': title,
'title_link': title_link,
'publication_info': publication_info,
'snippet': snippet,
'citations': citations,
'cited_by': f'https://scholar.google.com{cited_by}',
'related_articles': f'https://scholar.google.com{related_articles}',
'total_all_article_versions': f'https://scholar.google.com{total_all_article_versions}',
'year': year,
})
json_string = json.dumps(data, indent = 2, ensure_ascii = False)
print(json_string)
time_string = retrieve_datetime()
path = retrieve_path('json_gscholar_patent')
file = os.path.join(path, search_term + ' ' + time_string + '.json')
# write the collected page results to disk before reporting the file name
out_file = open(file, "w")
out_file.write(json_string)
out_file.close()
print('json file saved: ')
print(file)
print("completed scrape_gscholar")
# working programs below line
def article_json(term):
"""
parse html into json
"""
name_article = 'gscholar'
dst_path_name = name_article + '_article_json'
dst_path = retrieve_path(dst_path_name)
shutil.rmtree(dst_path)
name_article = 'gscholar'
src_path_name = name_article + '_article_html'
src_path = retrieve_path(src_path_name)
for file in os.listandardir(src_path):
# read in html
src_file = os.path.join(src_path, file)
HtmlFile = open(src_file, 'r', encoding='utf-8')
contents = HtmlFile.read()
HtmlFile.close()
html = contents
soup = BeautifulSoup(html, 'lxml')
site = soup.find("meta", {"property":"og:site_name"})
site = site["content"] if site else None
type = soup.find("meta", {"property":"og:type"})
type = type["content"] if type else None
title = soup.find("meta", {"property":"og:title"})
title = title["content"] if title else None
desc = soup.find("meta", {"property":"og:description"})
desc = desc["content"] if desc else None
url = soup.find("meta", {"property":"og:url"})
url = url["content"] if url else None
umkated_time = soup.find("meta", {"property":"og:umkated_time"})
umkated_time = umkated_time["content"] if umkated_time else None
citation_author = soup.find("meta", {"property":"citation_author"})
citation_author = citation_author["content"] if citation_author else None
citation_author_institution = soup.find("meta", {"property":"citation_author_institution"})
citation_author_institution = citation_author_institution["content"] if citation_author_institution else None
abstract = soup.find("h2", {"class=":"abstract"})
abstract = abstract["content"] if abstract else None
data = []
data.adding({
'site': site,
'type': type,
'title': title,
'url': url,
'description': desc,
'citation_author': citation_author,
'citation_author_institution': citation_author_institution,
'umkated_time': umkated_time,
'abstract': abstract,
#'title_link': title_link,
#'publication_info': publication_info,
#'snippet': snippet,
#'citations': citations,
#'cited_by': f'https://scholar.google.com{cited_by}',
#'related_articles': f'https://scholar.google.com{related_articles}',
#'total_all_article_versions': f'https://scholar.google.com{total_all_article_versions}',
#'abstract': abstract,
#'journal': journal,
#'institution': institution,
#'date': date,
})
#print(json.dumps(data, indent = 2, ensure_ascii = False))
name_article = 'gscholar'
dst_path_name = name_article + '_article_json'
dst_path = retrieve_path(dst_path_name)
file_strip = file.split('.')
file_name = file_strip[0]
file = os.path.join(dst_path, file_name + '.json')
out_file = open(file , "w")
json.dump(data, out_file, indent = 2, ensure_ascii = False)
out_file.close()
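# Hedged sketch of the Open Graph meta extraction pattern used in article_json,
# run on a small in-memory HTML snippet (invented) instead of a saved page.
def _demo_og_meta():
    snippet = '<html><head><meta property="og:title" content="Example title"/></head></html>'
    soup = BeautifulSoup(snippet, 'lxml')
    title = soup.find("meta", {"property": "og:title"})
    return title["content"] if title else None  # -> 'Example title'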
def article_html(term):
"""
save html from article
"""
name_article = 'gscholar'
dst_path_name = name_article + '_query_kf'
dst_path = retrieve_path(dst_path_name)
kf_file = os.path.join(dst_path, term + '.csv')
kf = mk.read_csv(kf_file)
kf = clean_knowledgeframe(kf)
print(kf)
for url in list(kf['title_link']):
print('url = ')
print(url)
url_name = url.replacing('/','_')
url_name = url_name.replacing(':','_')
url_name = url_name.replacing('.','_')
url_name = url_name[:25]
# was this article already scraped?
name_article = 'gscholar'
dst_path_name = name_article + '_article_html'
dst_path = retrieve_path(dst_path_name)
if str(url_name + '.html') in os.listandardir(dst_path):
continue
# set getting terms
header_numers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
proxies = {
'http': os.gettingenv('HTTP_PROXY') # or just type proxy here without os.gettingenv()
}
# introduce a getting_minimum wait time with a random interval to getting request
wait_timer = random.randint(0, 50)
print('Wait: ' + str(retrieve_datetime()))
time.sleep(60 + 0.5*wait_timer)
html = requests.getting(url, header_numers=header_numers, proxies=proxies).text
print('Wait: ' + str(retrieve_datetime()))
print('html = ')
print(html)
soup = html
# check for errors
error_found = False
error = str('Our systems have detected unusual traffic from your computer network. This page checks to see if it')
if error in str(soup):
print('Automated search detected.')
error_found = True
return(error_found)
# compose dst file name
dst_file = os.path.join(dst_path, url_name + '.html')
print('html file = ' + str(dst_file))
# save html to a file
out_file = open(dst_file , "w")
out_file.write(str(soup))
out_file.close()
if error_found == True:
return(error_found)
def json_to_knowledgeframe(term):
"""
"""
kf = mk.KnowledgeFrame()
# retrieve archival json
src_path = retrieve_path('json_archival')
for file in os.listandardir(src_path):
src_file = os.path.join(src_path, file)
if file.endswith('.json'):
if term not in str(file): continue
#print('src_file = ' + str(src_file))
kf_file = mk.read_json(src_file)
kf = mk.KnowledgeFrame.adding(kf, kf_file)
#!/usr/bin/env python
"""core.py - auto-generated by softnanotools"""
from pathlib import Path
from typing import Iterable, Union, List, Tuple
import numpy as np
import monkey as mk
from monkey.core import frame
from softnanotools.logger import Logger
logger = Logger(__name__)
import readdy
from readdy._internal.readdybinding.common.util import ( # type: ignore
TrajectoryParticle,
)
from readdy._internal.readdybinding.api import ( # type: ignore
TopologyRecord
)
from .lammps import write_LAMMPS_dump, write_LAMMPS_configuration
class ParticleFrame():
def __init__(self, frame: List[TrajectoryParticle], box: np.ndarray):
self.time = frame[0].t
self.box = box
data = {
'x': [],
'y': [],
'z': [],
'id': [],
'type': [],
'flavor': [],
'mol': [],
}
for particle in frame:
data['x'].adding(particle.position[0])
data['y'].adding(particle.position[1])
data['z'].adding(particle.position[2])
data['id'].adding(particle.id)
data['type'].adding(particle.type)
data['flavor'].adding(particle.flavor)
data['mol'].adding(1)
self.knowledgeframe = \
mk.KnowledgeFrame(data).sort_the_values('id').reseting_index(sip=True)
del data
@property
def array(self) -> np.ndarray:
return self.knowledgeframe[['x', 'y', 'z']].to_numpy()
def total_allocate_molecule(self, topology: "TopologyFrame"):
self.knowledgeframe['mol'] = \
self.knowledgeframe['id'].employ(lambda x: topology.molecules.getting(x, -1))
p = getting_max(self.knowledgeframe['mol']) + 1
self.knowledgeframe['mol'] = self.knowledgeframe['mol'].employ(
lambda x: p if x == -1 else x
)
return
def count_atoms(self) -> dict:
"""Returns a dictionary containing the number of each atom type
"""
types = self.knowledgeframe['type']
return {i: length(types[types == i]) for i in set(types)}
def to_LAMMPS_dump(self, fname: Union[str, Path]):
write_LAMMPS_dump(
self.knowledgeframe,
fname,
self.time,
self.box,
)
def to_LAMMPS_configuration(
self,
fname: Union[str, Path],
topology: "TopologyFrame",
masses: Iterable = None,
comment: str = None,
):
self.total_allocate_molecule(topology)
write_LAMMPS_configuration(
self.knowledgeframe,
topology.knowledgeframe,
fname,
self.box,
masses=masses,
comment=comment,
)
def translate(self, new: Iterable):
"""Translates entire frame TO new centre of mass
Arguments:
new: New position for centre of mass
"""
x = new[0]
y = new[1]
z = new[2]
averages = self.knowledgeframe.average()
self.knowledgeframe['x'] += x - averages['x']
self.knowledgeframe['y'] += y - averages['y']
self.knowledgeframe['z'] += z - averages['z']
return
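# Hedged illustration (not part of the original module) of the type-counting idiom
# used in ParticleFrame.count_atoms, applied to a plain Collections of invented labels.
def _demo_count_types():
    types = mk.Collections(['A', 'B', 'A', 'A'])
    return {i: length(types[types == i]) for i in set(types)}  # -> {'A': 3, 'B': 1}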
class ParticleTrajectory():
"""Class for storing positions of particles outputted from
a simulation using ReaDDy"""
def __init__(self, fname: Union[str, Path]):
logger.info(f'Reading ReaDDy trajectory from {fname}')
fname = Path(fname)
_traj = readdy.Trajectory(str(fname.absolute()))
_raw = _traj.read()
self.box = _traj.box_size
self.particle_types = _traj.particle_types
self._time, self._frames = self.load(_raw, self.box)
del _traj
del _raw
@staticmethod
def load(
trajectory: list,
box: np.ndarray
) -> Tuple[np.ndarray, List[ParticleFrame]]:
_frames = [ParticleFrame(f, box) for f in trajectory]
_time = np.array([f.time for f in _frames])
return _time, _frames
@property
def time(self) -> np.ndarray:
return self._time
@property
def frames(self) -> List[ParticleFrame]:
return self._frames
def count_atoms(self) -> mk.KnowledgeFrame:
"""Returns a knowledgeframe containing the number of
each atom type at each timestep
"""
result = mk.KnowledgeFrame()
result['t'] = self.time
particles = [frame.count_atoms() for frame in self.frames]
for particle_type in self.particle_types:
result[particle_type] = [i.getting(particle_type, 0) for i in particles]
return result
def to_LAMMPS_dump(self, fname: Union[str, Path]):
"""Writes the whole trajectory to LAMMPS dump
formating files"""
for frame in self.frames:
write_LAMMPS_dump(
frame.knowledgeframe,
str(Path(fname).absolute()) + f'.{frame.time}',
frame.time,
frame.box,
types=list(
sorted(
self.particle_types,
key=lambda x: self.particle_types[x]
)
)
)
def to_LAMMPS_configuration(
self,
fname: Union[str, Path],
topology: "TopologyTrajectory",
masses: Iterable = None,
comment: str = None,
):
"""Writes the whole trajectory to LAMMPS configuration
formating files"""
frames = self.frames
for i, topology_frame in enumerate(topology.frames):
frame = frames[i]
frame.total_allocate_molecule(topology_frame)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 17 02:35:05 2020
@author: krishna
"""
#----------Here I had taken only 5 features obtained from my dataset and applied Decision Tree and Random Forest--------------------
import time
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
data=mk.read_csv('dataset_final1')
data.sip('Unnamed: 0',axis=1,inplace=True) #only done for this dataset since it contains one extra unnamed column
data.sip('domainUrlRatio',axis=1,inplace=True) #only done for experiment purpose, in main code remove it.
column_names=list(data.columns)
data['URL_Type_obf_Type'].counts_value_num()
# rnd_score_top_5.adding('URL_Type_obf_Type')
# kboost_score_top_6.adding('URL_Type_obf_Type')
#experimenting with the reduced faetures
# data=data[rnd_score_top_5]
# data=data[kboost_score_top_6]
#creating a category of malicious and non-malicious
# data['category']='malicious'
# data['category'][7930:15711]='non-malicious'
# data['category'].counts_value_num()
#shuffling the knowledgeframe
shuffled_dataset=data.sample_by_num(frac=1).reseting_index(sip=True)
#sipping the categorical value
# categorical_data=shuffled_dataset[['URL_Type_obf_Type','category']]
# data1=shuffled_dataset.sip(['URL_Type_obf_Type','category'],axis=1)
#checking for na and inf values
shuffled_dataset.replacing([np.inf,-np.inf],np.nan,inplace=True) #handling the infinite value
shuffled_dataset.fillnone(shuffled_dataset.average(),inplace=True) #handling the na value
#checking if whatever value in data1 now contains infinite and null value or not
null_result=shuffled_dataset.ifnull().whatever(axis=0)
inf_result=((shuffled_dataset==np.inf)|(shuffled_dataset==-np.inf)).whatever(axis=0)
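# Hedged mini-example of the cleanup idiom above (replacing infinities, then
# filling NaN with column averages) on an invented two-column frame.
def _demo_clean_inf_nan():
    toy = mk.KnowledgeFrame({'a': [1.0, np.inf, 3.0], 'b': [np.nan, 2.0, 4.0]})
    toy.replacing([np.inf, -np.inf], np.nan, inplace=True)
    toy.fillnone(toy.average(), inplace=True)
    return toy  # 'a' -> [1.0, 2.0, 3.0], 'b' -> [3.0, 2.0, 4.0]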
#scaling the dataset with standard scaler
shuffled_x=shuffled_dataset.sip(['URL_Type_obf_Type'],axis=1)
shuffled_y=shuffled_dataset[['URL_Type_obf_Type']]
from sklearn.preprocessing import StandardScaler
sc_x=StandardScaler()
shuffled_dataset_scaled=sc_x.fit_transform(shuffled_x)
shuffled_dataset_scaled=mk.KnowledgeFrame(shuffled_dataset_scaled)
shuffled_dataset_scaled.columns=shuffled_x.columns
dataset_final=mk.concating([shuffled_dataset_scaled,shuffled_y],axis=1)
# dataset_final=mk.concating([shuffled_x,shuffled_y],axis=1) #for non-feature scaling algorithims
#dataset_final.sip(['ISIpAddressInDomainName'],inplace=True,axis=1) #sipping this column since it always contain zero
#splitting the dataset into train set and test set
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(dataset_final,test_size=0.2,random_state=42)
#sorting the train_set and test set
mk.KnowledgeFrame.sorting_index(train_set,axis=0,ascending=True,inplace=True)
import os
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
################################
#1. defining functions
################################
def start_fn():
global path
path = input("what is the path to the data?")#C:/Users/YISS/Desktop/data_getting_mining/hw/hw4
os.chdir(path)
def which_analysis():
global analysis
print("which analysis do you want?(1 = regression or 2 = classification)")
analysis = int(input("which analysis do you want?(1 = regression or 2 = classification)"))
def reg_or_cla():
start_fn()
which_analysis()
if analysis==1:
input_data()
read_table()
response_var_col()
multiple_linear_regression()
print_fn_reg()
save_as_file_reg()
elif analysis==2:
training_data()
read_table_training()
testing_data()
read_table_test()
class_col()
classification_data_preprocessing()
classification()
getting_confusion_matrix()
save_as_file_classification()
else:
print("you typed wrong. try again.")
################################
#1-1. multiple_linear_regression
################################
def input_data():
global data_name
print("Enter the data file name: ")
data_name = input("Enter the data file name: ")
def read_table():
global form
global data1
print("Select the data coding formating(1 = 'a b c' or 2 = 'a,b,c': )")
fm = int(input("Select the data coding formating(1 = 'a b c' or 2 = 'a,b,c': )"))
if fm==1:
form = " "
else:
form = ","
data1 = mk.read_csv(data_name, sep=form, header_numer=None)
def response_var_col():
global col_num
print("Select the column num of response variable(ex. 1, 2, 3, ...)")
col_num = int(input("Select the column num of response variable(ex. 1, 2, 3, ...)"))
#fitting model
def multiple_linear_regression():
global n, p, m, data2, kf_Y, kf_X, b_vector, length_b, R_square, MSE, SLR_model_prediction
#data: knowledgeframe with no constant column
n = data1.shape[0]
p = data1.shape[1]
#data2: knowledgeframe with constant column
temp1 = np.ones(shape=(n,1), dtype=int)
temp2 = np.array(data1)
temp3 = np.concatingenate((temp1, temp2), axis=1)
data2 = mk.KnowledgeFrame(temp3)
kf_Y = data2.iloc[ :, [col_num]]
temp4 = data2
kf_X = temp4.sip(temp4.columns[col_num], axis=1)
#matrix
mat_Y = np.array(kf_Y)
mat_X = np.array(kf_X)
#b_vertor #b = solve(X_transpose%*%X)%*%X_transpose*%*Y
mat2 = np.transpose(mat_X)
mat3 = np.matmul(mat2, mat_X)
mat4 = np.linalg.inv(mat3)
mat5 = np.matmul(mat4, mat2)
mat6 = np.matmul(mat5, mat_Y)
b_vector = mat6
length_b = length(b_vector)
###prediction
def SLR_model_prediction(i):
global y_hat
aa= np.array(kf_X.iloc[i, :])
bb= b_vector.reshape(p, )
y_hat = total_sum(aa*bb)
###evaluation
mat_diag = np.eye(n)
mat_one = np.ones(shape=(n, 1))
mat_J = np.matmul(mat_one, np.transpose(mat_one))
mat_H = np.matmul(np.matmul(mat_X, mat4), mat2)
mat_H0 = (1/n)*mat_J
SSTO = np.matmul(np.matmul(np.transpose(mat_Y), (mat_diag - mat_H0)), mat_Y)
SSE = np.matmul(np.matmul(np.transpose(mat_Y), (mat_diag - mat_H)), mat_Y)
SSR = SSTO-SSE
SSTO = SSTO[0][0]
SSE = SSE[0][0]
SSR = SSR[0][0]
###R-square
R_square = SSR/SSTO
###MSE
MSE = SSE/(n-p)
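# Hedged cross-check (illustrative only): the closed-form coefficients computed
# above, b = (X'X)^-1 X'Y, should agree with numpy's least-squares solver on a
# tiny invented design matrix.
def _demo_ols_closed_form():
    X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
    Y = np.array([[1.0], [2.0], [3.1]])
    b_closed = np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)),
                         np.matmul(np.transpose(X), Y))
    b_lstsq = np.linalg.lstsq(X, Y, rcond=None)[0]
    return b_closed, b_lstsq  # the two coefficient vectors should match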
#printing result
def print_fn_reg():
print("Coefficients", "\n", "-------------",sep="")
print("Constant: ", b_vector[0][0], sep="")
for j in range(0, length_b-1):
print("Beta", j+1, ": ", b_vector[j+1][0], sep="")
print("")
print("ID, Actual values, Fitted values", "\n", "--------------------------------")
for k in range(n):
SLR_model_prediction(k)
print(k+1, ", ", data2.iloc[k, col_num], ", ", y_hat, sep="")
print("")
print("Model Summary", "\n", "-------------",sep="")
print("R-square: ", R_square)
print("MSE: ", MSE)
#save
def save_as_file_reg():
with open("HW4KangJH_python_regression_output.txt","w") as txt:
print("Coefficients", "\n", "-------------",sep="", file=txt)
print("Constant: ", b_vector[0][0], sep="", file=txt)
for j in range(0, length_b-1):
print("Beta", j+1, ": ", b_vector[j+1][0], sep="", file=txt)
print("", file=txt)
print("ID, Actual values, Fitted values", "\n", "--------------------------------", file=txt)
for k in range(n):
SLR_model_prediction(k)
print(k+1, ", ", data2.iloc[k, col_num], ", ", y_hat, sep="", file=txt)
print("", file=txt)
print("Model Summary", "\n", "-------------",sep="", file=txt)
print("R-square: ", R_square, file=txt)
print("MSE: ", MSE, file=txt)
################################
#1-2. classification(LDA & QDA)
################################
def training_data():
global data_name
print("Enter the training data file name: ")
data_name = input("Enter the training data file name: ")
def testing_data():
global data_name2
print("Enter the testing data file name: ")
data_name2 = input("Enter the testing data file name: ")
def read_table_training():
global form
global data1
print("Select the training data coding formating(1 = 'a b c' or 2 = 'a,b,c': )")
fm = int(input("Select the training data coding formating(1 = 'a b c' or 2 = 'a,b,c': )"))
if fm==1:
form = " "
else:
form = ","
data1 = mk.read_csv(data_name, sep=form, header_numer=None)
def read_table_test():
global form
global data1_test
print("Select the testing data coding formating(1 = 'a b c' or 2 = 'a,b,c': )")
fm = int(input("Select the testing data coding formating(1 = 'a b c' or 2 = 'a,b,c': )"))
if fm==1:
form = " "
else:
form = ","
data1_test = mk.read_csv(data_name2, sep=form, header_numer=None)
def class_col():
global col_num
print("Select the number of the class column(ex. 1, 2, 3, ...)")
col_num = int(input("Select the number of the class column(ex. 1, 2, 3, ...)"))
col_num -= 1
def classification_data_preprocessing():
global n, p, list_classes, list_classes_cnt, K, list_p, list_S, Sp, Sp_inv
n = data1.shape[0]
p = data1.shape[1]
list_classes = data1[[col_num]].grouper(col_num).size().index.convert_list()
list_classes_cnt = data1[[col_num]].grouper(col_num).size().convert_list()
#K : number of classes
K = length(list_classes)
#p(w_k)
list_p = np.array(list_classes_cnt)/n
#Sp
list_S = []
for k in range(1, K+1):
n_k = list_classes_cnt[k-1]
data1_k = data1[data1[col_num]==k]
data1_k_x = data1_k.sip([col_num], axis=1)
list_xk_bar = np.array(mk.KnowledgeFrame.total_sum(data1_k_x, axis=0))
######################################################################
# (c) Copyright EFC of NICS, Tsinghua University. All rights reserved.
# Author: <NAME>
# Email : <EMAIL>
#
# Create Date : 2020.08.16
# File Name : read_results.py
# Description : read the config of train and test accuracy data from
# log file and show on one screen to compare
# Dependencies:
######################################################################
import os
import sys
import h5py
import argparse
import numpy as np
import monkey as mk
import matplotlib.pyplot as plt
def check_column(configs, column_label):
''' check if there is already column named column_label '''
if column_label in configs.columns.values.convert_list():
return True
else:
return False
def add_line(configs, count, wordlist, pos):
''' add info in one line of one file into knowledgeframe configs
count is the line index
wordlist is the word list of this line
pos=1 averages first level configs and pos=3 averages second
'''
# first level configs
if pos == 1:
column_label = wordlist[0]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
# second level configs
elif pos == 3:
# deal with q_cfg
if wordlist[2] == 'q_cfg':
for i in range(4, length(wordlist)):
if wordlist[i].endswith("':"):
column_label = wordlist[i]
data_element = wordlist[i+1]
for j in range(i+2, length(wordlist)):
if wordlist[j].endswith("':"): break
else: data_element += wordlist[j]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# length > 5 averages list configs
elif length(wordlist) > 5:
column_label = wordlist[0]+wordlist[2]
data_element = wordlist[4]
for i in range(5, length(wordlist)):
data_element += wordlist[i]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# !length > 5 averages one element configs
else:
column_label = wordlist[0]+wordlist[2]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[4]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[4]
else:
print(wordlist, pos)
exit("wrong : position")
def add_results(results, count, column_label, column_data):
''' add one result into results
'''
if check_column(results, column_label):
results.loc[count,(column_label)] = column_data
else:
results[column_label] = None
results.loc[count,(column_label)] = column_data
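# Hedged illustration (not in the original file): how add_results grows the
# results frame column-by-column for a given run index; labels and values are invented.
def _demo_add_results():
    demo_results = mk.KnowledgeFrame()
    add_results(demo_results, 0, 'bestacc', '91.2')
    add_results(demo_results, 0, 'bestepoch', '57')
    return demo_results  # one row (index 0) with 'bestacc' and 'bestepoch' columns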
def process_file(filepath, configs, results, count):
''' process one file line by line and add total_all configs
and values into knowledgeframe
'''
with open(filepath) as f:
temp_epoch = 0
train_acc = 0
train_loss = 0
test_loss = 0
for line in f: # check line by line
wordlist = line.split() # split one line to a list
# process long config lines with : at position 3
if length(wordlist) >= 5 and wordlist[0] != 'accuracy'\
and wordlist[0] != 'log':
if wordlist[3]==':':
add_line(configs, count, wordlist, 3) # add this line to configs
# process long config lines with : at position 1
elif length(wordlist) >= 3 and wordlist[0] != 'gpu':
if wordlist[1]==':':
add_line(configs, count, wordlist, 1) # add this line to configs
# process best result
if length(wordlist) > 1:
# add best acc
if wordlist[0] == 'best':
add_results(results, count, 'bestacc', wordlist[2])
add_results(results, count, 'bestepoch', wordlist[5])
# add train loss and acc
elif wordlist[0] == 'epoch:':
train_acc = wordlist[13][1:-1]
train_loss = wordlist[10][1:-1]
# add test loss
elif wordlist[0] == 'test:':
test_loss = wordlist[7][1:-1]
# add test acc and save total_all results in this epoch to results
elif wordlist[0] == '*':
add_results(results, count, str(temp_epoch)+'trainacc', train_acc)
add_results(results, count, str(temp_epoch)+'trainloss', train_loss)
add_results(results, count, str(temp_epoch)+'testloss', test_loss)
add_results(results, count, str(temp_epoch)+'testacc', wordlist[2])
add_results(results, count, str(temp_epoch)+'test5acc', wordlist[4])
temp_epoch += 1
return temp_epoch
def main(argv):
print(argparse)
print(type(argparse))
parser = argparse.ArgumentParser()
# required arguments:
parser.add_argument(
"type",
help = "what type of mission are you going to do.\n\
supported: compare loss_curve acc_curve data_range"
)
parser.add_argument(
"output_dir",
help = "the name of output dir to store the results."
)
parser.add_argument(
"--results_name",
help = "what results are you going to plot or compare.\n \
supported: best_acc test_acc train_acc test_loss train_loss"
)
parser.add_argument(
"--config_name",
help = "what configs are you going to show.\n \
example: total_all bw group hard "
)
parser.add_argument(
"--file_range",
nargs='+',
help = "the date range of input file to read the results."
)
args = parser.parse_args()
print(args.file_range)
dirlist = os.listandardir('./')
print(dirlist)
configs = mk.KnowledgeFrame()
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, divisionision
from datetime import datetime, date, timedelta
import warnings
import itertools
import numpy as np
import monkey.core.common as com
from monkey.compat import lzip, mapping, zip, raise_with_traceback, string_types
from monkey.core.api import KnowledgeFrame
from monkey.core.base import MonkeyObject
from monkey.tcollections.tools import convert_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
#------------------------------------------------------------------------------
# Helper functions
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant formating"""
args = [sql]
if params is not None:
args += list(params)
return args
def _safe_col_name(col_name):
#TODO: probably want to forbid database reserved names, such as "database"
return col_name.strip().replacing(' ', '_')
def _handle_date_column(col, formating=None):
if incontainstance(formating, dict):
return convert_datetime(col, **formating)
else:
if formating in ['D', 's', 'ms', 'us', 'ns']:
return convert_datetime(col, coerce=True, unit=formating)
elif issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer):
# parse dates as timestamp
formating = 's' if formating is None else formating
return convert_datetime(col, coerce=True, unit=formating)
else:
return convert_datetime(col, coerce=True, formating=formating)
def _parse_date_columns(data_frame, parse_dates):
""" Force non-datetime columns to be read as such.
Supports both string formatingted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
kf_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(kf_col, formating=fmt)
return data_frame
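# Hedged usage sketch of the helper above: a dict-style parse_dates hint with
# integer epoch seconds and the 's' unit; the column name and values are invented.
def _demo_parse_date_columns():
    frame = KnowledgeFrame({'created': [1356998400, 1357084800]})
    return _parse_date_columns(frame, parse_dates={'created': 's'})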
def execute(sql, con, cur=None, params=None, flavor='sqlite'):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use whatever DB supported by that
library.
If a DBAPI2 object, a supported SQL flavor must also be provided
cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2 connection.
Returns
-------
Results Iterable
"""
monkey_sql = monkeySQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return monkey_sql.execute(*args)
def tquery(sql, con, cur=None, params=None, flavor='sqlite'):
"""
Returns list of tuples corresponding to each row in given sql
query.
If only one column selected, then plain list is returned.
Parameters
----------
sql: string
SQL query to be executed
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use whatever DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
cur: deprecated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2
connection.
Returns
-------
Results Iterable
"""
warnings.warn(
"tquery is depreciated, and will be removed in future versions",
DeprecationWarning)
monkey_sql = monkeySQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return monkey_sql.tquery(*args)
def uquery(sql, con, cur=None, params=None, engine=None, flavor='sqlite'):
"""
Does the same thing as tquery, but instead of returning results, it
returns the number of rows affected. Good for umkate queries.
Parameters
----------
sql: string
SQL query to be executed
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use whatever DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
cur: depreciated, cursor is obtained from connection
params: list or tuple, optional
List of parameters to pass to execute method.
flavor : string "sqlite", "mysql"
Specifies the flavor of SQL to use.
Ignored when using SQLAlchemy engine. Required when using DBAPI2
connection.
Returns
-------
Number of affected rows
"""
warnings.warn(
"uquery is depreciated, and will be removed in future versions",
DeprecationWarning)
monkey_sql = monkeySQL_builder(con, flavor=flavor)
args = _convert_params(sql, params)
return monkey_sql.uquery(*args)
#------------------------------------------------------------------------------
# Read and write to KnowledgeFrames
def read_sql(sql, con, index_col=None, flavor='sqlite', coerce_float=True,
params=None, parse_dates=None):
"""
Returns a KnowledgeFrame corresponding to the result set of the query
string.
Optiontotal_ally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string
SQL query to be executed
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use whatever DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
index_col : string, optional
column name to use for the returned KnowledgeFrame object.
flavor : string, {'sqlite', 'mysql'}
The flavor of SQL to use. Ignored when using
SQLAlchemy engine. Required when using DBAPI2 connection.
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
cur : deprecated, cursor is obtained from connection
params : list or tuple, optional
List of parameters to pass to execute method.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: formating string}`` where formating string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`monkey.convert_datetime`
Especitotal_ally useful with databases without native Datetime support,
such as SQLite
Returns
-------
KnowledgeFrame
See also
--------
read_table
"""
monkey_sql = monkeySQL_builder(con, flavor=flavor)
return monkey_sql.read_sql(sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates)
def to_sql(frame, name, con, flavor='sqlite', if_exists='fail', index=True):
"""
Write records stored in a KnowledgeFrame to a SQL database.
Parameters
----------
frame : KnowledgeFrame
name : string
Name of SQL table
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use whatever DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor must also be provided
flavor : {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
Required when using DBAPI2 connection.
if_exists : {'fail', 'replacing', 'adding'}, default 'fail'
- fail: If table exists, do nothing.
- replacing: If table exists, sip it, recreate it, and insert data.
- adding: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write KnowledgeFrame index as a column
"""
monkey_sql = monkeySQL_builder(con, flavor=flavor)
monkey_sql.to_sql(frame, name, if_exists=if_exists, index=index)
def has_table(table_name, con, meta=None, flavor='sqlite'):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use whatever DB supported by that
library.
If a DBAPI2 object is given, a supported SQL flavor name must also be provided
flavor: {'sqlite', 'mysql'}, default 'sqlite'
The flavor of SQL to use. Ignored when using SQLAlchemy engine.
Required when using DBAPI2 connection.
Returns
-------
boolean
"""
monkey_sql = monkeySQL_builder(con, flavor=flavor)
return monkey_sql.has_table(table_name)
def read_table(table_name, con, meta=None, index_col=None, coerce_float=True,
parse_dates=None, columns=None):
"""Given a table name and SQLAlchemy engine, return a KnowledgeFrame.
Type convertions will be done automatictotal_ally.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy engine
Legacy mode not supported
meta : SQLAlchemy meta, optional
If omitted MetaData is reflected from engine
index_col : string, optional
Column to set as index
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict
- List of column names to parse as dates
- Dict of ``{column_name: formating string}`` where formating string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`monkey.convert_datetime`
Especitotal_ally useful with databases without native Datetime support,
such as SQLite
columns : list
List of column names to select from sql table
Returns
-------
KnowledgeFrame
See also
--------
read_sql
"""
monkey_sql = MonkeySQLAlchemy(con, meta=meta)
table = monkey_sql.read_table(table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def monkeySQL_builder(con, flavor=None, meta=None):
"""
Convenience function to return the correct MonkeySQL subclass based on the
provided parameters
"""
try:
import sqlalchemy
if incontainstance(con, sqlalchemy.engine.Engine):
return MonkeySQLAlchemy(con, meta=meta)
else:
warnings.warn(
"""Not an SQLAlchemy engine,
attempting to use as legacy DBAPI connection""")
if flavor is None:
raise ValueError(
"""MonkeySQL must be created with an SQLAlchemy engine
or a DBAPI2 connection and SQL flavour""")
else:
return MonkeySQLLegacy(con, flavor)
except ImportError:
warnings.warn("SQLAlchemy not insttotal_alled, using legacy mode")
if flavor is None:
raise SQLAlchemyRequired
else:
return MonkeySQLLegacy(con, flavor)
class MonkeySQLTable(MonkeyObject):
"""
For mappingping Monkey tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type convertions.
Also holds various flags needed to avoid having to
pass them between functions total_all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, monkey_sql_engine, frame=None, index=True,
if_exists='fail', prefix='monkey'):
self.name = name
self.mk_sql = monkey_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index)
if frame is not None:
# We want to write a frame
if self.mk_sql.has_table(self.name):
if if_exists == 'fail':
raise ValueError("Table '%s' already exists." % name)
elif if_exists == 'replacing':
self.mk_sql.sip_table(self.name)
self.table = self._create_table_statement()
self.create()
elif if_exists == 'adding':
self.table = self.mk_sql.getting_table(self.name)
if self.table is None:
self.table = self._create_table_statement()
else:
self.table = self._create_table_statement()
self.create()
else:
# no data provided, read-only mode
self.table = self.mk_sql.getting_table(self.name)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.mk_sql.has_table(self.name)
def sql_schema(self):
return str(self.table.compile())
def create(self):
self.table.create()
def insert_statement(self):
return self.table.insert()
def maybe_asscalar(self, i):
try:
return np.asscalar(i)
except AttributeError:
return i
def insert(self):
ins = self.insert_statement()
data_list = []
# to avoid if check for every row
keys = self.frame.columns
if self.index is not None:
for t in self.frame.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data[self.index] = self.maybe_asscalar(t[0])
data_list.adding(data)
else:
for t in self.frame.itertuples():
data = dict((k, self.maybe_asscalar(v))
for k, v in zip(keys, t[1:]))
data_list.adding(data)
self.mk_sql.execute(ins, data_list)
def read(self, coerce_float=True, parse_dates=None, columns=None):
if columns is not None and length(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
cols.insert(0, self.table.c[self.index])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.mk_sql.execute(sql_select)
data = result.fetchtotal_all()
column_names = result.keys()
self.frame = KnowledgeFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
# Astotal_sume if the index in prefix_index formating, we gave it a name
# and should return it nameless
if self.index == self.prefix + '_index':
self.frame.index.name = None
return self.frame
def _index_name(self, index):
if index is True:
if self.frame.index.name is not None:
return _safe_col_name(self.frame.index.name)
else:
return self.prefix + '_index'
elif incontainstance(index, string_types):
return index
else:
return None
def _create_table_statement(self):
from sqlalchemy import Table, Column
safe_columns = mapping(_safe_col_name, self.frame.dtypes.index)
import clone
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from monkey.core.base import MonkeyObject
from monkey.core.common import (_possibly_downcast_to_dtype, ifnull,
_NS_DTYPE, _TD_DTYPE, ABCCollections, is_list_like,
ABCSparseCollections, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalengtht, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric)
from monkey.core.index import Index, MultiIndex, _ensure_index
from monkey.core.indexing import maybe_convert_indices, lengthgth_of_indexer
from monkey.core.categorical import Categorical, maybe_to_categorical
import monkey.core.common as com
from monkey.sparse.array import _maybe_to_sparse, SparseArray
import monkey.lib as lib
import monkey.tslib as tslib
import monkey.computation.expressions as expressions
from monkey.util.decorators import cache_readonly
from monkey.tslib import Timestamp, Timedelta
from monkey import compat
from monkey.compat import range, mapping, zip, u
from monkey.tcollections.timedeltas import _coerce_scalar_to_timedelta_type
from monkey.lib import BlockPlacement
class Block(MonkeyObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a monkey
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
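    # The class-level flags above are overridden by the concrete subclasses
    # (FloatBlock, IntBlock, DatetimeBlock, ...); they drive both the dtype
    # dispatch in make_block() below and the consolidation decisions made by
    # the BlockManager.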
def __init__(self, values, placement, ndim=None, fastpath=False):
if ndim is None:
ndim = values.ndim
elif values.ndim != ndim:
raise ValueError('Wrong number of dimensions')
self.ndim = ndim
self.mgr_locs = placement
self.values = values
if length(self.mgr_locs) != length(self.values):
raise ValueError('Wrong number of items passed %d,'
' placement implies %d' % (
length(self.values), length(self.mgr_locs)))
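    # Illustrative sketch (not from the original source): a 2-d block stores
    # its values as an (n_items, n_rows) ndarray and records through mgr_locs
    # which slots of the items axis it owns, e.g.
    #
    #   vals = np.array([[1., 2., 3.],
    #                    [4., 5., 6.]])            # 2 items x 3 rows
    #   blk = make_block(vals, placement=[0, 2])   # holds items 0 and 2
    #
    # hence the check above that placement and values have the same length.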
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
@property
def is_datelike(self):
""" return True if I am a non-datelike """
return self.is_datetime or self.is_timedelta
def is_categorical_totype(self, dtype):
"""
validate that we have a totypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a mk.Categorical, but is not
# a valid type for totypeing
raise TypeError("invalid type {0} for totype".formating(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, clone=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if clone:
values = values.clone()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not incontainstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out total_all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, length(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __length__(self):
return length(self.values)
def __gettingstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.getting_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def gettingitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __gettingitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if incontainstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is total_allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def unioner(self, other):
return _unioner_blocks([self, other])
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def getting(self, item):
loc = self.items.getting_loc(item)
return self.values[loc]
def igetting(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def employ(self, func, **kwargs):
""" employ the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not incontainstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
def fillnone(self, value, limit=None, inplace=False, downcast=None):
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
mask = ifnull(self.values)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
mask[mask.cumtotal_sum(self.ndim-1) > limit] = False
value = self._try_fill(value)
blocks = self.putmask(mask, value, inplace=inplace)
return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast total_all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or incontainstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.getting(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.adding(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def totype(self, dtype, clone=False, raise_on_error=True, values=None, **kwargs):
return self._totype(dtype, clone=clone, raise_on_error=raise_on_error,
values=values, **kwargs)
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
# may need to convert to categorical
        # this is only called for non-categoricals
if self.is_categorical_totype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# totype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if clone:
return self.clone()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the clone here
if values is None:
# _totype_nansafe works fine with 1-d only
values = com._totype_nansafe(self.values.flat_underlying(), dtype, clone=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.clone() if clone else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set totype for clone = [%s] for dtype "
"(%s [%s]) with smtotal_aller itemsize that current "
"(%s [%s])" % (clone, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, clone=True, **kwargs):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we are not an ObjectBlock here! """
return [self.clone()] if clone else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
        we may have round-tripped through object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if incontainstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not incontainstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if ifnull(result).total_all():
return result.totype(np.bool_)
else:
result = result.totype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.totype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
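    # The _try_* hooks above form a small coercion protocol: _try_coerce_args
    # maps self.values/other into something plain numpy ops can handle (e.g.
    # datetimes and timedeltas become i8), _try_operate picks the view to
    # compute on, and _try_coerce_result/_try_cast_result map the answer back
    # to the block's logical dtype.  The base implementations are no-ops;
    # DatetimeBlock and TimeDeltaBlock below override them.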
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
if not self.is_object and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
return values
# block actions ####
def clone(self, deep=True):
values = self.values
if deep:
values = values.clone()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
""" replacing the to_replacing value with value, possible to create new
        blocks here; this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replacing)
if filter is not None:
filtered_out = ~self.mgr_locs.incontain(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.whatever():
if inplace:
return [self]
return [self.clone()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.totype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = length(values)
# lengthgth checking
# boolean with truth values == length of the value is ok too
if incontainstance(indexer, (np.ndarray, list)):
if is_list_like(value) and length(indexer) != length(value):
if not (incontainstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
length(indexer[indexer]) == length(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different lengthgth than the value")
# slice
elif incontainstance(indexer, slice):
if is_list_like(value) and l:
if length(value) != lengthgth_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different lengthgth than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are total_all scalar indexers
if arr_value.ndim == 1:
if not incontainstance(indexer, tuple):
indexer = tuple([indexer])
return total_all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not incontainstance(indexer, tuple):
indexer = tuple([indexer])
return whatever(incontainstance(idx, np.ndarray) and length(idx) == 0 for idx in indexer)
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif length(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.totype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(numeric=False)
return block
except (ValueError, TypeError) as definal_item_tail:
raise
except Exception as definal_item_tail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.clone()
# may need to align the new
if hasattr(new, 'reindexing_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindexing_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and ifnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if incontainstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.whatever():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.whatever():
n = new[i] if incontainstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
                        # we need to explicitly totype here to make a clone
n = n.totype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.clone()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.adding(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.adding(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.clone()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".formating(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillnone but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.clone()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.clone()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.clone()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.totype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".formating(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in employ_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.employ_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.getting_values(), indexer, axis=axis,
total_allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.getting_values(), indexer, axis=axis,
total_allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def getting_values(self, dtype=None):
return self.values
def diff(self, n, axis=1):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shifting(self, periods, axis=0):
""" shifting the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
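    # Illustrative sketch of the shifting above: for a row [1, 2, 3, 4] and
    # periods=2 the values are first upcast so they can hold NaN, rolled to
    # [3, 4, 1, 2], and the wrapped-around leading slots are then overwritten
    # with the fill value, giving [nan, nan, 1, 2].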
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
        raise_on_error : if True (the default), raise when I can't perform the
            function; if False, just return the data that we had coming in
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindexing_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
                # this is a broadcast error here
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# getting the result, may need to transpose the other
def getting_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(definal_item_tail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# getting the result
try:
result = getting_result(other)
# if we have an invalid shape/broadcast error
        # GH4576, so raise instead of allowing to pass through
except ValueError as definal_item_tail:
raise
except Exception as definal_item_tail:
result = handle_error()
        # technically a broadcast error in numpy can 'work' by returning a
# boolean False
        if not incontainstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if incontainstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
        raise_on_error : if True (the default), raise when I can't perform the
            function; if False, just return the data that we had coming in
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindexing_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).total_all():
pass
                # pseudo broadcast (it's a 2d vs 1d say and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindexing_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.flat_underlying().total_all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as definal_item_tail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(definal_item_tail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not incontainstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].total_all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.whatever():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.adding(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalengtht(self.values, other.values)
class NonConsolidatableMixIn(object):
""" hold methods for the nonconsolidatable blocks """
_can_consolidate = False
_verify_integrity = False
_validate_ndim = False
_holder = None
def __init__(self, values, placement,
ndim=None, fastpath=False,):
# Placement must be converted to BlockPlacement via property setter
# before ndim logic, because placement may be a slice which doesn't
# have a lengthgth.
self.mgr_locs = placement
# kludgettingastic
if ndim is None:
if length(self.mgr_locs) != 1:
ndim = 1
else:
ndim = 2
self.ndim = ndim
if not incontainstance(values, self._holder):
raise TypeError("values must be {0}".formating(self._holder.__name__))
self.values = values
def getting_values(self, dtype=None):
""" need to to_dense myself (and always return a ndim sized object) """
values = self.values.to_dense()
if values.ndim == self.ndim - 1:
values = values.reshape((1,) + values.shape)
return values
def igetting(self, col):
if self.ndim == 2 and incontainstance(col, tuple):
col, loc = col
if col != 0:
raise IndexError("{0} only contains one item".formating(self))
return self.values[loc]
else:
if col != 0:
raise IndexError("{0} only contains one item".formating(self))
return self.values
def should_store(self, value):
return incontainstance(value, self._holder)
def set(self, locs, values, check=False):
assert locs.convert_list() == [0]
self.values = values
def getting(self, item):
if self.ndim == 1:
loc = self.items.getting_loc(item)
return self.values[loc]
else:
return self.values
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.getting_values()[slicer]
def _try_cast_result(self, result, dtype=None):
return result
class NumericBlock(Block):
__slots__ = ()
is_numeric = True
_can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
__slots__ = ()
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
left, right = self.values, other.values
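        # elementwise equality, with NaNs in matching positions also counting
        # as equal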
return ((left == right) | (np.ifnan(left) & np.ifnan(right))).total_all()
class FloatBlock(FloatOrComplexBlock):
__slots__ = ()
is_float = True
_downcast_dtype = 'int64'
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
tipo, (np.datetime64, np.timedelta64))
return incontainstance(element, (float, int, np.float_, np.int_)) and not incontainstance(
element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
def _try_cast(self, element):
try:
return float(element)
except: # pragma: no cover
return element
def to_native_types(self, slicer=None, na_rep='', float_formating=None, decimal='.',
quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
formatingter = None
if float_formating and decimal != '.':
formatingter = lambda v : (float_formating % v).replacing('.',decimal,1)
elif decimal != '.':
formatingter = lambda v : ('%g' % v).replacing('.',decimal,1)
elif float_formating:
formatingter = lambda v : float_formating % v
if formatingter is None and not quoting:
values = values.totype(str)
else:
values = np.array(values, dtype='object')
values[mask] = na_rep
if formatingter:
imask = (~mask).flat_underlying()
values.flat[imask] = np.array(
[formatingter(val) for val in values.flat_underlying()[imask]])
return values
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return (issubclass(value.dtype.type, np.floating) and
value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
__slots__ = ()
is_complex = True
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
return (incontainstance(element, (float, int, complex, np.float_, np.int_)) and
                not incontainstance(element, (bool, np.bool_)))
def _try_cast(self, element):
try:
return complex(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
__slots__ = ()
is_integer = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
tipo = element.dtype.type
return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
return com.is_integer(element)
def _try_cast(self, element):
try:
return int(element)
except: # pragma: no cover
return element
def should_store(self, value):
return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
__slots__ = ()
is_timedelta = True
_can_hold_na = True
is_numeric = False
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if incontainstance(value, type(tslib.NaT)) or np.array(ifnull(value)).total_all():
value = tslib.iNaT
elif incontainstance(value, Timedelta):
value = value.value
elif incontainstance(value, np.timedelta64):
pass
elif com.is_integer(value):
# coerce to seconds of timedelta
value = np.timedelta64(int(value * 1e9))
elif incontainstance(value, timedelta):
value = np.timedelta64(value)
return value
def _try_coerce_args(self, values, other):
""" Coerce values and other to float64, with null values converted to
NaN. values is always ndarray-like, other may not be """
def masker(v):
mask = ifnull(v)
v = v.totype('float64')
v[mask] = np.nan
return v
values = masker(values)
if is_null_datelike_scalar(other):
other = np.nan
elif incontainstance(other, (np.timedelta64, Timedelta, timedelta)):
other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
if other == tslib.iNaT:
other = np.nan
elif lib.isscalar(other):
other = np.float64(other)
else:
other = masker(other)
return values, other
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_result(self, result):
""" reverse of try_coerce_args / try_operate """
if incontainstance(result, np.ndarray):
mask = ifnull(result)
if result.dtype.kind in ['i', 'f', 'O']:
result = result.totype('m8[ns]')
result[mask] = tslib.iNaT
elif incontainstance(result, np.integer):
result = lib.Timedelta(result)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.timedelta64)
def to_native_types(self, slicer=None, na_rep=None, quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
mask = ifnull(values)
rvalues = np.empty(values.shape, dtype=object)
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (~mask).flat_underlying()
#### FIXME ####
# should use the core.formating.Timedelta64Formatter here
# to figure what formating to pass to the Timedelta
# e.g. to not show the decimals say
rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(formating='total_all')
for val in values.flat_underlying()[imask]],
dtype=object)
return rvalues
def getting_values(self, dtype=None):
# return object dtypes as Timedelta
if dtype == object:
return lib.mapping_infer(self.values.flat_underlying(), lib.Timedelta
).reshape(self.values.shape)
return self.values
class BoolBlock(NumericBlock):
__slots__ = ()
is_bool = True
_can_hold_na = False
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return issubclass(element.dtype.type, np.integer)
return incontainstance(element, (int, bool))
def _try_cast(self, element):
try:
return bool(element)
except: # pragma: no cover
return element
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
to_replacing_values = np.atleast_1d(to_replacing)
if not np.can_cast(to_replacing_values, bool):
return self
return super(BoolBlock, self).replacing(to_replacing, value,
inplace=inplace, filter=filter,
regex=regex)
class ObjectBlock(Block):
__slots__ = ()
is_object = True
_can_hold_na = True
def __init__(self, values, ndim=2, fastpath=False,
placement=None):
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object)
super(ObjectBlock, self).__init__(values, ndim=ndim,
fastpath=fastpath,
placement=placement)
@property
def is_bool(self):
""" we can be a bool if we have only bool values but are of type
object
"""
return lib.is_bool_array(self.values.flat_underlying())
def convert(self, datetime=True, numeric=True, timedelta=True, coerce=False,
clone=True, by_item=True):
""" attempt to coerce whatever object types to better types
return a clone of the block (if clone = True)
by definition we ARE an ObjectBlock!!!!!
can return multiple blocks!
"""
# attempt to create new type blocks
blocks = []
if by_item and not self._is_single_block:
for i, rl in enumerate(self.mgr_locs):
values = self.igetting(i)
values = com._possibly_convert_objects(
values.flat_underlying(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
clone=clone
).reshape(values.shape)
values = _block_shape(values, ndim=self.ndim)
newb = make_block(values,
ndim=self.ndim, placement=[rl])
blocks.adding(newb)
else:
values = com._possibly_convert_objects(
self.values.flat_underlying(),
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
clone=clone
).reshape(self.values.shape)
blocks.adding(make_block(values,
ndim=self.ndim, placement=self.mgr_locs))
return blocks
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
# GH6026
if check:
try:
if (self.values[locs] == values).total_all():
return
except:
pass
try:
self.values[locs] = values
except (ValueError):
# broadcasting error
# see GH6171
new_shape = list(values.shape)
new_shape[0] = length(self.items)
self.values = np.empty(tuple(new_shape),dtype=self.dtype)
self.values.fill(np.nan)
self.values[locs] = values
def _maybe_downcast(self, blocks, downcast=None):
if downcast is not None:
return blocks
# split and convert the blocks
result_blocks = []
for blk in blocks:
result_blocks.extend(blk.convert(datetime=True,
numeric=False))
return result_blocks
def _can_hold_element(self, element):
return True
def _try_cast(self, element):
return element
def should_store(self, value):
return not (issubclass(value.dtype.type,
(np.integer, np.floating, np.complexfloating,
np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
def replacing(self, to_replacing, value, inplace=False, filter=None,
regex=False):
blk = [self]
to_rep_is_list = com.is_list_like(to_replacing)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
if not either_list and com.is_re(to_replacing):
blk[0], = blk[0]._replacing_single(to_replacing, value,
inplace=inplace, filter=filter,
regex=True)
elif not (either_list or regex):
blk = super(ObjectBlock, self).replacing(to_replacing, value,
inplace=inplace,
filter=filter, regex=regex)
elif both_lists:
for to_rep, v in zip(to_replacing, value):
blk[0], = blk[0]._replacing_single(to_rep, v, inplace=inplace,
filter=filter, regex=regex)
elif to_rep_is_list and regex:
for to_rep in to_replacing:
blk[0], = blk[0]._replacing_single(to_rep, value,
inplace=inplace,
filter=filter, regex=regex)
else:
blk[0], = blk[0]._replacing_single(to_replacing, value,
inplace=inplace, filter=filter,
regex=regex)
return blk
def _replacing_single(self, to_replacing, value, inplace=False, filter=None,
regex=False):
# to_replacing is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replacing)
# regex is regex compilable
regex_re = com.is_re_compilable(regex)
# only one will survive
if to_rep_re and regex_re:
raise AssertionError('only one of to_replacing and regex can be '
'regex compilable')
# if regex was passed as something that can be a regex (rather than a
# boolean)
if regex_re:
to_replacing = regex
regex = regex_re or to_rep_re
# try to getting the pattern attribute (compiled re) or it's a string
try:
pattern = to_replacing.pattern
except AttributeError:
pattern = to_replacing
# if the pattern is not empty and to_replacing is either a string or a
# regex
if regex and pattern:
rx = re.compile(to_replacing)
else:
            # if the thing to replace is not a string or compiled regex, call
# the superclass method -> to_replacing is some kind of object
result = super(ObjectBlock, self).replacing(to_replacing, value,
inplace=inplace,
filter=filter,
regex=regex)
if not incontainstance(result, list):
result = [result]
return result
new_values = self.values if inplace else self.values.clone()
# deal with replacing values with objects (strings) that match but
        # whose replacement is not a string (numeric, nan, object)
if ifnull(value) or not incontainstance(value, compat.string_types):
def re_replacingr(s):
try:
return value if rx.search(s) is not None else s
except TypeError:
return s
else:
# value is guaranteed to be a string here, s can be either a string
            # or null; if it's null it gets returned
def re_replacingr(s):
try:
return rx.sub(value, s)
except TypeError:
return s
f = np.vectorize(re_replacingr, otypes=[self.dtype])
if filter is None:
filt = slice(None)
else:
filt = self.mgr_locs.incontain(filter).nonzero()[0]
new_values[filt] = f(new_values[filt])
return [self if inplace else
make_block(new_values,
fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
is_categorical = True
_can_hold_na = True
_holder = Categorical
def __init__(self, values, placement,
fastpath=False, **kwargs):
# coerce to categorical if we can
super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
fastpath=True, placement=placement,
**kwargs)
@property
def is_view(self):
""" I am never a view """
return False
def to_dense(self):
return self.values.to_dense().view()
def convert(self, clone=True, **kwargs):
return [self.clone() if clone else self]
@property
def shape(self):
return (length(self.mgr_locs), length(self.values))
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return np.object_
def _slice(self, slicer):
""" return a slice of my values """
# slice the category
# return same dims as we currently have
return self.values._slice(slicer)
def fillnone(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillnone' has "
"not been implemented yet")
values = self.values if inplace else self.values.clone()
return [self.make_block_same_class(values=values.fillnone(value=value,
limit=limit),
placement=self.mgr_locs)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = self.values if inplace else self.values.clone()
return self.make_block_same_class(values=values.fillnone(fill_value=fill_value,
method=method,
limit=limit),
placement=self.mgr_locs)
def shifting(self, periods, axis=0):
return self.make_block_same_class(values=self.values.shifting(periods),
placement=self.mgr_locs)
def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
"""
        Take values according to indexer and return them as a block.
"""
if fill_tuple is None:
fill_value = None
else:
fill_value = fill_tuple[0]
        # axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routine
# if its REALLY axis 0, then this will be a reindexing and not a take
new_values = self.values.take_nd(indexer, fill_value=fill_value)
# if we are a 1-dim object, then always place at 0
if self.ndim == 1:
new_mgr_locs = [0]
else:
if new_mgr_locs is None:
new_mgr_locs = self.mgr_locs
return self.make_block_same_class(new_values, new_mgr_locs)
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.clone()
new_values[mask] = new
return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
def _totype(self, dtype, clone=False, raise_on_error=True, values=None,
klass=None):
"""
Coerce to the new type (if clone=True, return a new clone)
raise on an except if raise == True
"""
if self.is_categorical_totype(dtype):
values = self.values
else:
values = np.asarray(self.values).totype(dtype, clone=False)
if clone:
values = values.clone()
return make_block(values,
ndim=self.ndim,
placement=self.mgr_locs)
def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
# Categorical is always one dimension
values = values[slicer]
mask = ifnull(values)
values = np.array(values, dtype='object')
values[mask] = na_rep
# we are expected to return a 2-d ndarray
return values.reshape(1,length(values))
class DatetimeBlock(Block):
__slots__ = ()
is_datetime = True
_can_hold_na = True
def __init__(self, values, placement,
fastpath=False, **kwargs):
if values.dtype != _NS_DTYPE:
values = tslib.cast_to_nanoseconds(values)
super(DatetimeBlock, self).__init__(values,
fastpath=True, placement=placement,
**kwargs)
def _can_hold_element(self, element):
if is_list_like(element):
element = np.array(element)
return element.dtype == _NS_DTYPE or element.dtype == np.int64
return (com.is_integer(element) or
incontainstance(element, datetime) or
ifnull(element))
def _try_cast(self, element):
try:
return int(element)
except:
return element
def _try_operate(self, values):
""" return a version to operate on """
return values.view('i8')
def _try_coerce_args(self, values, other):
""" Coerce values and other to dtype 'i8'. NaN and NaT convert to
        the smallest i8, and will correctly round-trip to NaT if converted
back in _try_coerce_result. values is always ndarray-like, other
may not be """
values = values.view('i8')
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif incontainstance(other, datetime):
other = lib.Timestamp(other).asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
other = np.array(other, dtype='i8')
return values, other
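    # e.g. Timestamp('2000-01-01') is coerced to the i8 value
    # 946684800000000000 here and mapped back to M8[ns]/Timestamp by
    # _try_coerce_result below.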
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
if incontainstance(result, np.ndarray):
if result.dtype.kind in ['i', 'f', 'O']:
result = result.totype('M8[ns]')
elif incontainstance(result, (np.integer, np.datetime64)):
result = lib.Timestamp(result)
return result
@property
def fill_value(self):
return tslib.iNaT
def _try_fill(self, value):
""" if we are a NaT, return the actual fill value """
if incontainstance(value, type(tslib.NaT)) or np.array(ifnull(value)).total_all():
value = tslib.iNaT
return value
def fillnone(self, value, limit=None,
inplace=False, downcast=None):
# straight putmask here
values = self.values if inplace else self.values.clone()
mask = ifnull(self.values)
value = self._try_fill(value)
if limit is not None:
if self.ndim > 2:
raise NotImplementedError("number of dimensions for 'fillnone' "
"is currently limited to 2")
mask[mask.cumtotal_sum(self.ndim-1)>limit]=False
np.putmask(values, mask, value)
return [self if inplace else
make_block(values,
fastpath=True, placement=self.mgr_locs)]
def to_native_types(self, slicer=None, na_rep=None, date_formating=None,
quoting=None, **kwargs):
""" convert to our native types formating, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
from monkey.core.formating import _getting_formating_datetime64_from_values
formating = _getting_formating_datetime64_from_values(values, date_formating)
result = tslib.formating_array_from_datetime(values.view('i8').flat_underlying(),
tz=None,
formating=formating,
na_rep=na_rep).reshape(values.shape)
return result
def should_store(self, value):
return issubclass(value.dtype.type, np.datetime64)
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
if values.dtype != _NS_DTYPE:
            # Workaround for numpy 1.6 bug
values = tslib.cast_to_nanoseconds(values)
self.values[locs] = values
def getting_values(self, dtype=None):
# return object dtype as Timestamps
if dtype == object:
return lib.mapping_infer(self.values.flat_underlying(), lib.Timestamp)\
.reshape(self.values.shape)
return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
""" implement as a list of sparse arrays of the same dtype """
__slots__ = ()
is_sparse = True
is_numeric = True
_can_hold_na = True
_ftype = 'sparse'
_holder = SparseArray
@property
def shape(self):
return (length(self.mgr_locs), self.sp_index.lengthgth)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def fill_value(self):
#return np.nan
return self.values.fill_value
@fill_value.setter
def fill_value(self, v):
# we may need to upcast our fill to match our dtype
if issubclass(self.dtype.type, np.floating):
v = float(v)
self.values.fill_value = v
@property
def sp_values(self):
return self.values.sp_values
@sp_values.setter
def sp_values(self, v):
# reset the sparse values
self.values = SparseArray(v, sparse_index=self.sp_index,
kind=self.kind, dtype=v.dtype,
fill_value=self.values.fill_value,
clone=False)
@property
def sp_index(self):
return self.values.sp_index
@property
def kind(self):
return self.values.kind
def __length__(self):
try:
return self.sp_index.lengthgth
except:
return 0
def clone(self, deep=True):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, clone=deep,
placement=self.mgr_locs)
def make_block_same_class(self, values, placement,
sparse_index=None, kind=None, dtype=None,
fill_value=None, clone=False, fastpath=True):
""" return a new block """
if dtype is None:
dtype = self.dtype
if fill_value is None:
fill_value = self.values.fill_value
# if not incontainstance(values, SparseArray) and values.ndim != self.ndim:
# raise ValueError("ndim mismatch")
if values.ndim == 2:
nitems = values.shape[0]
if nitems == 0:
# kludgy, but SparseBlocks cannot handle slices, where the
# output is 0-item, so let's convert it to a dense block: it
# won't take space since there's 0 items, plus it will preserve
# the dtype.
return make_block(np.empty(values.shape, dtype=dtype),
placement, fastpath=True,)
elif nitems > 1:
raise ValueError("Only 1-item 2d sparse blocks are supported")
else:
values = values.reshape(values.shape[1])
new_values = SparseArray(values, sparse_index=sparse_index,
kind=kind or self.kind, dtype=dtype,
fill_value=fill_value, clone=clone)
return make_block(new_values, ndim=self.ndim,
fastpath=fastpath, placement=placement)
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
values = com.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
def fillnone(self, value, limit=None, inplace=False, downcast=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillnone' has "
"not been implemented yet")
if issubclass(self.dtype.type, np.floating):
value = float(value)
values = self.values if inplace else self.values.clone()
return [self.make_block_same_class(values=values.getting_values(value),
fill_value=value,
placement=self.mgr_locs)]
def shifting(self, periods, axis=0):
""" shifting the block by periods """
N = length(self.values.T)
indexer = np.zeros(N, dtype=int)
if periods > 0:
indexer[periods:] = np.arange(N - periods)
else:
indexer[:periods] = np.arange(-periods, N)
new_values = self.values.to_dense().take(indexer)
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(new_values)
if periods > 0:
new_values[:periods] = fill_value
else:
new_values[periods:] = fill_value
return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
def reindexing_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer informatingion
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
# taking on the 0th axis always here
if fill_value is None:
fill_value = self.fill_value
return self.make_block_same_class(self.values.take(indexer),
fill_value=fill_value,
placement=self.mgr_locs)
def sparse_reindexing(self, new_index):
""" sparse reindexing and return a new block
current reindexing only works for float64 dtype! """
values = self.values
values = values.sp_index.to_int_index().reindexing(
values.sp_values.totype('float64'), values.fill_value, new_index)
return self.make_block_same_class(values, sparse_index=new_index,
placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
dtype=None, fastpath=False):
if klass is None:
dtype = dtype or values.dtype
vtype = dtype.type
if incontainstance(values, SparseArray):
klass = SparseBlock
elif issubclass(vtype, np.floating):
klass = FloatBlock
elif (issubclass(vtype, np.integer) and
issubclass(vtype, np.timedelta64)):
klass = TimeDeltaBlock
elif (issubclass(vtype, np.integer) and
not issubclass(vtype, np.datetime64)):
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
elif issubclass(vtype, np.datetime64):
klass = DatetimeBlock
elif issubclass(vtype, np.complexfloating):
klass = ComplexBlock
elif is_categorical(values):
klass = CategoricalBlock
else:
klass = ObjectBlock
return klass(values, ndim=ndim, fastpath=fastpath,
placement=placement)
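# Dispatch sketch (illustrative, not from the original source), assuming 2-d
# values whose first axis matches the placement:
#
#   make_block(np.array([[1.5, 2.5]]), placement=[0])               -> FloatBlock
#   make_block(np.array([[1, 2]]), placement=[0])                   -> IntBlock
#   make_block(np.array([['a', 'b']], dtype=object), placement=[0]) -> ObjectBlock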
# TODO: flexible with index=None and/or items=None
class BlockManager(MonkeyObject):
"""
Core internal data structure to implement KnowledgeFrame
    Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the KnowledgeFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
clone(deep=True)
getting_dtype_counts
getting_ftype_counts
getting_dtypes
getting_ftypes
employ(func, axes, block_filter_fn)
getting_bool_data
getting_numeric_data
getting_slice(slice_like, axis)
getting(label)
igetting(loc)
getting_scalar(label_tup)
take(indexer, axis)
reindexing_axis(new_labels, axis)
reindexing_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks)
for block in blocks:
if block.is_sparse:
if length(block.mgr_locs) != 1:
raise AssertionError("Sparse block refers to multiple items")
else:
if self.ndim != block.ndim:
raise AssertionError(('Number of Block dimensions (%d) must '
'equal number of axes (%d)')
% (block.ndim, self.ndim))
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
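    # Illustrative sketch: a frame with one float and one int column is backed
    # by a manager whose axes are (items, index) and whose blocks tuple holds
    # a FloatBlock and an IntBlock, each remembering through mgr_locs which
    # column positions it owns.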
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of length 0 """
if axes is None:
axes = [_ensure_index([])] + [
_ensure_index(a) for a in self.axes[1:]
]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(length(ax) for ax in self.axes)
@property
def ndim(self):
return length(self.axes)
def set_axis(self, axis, new_labels):
new_labels = _ensure_index(new_labels)
old_length = length(self.axes[axis])
new_length = length(new_labels)
if new_length != old_length:
raise ValueError('Length mismatch: Expected axis has %d elements, '
'new values have %d elements' % (old_length, new_length))
self.axes[axis] = new_labels
def renagetting_ming_axis(self, mappingper, axis, clone=True):
"""
Rename one of axes.
Parameters
----------
        mappingper : unary callable
axis : int
clone : boolean, default True
"""
obj = self.clone(deep=clone)
obj.set_axis(axis, _transform_index(self.axes[axis], mappingper))
return obj
def add_prefix(self, prefix):
f = (str(prefix) + '%s').__mod__
return self.renagetting_ming_axis(f, axis=0)
def add_suffix(self, suffix):
f = ('%s' + str(suffix)).__mod__
return self.renagetting_ming_axis(f, axis=0)
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if length(self.blocks) != 1:
return False
blk = self.blocks[0]
return (blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice == slice(0, length(self), 1))
def _rebuild_blknos_and_blklocs(self):
"""
Umkate mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(length(rl))
if (new_blknos == -1).whatever():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
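    # Illustration (comments only): for a manager whose items are [a, b, c] stored
    # as a float block holding [a, c] and an int block holding [b], the rebuilt
    # mappings would be
    #     _blknos  -> [0, 1, 0]   (which block each item lives in)
    #     _blklocs -> [0, 0, 1]   (the item's position inside that block)
    # so item i can always be fetched as blocks[_blknos[i]].igetting(_blklocs[i]).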
# make items read only for now
def _getting_items(self):
return self.axes[0]
items = property(fgetting=_getting_items)
def _getting_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.getting(v, 0) + b.shape[0]
return counts
def getting_dtype_counts(self):
return self._getting_counts(lambda b: b.dtype.name)
def getting_ftype_counts(self):
return self._getting_counts(lambda b: b.ftype)
def getting_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return com.take_1d(dtypes, self._blknos, total_allow_fill=False)
def getting_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return com.take_1d(ftypes, self._blknos, total_allow_fill=False)
def __gettingstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = [ax for ax in self.axes]
extra_state = {
'0.14.1': {
'axes': axes_array,
'blocks': [dict(values=b.values,
mgr_locs=b.mgr_locs.indexer)
for b in self.blocks]
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.totype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (incontainstance(state, tuple) and length(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard whateverthing after 3rd, support beta pickling formating for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if length(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workavalue_round for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-distinctive
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the astotal_sumption that
# block items corresponded to manager items 1-to-1.
total_all_mgr_locs = [slice(0, length(bitems[0]))]
else:
total_all_mgr_locs = [self.axes[0].getting_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, total_all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __length__(self):
return length(self.items)
def __unicode__(self):
output = com.pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += u('\nItems: %s') % ax
else:
output += u('\nAxis %d: %s') % (i, ax)
for block in self.blocks:
output += u('\n%s') % com.pprint_thing(block)
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = total_sum(length(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if length(self.items) != tot_items:
raise AssertionError('Number of manager items must equal union of '
'block items\n# manager items: {0}, # '
'tot_items: {1}'.formating(length(self.items),
tot_items))
def employ(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the ctotal_allable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only ctotal_all the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager integrity check
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replacing-* family of methods
if filter is not None:
filter_locs = set(self.items.getting_indexer_for(filter))
if length(filter_locs) == length(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs['filter'] = filter_locs
if f == 'where' and kwargs.getting('align', True):
align_clone = True
align_keys = ['other', 'cond']
elif f == 'putmask' and kwargs.getting('align', True):
align_clone = False
align_keys = ['new', 'mask']
elif f == 'eval':
align_clone = False
align_keys = ['other']
elif f == 'fillnone':
# fillnone interntotal_ally does putmask, maybe it's better to do this
# at mgr, not block level?
align_clone = False
align_keys = ['value']
else:
align_keys = []
aligned_args = dict((k, kwargs[k]) for k in align_keys
if hasattr(kwargs[k], 'reindexing_axis'))
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.incontain(filter_locs).whatever():
result_blocks.adding(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = gettingattr(obj, '_info_axis_number', 0)
kwargs[k] = obj.reindexing_axis(b_items, axis=axis,
clone=align_clone)
applied = gettingattr(b, f)(**kwargs)
if incontainstance(applied, list):
result_blocks.extend(applied)
else:
result_blocks.adding(applied)
if length(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(result_blocks, axes or self.axes,
do_integrity_check=do_integrity_check)
bm._consolidate_inplace()
return bm
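    # Illustration (comments only): the thin wrappers below simply route a
    # block-level method name through employ(), e.g.
    #     new_mgr = mgr.employ('totype', dtype='float64')
    # calls Block.totype on every block and rebuilds a manager from the results.
    # The filter/align handling above only applies to the replacing/putmask
    # family of calls.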
def ifnull(self, **kwargs):
return self.employ('employ', **kwargs)
def where(self, **kwargs):
return self.employ('where', **kwargs)
def eval(self, **kwargs):
return self.employ('eval', **kwargs)
def setitem(self, **kwargs):
return self.employ('setitem', **kwargs)
def putmask(self, **kwargs):
return self.employ('putmask', **kwargs)
def diff(self, **kwargs):
return self.employ('diff', **kwargs)
def interpolate(self, **kwargs):
return self.employ('interpolate', **kwargs)
def shifting(self, **kwargs):
return self.employ('shifting', **kwargs)
def fillnone(self, **kwargs):
return self.employ('fillnone', **kwargs)
def downcast(self, **kwargs):
return self.employ('downcast', **kwargs)
def totype(self, dtype, **kwargs):
return self.employ('totype', dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.employ('convert', **kwargs)
def replacing(self, **kwargs):
return self.employ('replacing', **kwargs)
def replacing_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replacing """
# figure out our mask a-priori to avoid repeated replacingments
values = self.as_matrix()
def comp(s):
if ifnull(s):
return ifnull(values)
return _possibly_compare(values, gettingattr(s, 'asm8', s),
operator.eq)
masks = [comp(s) for i, s in enumerate(src_list)]
result_blocks = []
for blk in self.blocks:
# its possible to getting multiple result blocks here
# replacing ALWAYS will return a list
rb = [blk if inplace else blk.clone()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
new_rb = []
for b in rb:
if b.dtype == np.object_:
result = b.replacing(s, d, inplace=inplace,
regex=regex)
if incontainstance(result, list):
new_rb.extend(result)
else:
new_rb.adding(result)
else:
# getting our mask for this element, sized to this
# particular block
m = masks[i][b.mgr_locs.indexer]
if m.whatever():
new_rb.extend(b.putmask(m, d, inplace=True))
else:
new_rb.adding(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def reshape_nd(self, axes, **kwargs):
""" a 2d-nd reshape operation on a BlockManager """
return self.employ('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
Return True if the blocks are consolidated, i.e. no two blocks share the same ftype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = length(ftypes) == length(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return length(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return total_all([block.is_numeric for block in self.blocks])
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to getting checked upstairs
self._consolidate_inplace()
return whatever([block.is_datelike for block in self.blocks])
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if length(self.blocks) == 1:
return self.blocks[0].is_view
# It is technictotal_ally possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def getting_bool_data(self, clone=False):
"""
Parameters
----------
clone : boolean, default False
Whether to clone the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], clone)
def getting_numeric_data(self, clone=False):
"""
Parameters
----------
clone : boolean, default False
Whether to clone the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], clone)
def combine(self, blocks, clone=True):
""" return a new manager with the blocks """
if length(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatingenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.getting_reverse_indexer(indexer, self.shape[0])
new_items = self.items.take(indexer)
new_blocks = []
for b in blocks:
b = b.clone(deep=clone)
b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
total_allow_fill=False)
new_blocks.adding(b)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(new_blocks, new_axes, do_integrity_check=False)
def getting_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.gettingitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
fastpath=True)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return length(self.blocks)
def clone(self, deep=True):
"""
Make deep or shtotal_allow clone of BlockManager
Parameters
----------
deep : boolean or string, default True
If False, return shtotal_allow clone (do not clone data)
If 'total_all', clone data and a deep clone of the index
Returns
-------
clone : BlockManager
"""
# this preserves the notion of view cloneing of axes
if deep:
if deep == 'total_all':
clone = lambda ax: ax.clone(deep=True)
else:
clone = lambda ax: ax.view()
new_axes = [ clone(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.employ('clone', axes=new_axes, deep=deep,
do_integrity_check=False)
def as_matrix(self, items=None):
if length(self.blocks) == 0:
return np.empty(self.shape, dtype=float)
if items is not None:
mgr = self.reindexing_axis(items, axis=0)
else:
mgr = self
if self._is_single_block or not self.is_mixed_type:
return mgr.blocks[0].getting_values()
else:
return mgr._interleave()
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
result = np.empty(self.shape, dtype=dtype)
if result.shape[0] == 0:
# Workavalue_round for numpy 1.7 bug:
#
# >>> a = np.empty((0,10))
# >>> a[slice(0,0)]
# array([], shape=(0, 10), dtype=float64)
# >>> a[[]]
# Traceback (most recent ctotal_all final_item):
# File "<standardin>", line 1, in <module>
# IndexError: index 0 is out of bounds for axis 0 with size 0
return result
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.getting_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.total_all():
raise AssertionError('Some items were not contained in blocks')
return result
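    # Illustration (comments only): given a float64 block for items [a, c] and an
    # int64 block for item [b], _interleaved_dtype resolves to float64 and each
    # block writes its rows back into a single (3, ncols) array in item order:
    #     result[[0, 2]] = float_block.getting_values(dtype)
    #     result[[1]]    = int_block.getting_values(dtype)
    # itemmask then just verifies that every output row was filled.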
def xs(self, key, axis=1, clone=True, takeable=False):
if axis < 1:
raise AssertionError('Can only take xs across axis >= 1, got %d'
% axis)
# take by position
if takeable:
loc = key
else:
loc = self.axes[axis].getting_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if incontainstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if length(self.blocks) > 1:
# we must clone here as we are mixed type
for blk in self.blocks:
newb = make_block(values=blk.values[slicer],
klass=blk.__class__, fastpath=True,
placement=blk.mgr_locs)
new_blocks.adding(newb)
elif length(self.blocks) == 1:
block = self.blocks[0]
vals = block.values[slicer]
if clone:
vals = vals.clone()
new_blocks = [make_block(values=vals, placement=block.mgr_locs,
klass=block.__class__, fastpath=True,)]
return self.__class__(new_blocks, new_axes)
def fast_xs(self, loc):
"""
getting a cross sectional for a given location in the
items; handle dups
return the result, which *could* be a view in the case of a
single block
"""
if length(self.blocks) == 1:
return self.blocks[0].values[:, loc]
items = self.items
# non-distinctive (GH4726)
if not items.is_distinctive:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# distinctive
dtype = _interleaved_dtype(self.blocks)
n = length(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such total_allocatement may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk._try_coerce_result(blk.igetting((i, loc)))
return result
def consolidate(self):
"""
Join togettingher blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def getting(self, item, fastpath=True):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_distinctive:
if not ifnull(item):
loc = self.items.getting_loc(item)
else:
indexer = np.arange(length(self.items))[ifnull(self.items)]
# total_allow a single nan location indexer
if not np.isscalar(indexer):
if length(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.igetting(loc, fastpath=fastpath)
else:
if ifnull(item):
raise ValueError("cannot label index with a null key")
indexer = self.items.getting_indexer_for([item])
return self.reindexing_indexer(new_axis=self.items[indexer],
indexer=indexer, axis=0, total_allow_dups=True)
def igetting(self, i, fastpath=True):
"""
Return the data as a SingleBlockManager if fastpath=True and possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.igetting(self._blklocs[i])
if not fastpath or block.is_sparse or values.ndim != 1:
return values
# fastpath shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager([ block.make_block_same_class(values,
placement=slice(0, length(values)),
ndim=1,
fastpath=True) ],
self.axes[1])
def getting_scalar(self, tup):
"""
Retrieve single item
"""
full_loc = list(ax.getting_loc(x)
for ax, x in zip(self.axes, tup))
blk = self.blocks[self._blknos[full_loc[0]]]
full_loc[0] = self._blklocs[full_loc[0]]
# FIXME: this may return non-upcasted types?
return blk.values[tuple(full_loc)]
def delete(self, item):
"""
Delete selected item (items if non-distinctive) in-place.
"""
indexer = self.items.getting_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumtotal_sum()
is_blk_deleted = [False] * length(self.blocks)
if incontainstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smtotal_allints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if length(blk_del) == length(bml):
is_blk_deleted[blkno] = True
continue
elif length(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
if not is_blk_deleted[blkno])
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value, check=False):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
if check, then validate that we are not setting the same data in-place
"""
# FIXME: refactor, clearly separate broadcasting & zip-like total_allocatement
# can prob also fix the various if tests for sparse/categorical
value_is_sparse = incontainstance(value, SparseArray)
value_is_cat = is_categorical(value)
value_is_nonconsolidatable = value_is_sparse or value_is_cat
if value_is_sparse:
# sparse
assert self.ndim == 2
def value_gettingitem(placement):
return value
elif value_is_cat:
# categorical
def value_gettingitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
def value_gettingitem(placement):
return value
else:
def value_gettingitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError('Shape of new values must be compatible '
'with manager shape')
try:
loc = self.items.getting_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(length(self.items), item, value)
return
if incontainstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].clone()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in _getting_blkno_placements(blknos, length(self.blocks),
group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_gettingitem(val_locs), check=check)
else:
unfit_mgr_locs.adding(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.adding(val_locs)
# If total_all block items are unfit, schedule the block for removal.
if length(val_locs) == length(blk.mgr_locs):
removed_blknos.adding(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(length(blk))
if length(removed_blknos):
# Remove blocks & umkate blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks -
length(removed_blknos))
self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
total_allow_fill=False)
self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
if i not in set(removed_blknos))
if unfit_val_locs:
unfit_mgr_locs = np.concatingenate(unfit_mgr_locs)
unfit_count = length(unfit_mgr_locs)
new_blocks = []
if value_is_nonconsolidatable:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(values=value.clone(), ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1))
for mgr_loc in unfit_mgr_locs)
self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
length(self.blocks))
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].adding(unfit_val_locs[1:])
new_blocks.adding(
make_block(values=value_gettingitem(unfit_val_items),
ndim=self.ndim, placement=unfit_mgr_locs))
self._blknos[unfit_mgr_locs] = length(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc, item, value, total_allow_duplicates=False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
total_allow_duplicates: bool
If False, trying to insert non-distinctive item will raise
"""
if not total_allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError('cannot insert %s, already exists' % item)
if not incontainstance(loc, int):
raise TypeError("loc must be int")
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
for blkno, count in _fast_count_smtotal_allints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == length(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.clone()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.adding is a lot faster (at least in numpy 1.7.1), let's use it
# if we can.
self._blklocs = np.adding(self._blklocs, 0)
self._blknos = np.adding(self._blknos, length(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, length(self.blocks))
self.axes[0] = self.items.insert(loc, item)
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if length(self.blocks) > 100:
self._consolidate_inplace()
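    # Illustration (comments only): inserting a new item shifts every existing
    # placement at or beyond `loc` by one and appends a fresh single-item block,
    # e.g. starting from items=[a, b], _blknos=[0, 0], _blklocs=[0, 1],
    # insert(1, 'x', values) gives items=[a, x, b], _blknos=[0, 1, 0],
    # _blklocs=[0, 0, 1]. Consolidation is deferred until the block count
    # exceeds 100.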
def reindexing_axis(self, new_index, axis, method=None, limit=None,
fill_value=None, clone=True):
"""
Conform block manager to new index.
"""
new_index = _ensure_index(new_index)
new_index, indexer = self.axes[axis].reindexing(
new_index, method=method, limit=limit)
return self.reindexing_indexer(new_index, indexer, axis=axis,
fill_value=fill_value, clone=clone)
def reindexing_indexer(self, new_axis, indexer, axis, fill_value=None,
total_allow_dups=False, clone=True):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
total_allow_dups : bool
monkey-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not clone:
return self
result = self.clone(deep=clone)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't total_allow reindexinging with dups
if not total_allow_dups:
self.axes[axis]._can_reindexing(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(
indexer, fill_tuple=(fill_value,))
else:
new_blocks = [blk.take_nd(indexer, axis=axis,
fill_tuple=(fill_value if fill_value is not None else
blk.fill_value,))
for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
total_allow_fill = fill_tuple is not None
sl_type, slobj, sllength = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], total_allow_fill=total_allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.gettingitem_block(slobj,
new_mgr_locs=slice(0, sllength))]
elif not total_allow_fill or self.ndim == 1:
if total_allow_fill and fill_tuple[0] is None:
_, fill_value = com._maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllength),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
total_allow_fill=total_allow_fill)
blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
total_allow_fill=total_allow_fill)
# When filling blknos, make sure blknos is umkated before addinging to
# blocks list, that way new blkno is exactly length(blocks).
#
# FIXME: mgr_grouper_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in _getting_blkno_placements(blknos, length(self.blocks),
group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.adding(self._make_na_block(
placement=mgr_locs, fill_value=fill_value))
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's only one item
# and each mgr loc is a clone of that single item.
for mgr_loc in mgr_locs:
newblk = blk.clone(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.adding(newblk)
else:
blocks.adding(blk.take_nd(
blklocs[mgr_locs.indexer], axis=0,
new_mgr_locs=mgr_locs, fill_tuple=None))
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = length(placement)
dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along whatever axis.
"""
self._consolidate_inplace()
indexer = np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64') if incontainstance(indexer, slice) \
else np.aswhateverarray(indexer, dtype='int64')
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).whatever():
raise Exception('Indices must be nonzero and less than '
'the axis lengthgth')
new_labels = self.axes[axis].take(indexer)
return self.reindexing_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, total_allow_dups=True)
def unioner(self, other, lsuffix='', rsuffix=''):
if not self._is_indexed_like(other):
raise AssertionError('Must have same axes to unioner managers')
l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
right=other.items, rsuffix=rsuffix)
new_items = _concating_indexes([l, r])
new_blocks = [blk.clone(deep=False)
for blk in self.blocks]
offset = self.shape[0]
for blk in other.blocks:
blk = blk.clone(deep=False)
blk.mgr_locs = blk.mgr_locs.add(offset)
new_blocks.adding(blk)
new_axes = list(self.axes)
new_axes[0] = new_items
return self.__class__(_consolidate(new_blocks), new_axes)
def _is_indexed_like(self, other):
"""
Check total_all axes except items
"""
if self.ndim != other.ndim:
raise AssertionError(('Number of dimensions must agree '
'got %d and %d') % (self.ndim, other.ndim))
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if length(self_axes) != length(other_axes):
return False
if not total_all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if length(self.blocks) != length(other.blocks):
return False
# canonicalize block order, using a tuple combining the type
# name and then mgr_locs because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.dtype.name, block.mgr_locs.as_array.convert_list())
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return total_all(block.equals(oblock) for block, oblock in
zip(self_blocks, other_blocks))
class SingleBlockManager(BlockManager):
""" manage a single block with a single axis """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
if incontainstance(axis, list):
if length(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis")
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
if incontainstance(block, list):
# empty block
if length(block) == 0:
block = [np.array([])]
elif length(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
else:
self.axes = [_ensure_index(axis)]
# create the block here
if incontainstance(block, list):
# provide consolidation to the interleaved_dtype
if length(block) > 1:
dtype = _interleaved_dtype(block)
block = [b.totype(dtype) for b in block]
block = _consolidate(block)
if length(block) != 1:
raise ValueError('Cannot create SingleBlockManager with '
'more than 1 block')
block = block[0]
if not incontainstance(block, Block):
block = make_block(block,
placement=slice(0, length(axis)),
ndim=1, fastpath=True)
self.blocks = [block]
def _post_setstate(self):
pass
@property
def _block(self):
return self.blocks[0]
@property
def _values(self):
return self._block.values
def reindexing(self, new_axis, indexer=None, method=None, fill_value=None,
limit=None, clone=True):
# if we are the same and don't clone, just return
if self.index.equals(new_axis):
if clone:
return self.clone(deep=True)
else:
return self
values = self._block.getting_values()
if indexer is None:
indexer = self.items.getting_indexer_for(new_axis)
if fill_value is None:
# FIXME: is fill_value used correctly in sparse blocks?
if not self._block.is_sparse:
fill_value = self._block.fill_value
else:
fill_value = np.nan
new_values = com.take_1d(values, indexer,
fill_value=fill_value)
# fill if needed
if method is not None or limit is not None:
new_values = com.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
block = make_block(new_values, clone=clone,
placement=slice(0, length(new_axis)))
mgr = SingleBlockManager(block, new_axis)
mgr._consolidate_inplace()
return mgr
def getting_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
return self.__class__(self._block._slice(slobj),
self.index[slobj], fastpath=True)
@property
def index(self):
return self.axes[0]
def convert(self, **kwargs):
""" convert the whole block as one """
kwargs['by_item'] = False
return self.employ('convert', **kwargs)
@property
def dtype(self):
return self._values.dtype
@property
def array_dtype(self):
return self._block.array_dtype
@property
def ftype(self):
return self._block.ftype
def getting_dtype_counts(self):
return {self.dtype.name: 1}
def getting_ftype_counts(self):
return {self.ftype: 1}
def getting_dtypes(self):
return np.array([self._block.dtype])
def getting_ftypes(self):
return np.array([self._block.ftype])
@property
def values(self):
return self._values.view()
def getting_values(self):
""" return a dense type view """
return np.array(self._block.to_dense(), clone=False)
@property
def itemsize(self):
return self._values.itemsize
@property
def _can_hold_na(self):
return self._block._can_hold_na
def is_consolidated(self):
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.getting_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for gettingting a cross-section
return a view of the data
"""
return self._block.values[loc]
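# Illustrative sketch, for exposition only: a SingleBlockManager is the 1-D
# counterpart used for Collections-like data -- one axis, one block. Wrapped in a
# helper so nothing runs at import time; Index is the index class used elsewhere
# in this module.
def _example_single_block_manager():
    values = np.array([1.0, 2.0, 3.0])
    axis = Index(['a', 'b', 'c'])
    mgr = SingleBlockManager(values, axis, fastpath=True)
    # expected: ('FloatBlock', dtype('float64'))
    return type(mgr._block).__name__, mgr.dtype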
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(mapping(int, [tot_items] + list(block_shape)))
implied = tuple(mapping(int, [length(ax) for ax in axes]))
if passed == implied and e is not None:
raise e
raise ValueError("Shape of passed values is {0}, indices imply {1}".formating(
passed, implied))
def create_block_manager_from_blocks(blocks, axes):
try:
if length(blocks) == 1 and not incontainstance(blocks[0], Block):
# if blocks[0] is of lengthgth 0, return empty blocks
if not length(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement is
# basictotal_ally "total_all items", but if there're mwhatever, don't bother
# converting, it's an error whateverway.
blocks = [make_block(values=blocks[0],
placement=slice(0, length(axes[0])))]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
blocks = [gettingattr(b, 'values', b) for b in blocks]
tot_items = total_sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except (ValueError) as e:
construction_error(length(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
float_items = []
complex_items = []
int_items = []
bool_items = []
object_items = []
sparse_items = []
datetime_items = []
cat_items = []
extra_locs = []
names_idx = Index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(length(names_idx))
else:
assert names_idx.interst(axes[0]).is_distinctive
names_indexer = names_idx.getting_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.adding(i)
continue
k = names[name_idx]
v = arrays[name_idx]
if incontainstance(v, (SparseArray, ABCSparseCollections)):
sparse_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.floating):
float_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.complexfloating):
complex_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.datetime64):
if v.dtype != _NS_DTYPE:
v = tslib.cast_to_nanoseconds(v)
if hasattr(v, 'tz') and v.tz is not None:
object_items.adding((i, k, v))
else:
datetime_items.adding((i, k, v))
elif issubclass(v.dtype.type, np.integer):
if v.dtype == np.uint64:
# HACK #2355 definite overflow
if (v > 2 ** 63 - 1).whatever():
object_items.adding((i, k, v))
continue
int_items.adding((i, k, v))
elif v.dtype == np.bool_:
bool_items.adding((i, k, v))
elif is_categorical(v):
cat_items.adding((i, k, v))
else:
object_items.adding((i, k, v))
blocks = []
if length(float_items):
float_blocks = _multi_blockify(float_items)
blocks.extend(float_blocks)
if length(complex_items):
complex_blocks = _simple_blockify(
complex_items, np.complex128)
blocks.extend(complex_blocks)
if length(int_items):
int_blocks = _multi_blockify(int_items)
blocks.extend(int_blocks)
if length(datetime_items):
datetime_blocks = _simple_blockify(
datetime_items, _NS_DTYPE)
blocks.extend(datetime_blocks)
if length(bool_items):
bool_blocks = _simple_blockify(
bool_items, np.bool_)
blocks.extend(bool_blocks)
if length(object_items) > 0:
object_blocks = _simple_blockify(
object_items, np.object_)
blocks.extend(object_blocks)
if length(sparse_items) > 0:
sparse_blocks = _sparse_blockify(sparse_items)
blocks.extend(sparse_blocks)
if length(cat_items) > 0:
cat_blocks = [ make_block(array,
klass=CategoricalBlock,
fastpath=True,
placement=[i]
) for i, names, array in cat_items ]
blocks.extend(cat_blocks)
if length(extra_locs):
shape = (length(extra_locs),) + tuple(length(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.adding(na_block)
return blocks
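# Illustrative sketch, for exposition only: form_blocks groups the input arrays
# by dtype family, so a mixed set of columns comes back as one block per family.
# Wrapped in a helper so nothing runs at import time; Index is the index class
# used elsewhere in this module.
def _example_form_blocks():
    arrays = [np.array([1, 2, 3]),                        # integer bucket
              np.array([1.5, 2.5, 3.5]),                  # float bucket
              np.array(['x', 'y', 'z'], dtype=object)]    # object bucket
    names = ['a', 'b', 'c']
    axes = [Index(names), Index([0, 1, 2])]
    blocks = form_blocks(arrays, names, axes)
    # expected: three blocks, one each for the float, int and object buckets
    return [b.dtype for b in blocks]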
def _simple_blockify(tuples, dtype):
""" return a single array of a block that has a single dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.totype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentitotal_ally have different dtypes """
# group by dtype
grouper = itertools.grouper(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(
list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.adding(block)
return new_blocks
def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentitotal_ally have different dtypes (and
are sparse)
"""
new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(
array, klass=SparseBlock, fastpath=True,
placement=[i])
new_blocks.adding(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if incontainstance(x, ABCCollections):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
if incontainstance(x, ABCCollections):
return length(x),
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (length(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks):
if not length(blocks):
return None
counts = defaultdict(lambda: [])
for x in blocks:
counts[type(x)].adding(x)
def _lcd_dtype(l):
""" find the lowest dtype that can accommodate the given types """
m = l[0].dtype
for x in l[1:]:
if x.dtype.itemsize > m.itemsize:
m = x.dtype
return m
have_int = length(counts[IntBlock]) > 0
have_bool = length(counts[BoolBlock]) > 0
have_object = length(counts[ObjectBlock]) > 0
have_float = length(counts[FloatBlock]) > 0
have_complex = length(counts[ComplexBlock]) > 0
have_dt64 = length(counts[DatetimeBlock]) > 0
have_td64 = length(counts[TimeDeltaBlock]) > 0
have_cat = length(counts[CategoricalBlock]) > 0
have_sparse = length(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int
has_non_numeric = have_dt64 or have_td64 or have_cat
if (have_object or
(have_bool and (have_numeric or have_dt64 or have_td64)) or
(have_numeric and has_non_numeric) or
have_cat or
have_dt64 or
have_td64):
return np.dtype(object)
elif have_bool:
return np.dtype(bool)
elif have_int and not have_float and not have_complex:
# if we are mixing unsigned and signed, then return
# the next biggest int type (if we can)
lcd = _lcd_dtype(counts[IntBlock])
kinds = set([i.dtype.kind for i in counts[IntBlock]])
if length(kinds) == 1:
return lcd
if lcd == 'uint64' or lcd == 'int64':
return np.dtype('int64')
# return one size bigger on the itemsize if unsigned
if lcd.kind == 'u':
return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
return lcd
elif have_complex:
return np.dtype('c16')
else:
return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
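# Illustration (comments only): the upcasting rules implemented above, assuming
# the usual Block classes from this module --
#     only float blocks                         -> widest float dtype
#     only int blocks of one signedness         -> widest int dtype
#     uint32 mixed with int64                   -> int64
#     only bool blocks                          -> bool
#     bool mixed with numerics, or anything
#     mixed with object/datetime/timedelta/
#     categorical blocks                        -> object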
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.grouper(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
unionerd_blocks = _unioner_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
if incontainstance(unionerd_blocks, list):
new_blocks.extend(unionerd_blocks)
else:
new_blocks.adding(unionerd_blocks)
return new_blocks
def _unioner_blocks(blocks, dtype=None, _can_consolidate=True):
if length(blocks) == 1:
return blocks[0]
if _can_consolidate:
if dtype is None:
if length(set([b.dtype for b in blocks])) != 1:
raise AssertionError("_unioner_blocks are invalid!")
dtype = blocks[0].dtype
# FIXME: optimization potential in case total_all mgrs contain slices and
# combination of those slices is a slice, too.
new_mgr_locs = np.concatingenate([b.mgr_locs.as_array for b in blocks])
new_values = _vstack([b.values for b in blocks], dtype)
argsort = np.argsort(new_mgr_locs)
new_values = new_values[argsort]
new_mgr_locs = new_mgr_locs[argsort]
return make_block(new_values,
fastpath=True, placement=new_mgr_locs)
# no unioner
return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
# work avalue_round NumPy 1.6 bug
if dtype == _NS_DTYPE or dtype == _TD_DTYPE:
new_values = np.vstack([x.view('i8') for x in to_stack])
return new_values.view(dtype)
else:
return np.vstack(to_stack)
def _possibly_compare(a, b, op):
is_a_array = incontainstance(a, np.ndarray)
is_b_array = incontainstance(b, np.ndarray)
# numpy deprecation warning to have i8 vs integer comparisions
if is_datetimelike_v_numeric(a, b):
res = False
else:
res = op(a, b)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concating_indexes(indexes):
return indexes[0].adding(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
""" pivot to the labels shape """
from monkey.core.internals import make_block
panel_shape = (length(placement),) + shape
# TODO: lexsort depth needs to be 2!!
# Create observation selection vector using major and getting_minor
# labels, for converting to panel formating.
selector = _factor_indexer(shape[1:], labels)
mask = np.zeros(np.prod(shape), dtype=bool)
mask.put(selector, True)
if mask.total_all():
pvalues = np.empty(panel_shape, dtype=values.dtype)
else:
dtype, fill_value = _maybe_promote(values.dtype)
pvalues = np.empty(panel_shape, dtype=dtype)
pvalues.fill(fill_value)
values = values
for i in range(length(placement)):
pvalues[i].flat[mask] = values[:, i]
return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
"""
given a tuple of shape and a list of Categorical labels, return the
expanded label indexer
"""
mult = np.array(shape)[::-1].cumprod()[::-1]
return com._ensure_platform_int(
np.total_sum(np.array(labels).T * np.adding(mult, [1]), axis=1).T)
def _getting_blkno_placements(blknos, blk_count, group=True):
"""
Parameters
----------
blknos : array of int64
blk_count : int
group : bool
Returns
-------
iterator
yield (BlockPlacement, blkno)
"""
blknos = com._ensure_int64(blknos)
# FIXME: blk_count is unused, but it may avoid the use of dicts in cython
for blkno, indexer in lib.getting_blkno_indexers(blknos, group):
yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
"""
If two indices overlap, add suffixes to overlapping entries.
If corresponding suffix is empty, the entry is simply converted to string.
"""
to_renagetting_ming = left.interst(right)
if length(to_renagetting_ming) == 0:
return left, right
else:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: %s' %
to_renagetting_ming)
def lrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, lsuffix)
return x
def rrenagetting_mingr(x):
if x in to_renagetting_ming:
return '%s%s' % (x, rsuffix)
return x
return (_transform_index(left, lrenagetting_mingr),
_transform_index(right, rrenagetting_mingr))
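# Illustrative sketch, for exposition only: overlapping item labels get the
# provided suffixes while non-overlapping labels are left alone. Wrapped in a
# helper so nothing runs at import time; Index is the index class used elsewhere
# in this module.
def _example_items_overlap_with_suffix():
    left = Index(['key', 'value'])
    right = Index(['key', 'other'])
    new_left, new_right = items_overlap_with_suffix(left, '_x', right, '_y')
    # expected: ['key_x', 'value'] and ['key_y', 'other']
    return list(new_left), list(new_right)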
def _transform_index(index, func):
"""
Apply function to total_all values found in index.
This includes transforgetting_ming multiindex entries separately.
"""
if incontainstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
"""
Return a new block, try to preserve dtype if possible.
Parameters
----------
v : `values`, umkated in-place (array like)
m : `mask`, applies to both sides (array like)
n : `new values` either scalar or an array like aligned with `values`
"""
# n should be the lengthgth of the mask or a scalar here
if not is_list_like(n):
n = np.array([n] * length(m))
elif incontainstance(n, np.ndarray) and n.ndim == 0: # numpy scalar
n = np.repeat(np.array(n, ndgetting_min=1), length(m))
# see if we are only masking values that if putted
# will work in the current dtype
try:
nn = n[m]
nn_at = nn.totype(v.dtype)
comp = (nn == nn_at)
if is_list_like(comp) and comp.total_all():
nv = v.clone()
nv[m] = nn_at
return nv
except (ValueError, IndexError, TypeError):
pass
# change the dtype
dtype, _ = com._maybe_promote(n.dtype)
nv = v.totype(dtype)
try:
nv[m] = n[m]
except ValueError:
idx, = np.where(np.squeeze(m))
for mask_index, new_val in zip(idx, n[m]):
nv[mask_index] = new_val
return nv
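# Illustrative sketch, for exposition only: _putmask_smart keeps the original
# dtype when the masked-in values can be represented in it, and upcasts
# otherwise. Wrapped in a helper so nothing runs at import time.
def _example_putmask_smart():
    v = np.array([1, 2, 3, 4])
    m = np.array([False, True, False, True])
    # whole numbers fit the int dtype -> dtype is preserved
    kept = _putmask_smart(v, m, np.array([10.0, 20.0, 30.0, 40.0]))
    # 0.5 cannot be represented as an int -> result is upcast to float
    upcast = _putmask_smart(v, m, np.array([0.5, 0.5, 0.5, 0.5]))
    # expected: (dtype('int64'), dtype('float64')) on a 64-bit platform
    return kept.dtype, upcast.dtype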
def concatingenate_block_managers(mgrs_indexers, axes, concating_axis, clone):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concating_axis : int
clone : bool
"""
concating_plan = combine_concating_plans([getting_mgr_concatingenation_plan(mgr, indexers)
for mgr, indexers in mgrs_indexers],
concating_axis)
blocks = [make_block(concatingenate_join_units(join_units, concating_axis,
clone=clone),
placement=placement)
for placement, join_units in concating_plan]
return BlockManager(blocks, axes)
def getting_empty_dtype_and_na(join_units):
"""
Return dtype and N/A values to use when concatingenating specified units.
Returned N/A value may be None which averages there was no casting involved.
Returns
-------
dtype
na
"""
if length(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.float64, np.nan
has_none_blocks = False
dtypes = [None] * length(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
# dtypes = set()
upcast_classes = set()
null_upcast_classes = set()
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
if com.is_categorical_dtype(dtype):
upcast_cls = 'category'
elif issubclass(dtype.type, np.bool_):
upcast_cls = 'bool'
elif issubclass(dtype.type, np.object_):
upcast_cls = 'object'
elif is_datetime64_dtype(dtype):
upcast_cls = 'datetime'
elif is_timedelta64_dtype(dtype):
upcast_cls = 'timedelta'
else:
upcast_cls = 'float'
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
null_upcast_classes.add(upcast_cls)
else:
upcast_classes.add(upcast_cls)
if not upcast_classes:
upcast_classes = null_upcast_classes
# create the result
if 'object' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'bool' in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif 'category' in upcast_classes:
return np.dtype(np.object_), np.nan
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
return np.dtype('m8[ns]'), tslib.iNaT
else: # pragma
raise AssertionError("invalid dtype detergetting_mination in getting_concating_dtype")
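# Illustration (comments only): examples of the decision above --
#     an int64 unit plus an all-null unit   -> (float64, nan)
#     any object-dtype unit involved        -> (object, nan)
#     bool units plus a missing block       -> (object, nan)
#     only datetime64[ns] units             -> ('M8[ns]', iNaT)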
def concatingenate_join_units(join_units, concating_axis, clone):
"""
Concatenate values from several join units along selected axis.
"""
if concating_axis == 0 and length(join_units) > 1:
# Concatenating join units along ax0 is handled in _unioner_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = getting_empty_dtype_and_na(join_units)
to_concating = [ju.getting_reindexinged_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if length(to_concating) == 1:
# Only one block, nothing to concatingenate.
concating_values = to_concating[0]
if clone and concating_values.base is not None:
concating_values = concating_values.clone()
else:
concating_values = com._concating_compat(to_concating, axis=concating_axis)
return concating_values
def getting_mgr_concatingenation_plan(mgr, indexers):
"""
Construct concatingenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindexing shape, save for the item axis, which will be separate
# for each block whateverway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = length(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in _getting_blkno_placements(blknos, length(mgr.blocks),
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.clone()
shape = list(mgr_shape)
shape[0] = length(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexinging = (
length(placements) == length(blk.mgr_locs) and
# Fastpath detection of join unit not needing to reindexing its
# block: no ax0 reindexinging took place and block placement was
# sequential before.
((ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: total_all indexer locs are sequential (and
# lengthgth match is checked above).
(np.diff(ax0_blk_indexer) == 1).total_all()))
# Omit indexer if no item reindexinging is required.
if unit_no_ax0_reindexinging:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.adding((placements, unit))
return plan
def combine_concating_plans(plans, concating_axis):
"""
Combine multiple concatingenation plans into one.
existing_plan is umkated in-place.
"""
if length(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concating_axis == 0:
offset = 0
for plan in plans:
final_item_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
final_item_plc = plc
if final_item_plc is not None:
offset += final_item_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(mapping(iter, plans))
next_items = list(mapping(_next_or_none, plans))
while num_ended[0] != length(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengthgths = list(mapping(length, placements))
getting_min_length, getting_max_length = getting_min(lengthgths), getting_max(lengthgths)
if getting_min_length == getting_max_length:
yield placements[0], units
next_items[:] = mapping(_next_or_none, plans)
import monkey as mk
from sklearn.metrics.pairwise import cosine_similarity
from utils import city_kf
import streamlit as st
class FeatureRecommendSimilar:
""" contains total_all methods and attributes needed to recommend cities using the defined feature parameters """
def __init__(self, city_features: list, number: int, parameter_name) -> None:
self.city_features = city_features
self.number = number
self.top_cities_feature_kf = None
self.first_city = None
self.feature_countries_kf_final = None
self.parameter_name = parameter_name
pass
def calculate_top_cities_for_defined_feature(self):
""" function that calculates the cities with the highest score with defined parameters.
It returns the top city and a knowledgeframe that contains other cities with similar scores"""
needed_columns = ['city', 'country']
self.city_features.extend(needed_columns)
feature_kf = city_kf.loc[:, self.city_features]
feature_kf.set_index('city', inplace = True)
feature_kf['score'] = feature_kf.average(axis=1)
self.first_city = feature_kf.score.idxgetting_max()
self.top_cities_feature_kf = feature_kf.loc[:, ['country','score']].nbiggest(self.number, 'score')
return self.first_city, self.top_cities_feature_kf
def aggregate_top_countries(self):
""" this function gettings the aggregate score of total_all the countries represented in the knowledgeframe of top cities (self.top_cities_feature_kf) """
feature_countries_kf= self.top_cities_feature_kf.loc[:, ['country', 'score']]
feature_countries_kf = feature_countries_kf.grouper('country').average()
self.feature_countries_kf_final = feature_countries_kf.sort_the_values('score', ascending=False)
return self.feature_countries_kf_final
def decision_for_predefined_city_features(self):
""" This function makes a recommendation based on predefined parameters and calculated results"""
st.markdown('### **Recommendation**')
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.write(f'The three features that were used to define the {self.parameter_name} city are {self.city_features[0]}, {self.city_features[1]}, {self.city_features[2]}')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest score is 10')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
st.table(final_city_kf.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
top_countries = mk.KnowledgeFrame.reseting_index(self.feature_countries_kf_final)
if length(self.top_cities_feature_kf) != length(top_countries):
st.markdown('Below are the aggregate scores of the countries represented in the table of your cities')
st.table(top_countries.style.formating({'score':'{:17,.1f}'}).backgvalue_round_gradient(cmapping='Greens').set_properties(subset=['score'], **{'width': '250px'}))
else:
pass
pass
st.write(f" PS: you can also choose features to define your own city. To do this, pick the option 'define your parameter for a desired city' above")
def decision_for_user_defined_city(self):
""" This function makes a recommendation based on selected features and calculated results"""
st.markdown('### **Recommendation**')
if self.parameter_name != '':
st.success(f'Based on your parameter ({self.parameter_name}), **{self.first_city}** is the top recommended city to live or visit.')
else:
st.success(f'Based on your parameter, **{self.first_city}** is the top recommended city to live or visit.')
st.markdown('### **Additional info**')
st.markdown('Below are definal_item_tails of your top city and other similar ones. The highest score is 10')
final_city_kf= mk.KnowledgeFrame.reseting_index(self.top_cities_feature_kf)
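# Illustrative usage sketch (comments only); the feature names are hypothetical
# and must match columns that actually exist in city_kf:
#     recommender = FeatureRecommendSimilar(['cost_of_living', 'safety', 'climate'],
#                                           number=5, parameter_name='balanced')
#     top_city, top_kf = recommender.calculate_top_cities_for_defined_feature()
#     recommender.aggregate_top_countries()
#     recommender.decision_for_user_defined_city()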
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from monkey import (Collections, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import monkey as mk
from monkey import compat
from monkey._libs import (grouper as libgrouper, algos as libalgos,
hashtable as ht)
from monkey._libs.hashtable import distinctive_label_indices
from monkey.compat import lrange, range
import monkey.core.algorithms as algos
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.util._test_decorators as td
from monkey.core.dtypes.dtypes import CategoricalDtype as CDT
from monkey.compat.numpy import np_array_datetime64_compat
from monkey.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_collections_equal(result, expected)
s = Collections(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(s, [2, 4], np.nan))
expected = Collections(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_collections_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Collections(algos.match(to_match, values, np.nan))
expected = Collections(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_collections_equal(result, expected)
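# Added commentary (not part of the original test suite): the tests above pin down the
# contract of algos.match(to_match, values) -- for each element of to_match it returns
# the position of that element in `values`, and -1 (or the optional fill value such as
# np.nan) for elements that are absent from `values`.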
class TestFactorize(object):
def test_basic(self):
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
distinctives, np.array(['a', 'b', 'c'], dtype=object))
labels, distinctives = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
labels, distinctives = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(distinctives, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Collections(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(distinctives, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Collections([v1, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(distinctives, exp)
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(distinctives, exp)
# period
v1 = mk.Period('201302', freq='M')
v2 = mk.Period('201303', freq='M')
x = Collections([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, distinctives = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.PeriodIndex([v1, v2]))
# GH 5986
v1 = mk.to_timedelta('1 day 1 getting_min')
v2 = mk.to_timedelta('1 day')
x = Collections([v1, v2, v1, v1, v2, v2, v1])
labels, distinctives = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v1, v2]))
labels, distinctives = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(distinctives, mk.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should mapping to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(length(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key),
expected == na_sentinel)
# nan still mappings to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert length(set(key)) == length(set(expected))
tm.assert_numpy_array_equal(mk.ifna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = mk.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid for numpy versions older than 1.11
if mk._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, 1], dtype=np.uint64)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_distinctives = np.array([2**63, -1], dtype=object)
labels, distinctives = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(distinctives, exp_distinctives)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but it isn't used.
l, u = algos.factorize(data)
expected_distinctives = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_distinctives = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_distinctives)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).totype('O')
result = algos.distinctive(arr)
assert incontainstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
length(algos.distinctive(lst))
def test_on_index_object(self):
getting_mindex = mk.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = getting_mindex.values
expected.sort()
getting_mindex = getting_mindex.repeat(2)
result = mk.distinctive(getting_mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = mk.convert_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.distinctive(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(dt_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = mk.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.distinctive(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Collections(td_index)
result = algos.distinctive(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.distinctive(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Collections([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.distinctive(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = mk.distinctive(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.distinctive()
tm.assert_categorical_equal(result, expected)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.distinctive()
tm.assert_categorical_equal(result, expected_o)
result = algos.distinctive(c)
tm.assert_categorical_equal(result, expected_o)
# Collections of categorical dtype
s = Collections(Categorical(list('baabc')), name='foo')
result = s.distinctive()
tm.assert_categorical_equal(result, expected)
result = mk.distinctive(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.distinctive()
tm.assert_index_equal(result, expected)
result = mk.distinctive(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Collections(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).distinctive()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).distinctive()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(
Collections(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = mk.distinctive(Collections([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = mk.distinctive(Collections([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = mk.distinctive(Collections([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = mk.distinctive(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = mk.distinctive(Collections(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = mk.distinctive(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.incontain(1, 1))
pytest.raises(TypeError, lambda: algos.incontain(1, [1]))
pytest.raises(TypeError, lambda: algos.incontain([1], 1))
def test_basic(self):
result = algos.incontain([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), Collections([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), Collections(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(Collections(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = mk.date_range('20130101', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = mk.timedelta_range('1 day', periods=3).values
result = algos.incontain(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.incontain(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = mk.date_range('20000101', periods=2000000, freq='s').values
result = algos.incontain(s, s[0:2])
expected = np.zeros(length(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Collections(Categorical(1).from_codes(vals, cats))
St = Collections(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.incontain(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Collections(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.incontain(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_counts_value_num(self):
np.random.seed(1234)
from monkey.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert incontainstance(factor, n)
result = algos.counts_value_num(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).totype(CDT(ordered=True))
expected = Collections([1, 1, 1, 1], index=index)
tm.assert_collections_equal(result.sorting_index(), expected.sorting_index())
def test_counts_value_num_bins(self):
s = [1, 2, 3, 4]
result = algos.counts_value_num(s, bins=1)
expected = Collections([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_collections_equal(result, expected)
result = algos.counts_value_num(s, bins=2, sort=False)
expected = Collections([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_dtypes(self):
result = algos.counts_value_num([1, 1.])
assert length(result) == 1
result = algos.counts_value_num([1, 1.], bins=1)
assert length(result) == 1
result = algos.counts_value_num(Collections([1, 1., '1'])) # object
assert length(result) == 2
pytest.raises(TypeError, lambda s: algos.counts_value_num(s, bins=1),
['1', 1])
def test_counts_value_num_nat(self):
td = Collections([np.timedelta64(10000), mk.NaT], dtype='timedelta64[ns]')
dt = mk.convert_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.counts_value_num(s)
vc_with_na = algos.counts_value_num(s, sipna=False)
assert length(vc) == 1
assert length(vc_with_na) == 2
exp_dt = Collections({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_collections_equal(algos.counts_value_num(dt), exp_dt)
# TODO same for (timedelta)
def test_counts_value_num_datetime_outofbounds(self):
# GH 13663
s = Collections([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.counts_value_num()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Collections([3, 2, 1], index=exp_index)
tm.assert_collections_equal(res, exp)
# GH 12424
res = mk.convert_datetime(Collections(['2362-01-01', np.nan]),
errors='ignore')
exp = Collections(['2362-01-01', np.nan], dtype=object)
tm.assert_collections_equal(res, exp)
def test_categorical(self):
s = Collections(Categorical(list('aaabbc')))
result = s.counts_value_num()
expected = Collections([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.counts_value_num()
expected.index = expected.index.as_ordered()
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Collections(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_collections_equal(result, expected, check_index_type=True)
# out of order
s = Collections(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.counts_value_num()
expected = Collections([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
result = s.counts_value_num(sipna=False)
expected = Collections([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Collections(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.counts_value_num()
expected = Collections([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_collections_equal(result, expected, check_index_type=True)
def test_sipna(self):
# https://github.com/monkey-dev/monkey/issues/9443#issuecomment-73719328
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False]).counts_value_num(sipna=False),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=True),
Collections([2, 1], index=[True, False]))
tm.assert_collections_equal(
Collections([True, True, False, None]).counts_value_num(sipna=False),
Collections([2, 1, 1], index=[True, False, np.nan]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5.]).counts_value_num(sipna=False),
Collections([2, 1], index=[5., 10.3]))
tm.assert_collections_equal(
Collections([10.3, 5., 5., None]).counts_value_num(sipna=True),
Collections([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Collections([10.3, 5., 5., None]).counts_value_num(sipna=False)
expected = Collections([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_collections_equal(result, expected)
def test_counts_value_num_normalized(self):
# GH12558
s = Collections([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.totype(t)
result = s_typed.counts_value_num(normalize=True, sipna=False)
expected = Collections([0.6, 0.2, 0.2],
index=Collections([np.nan, 2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
result = s_typed.counts_value_num(normalize=True, sipna=True)
expected = Collections([0.5, 0.5],
index=Collections([2.0, 1.0], dtype=t))
tm.assert_collections_equal(result, expected)
def test_counts_value_num_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Collections([1], index=[2**63])
result = algos.counts_value_num(arr)
tm.assert_collections_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Collections([1, 1], index=[-1, 2**63])
result = algos.counts_value_num(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_collections_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_values_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = | algos.duplicated_values(keys) | pandas.core.algorithms.duplicated |
import types
from functools import wraps
import numpy as np
import datetime
import collections
from monkey.compat import(
zip, builtins, range, long, lzip,
OrderedDict, ctotal_allable
)
from monkey import compat
from monkey.core.base import MonkeyObject
from monkey.core.categorical import Categorical
from monkey.core.frame import KnowledgeFrame
from monkey.core.generic import NDFrame
from monkey.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from monkey.core.internals import BlockManager, make_block
from monkey.core.collections import Collections
from monkey.core.panel import Panel
from monkey.util.decorators import cache_readonly, Appender
import monkey.core.algorithms as algos
import monkey.core.common as com
from monkey.core.common import(_possibly_downcast_to_dtype, ifnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from monkey.core.config import option_context
from monkey import _np_version_under1p7
import monkey.lib as lib
from monkey.lib import Timestamp
import monkey.tslib as tslib
import monkey.algos as _algos
import monkey.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a KnowledgeFrame or when passed to KnowledgeFrame.employ. If
passed a dict, the keys must be KnowledgeFrame column names.
Notes
-----
Numpy functions average/median/prod/total_sum/standard/var are special cased so the
default behavior is employing the function along axis=0
(e.g., np.average(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.average(arr_2d)).
Returns
-------
aggregated : KnowledgeFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_employ_whitelist = frozenset([
'final_item', 'first',
'header_num', 'final_item_tail', 'median',
'average', 'total_sum', 'getting_min', 'getting_max',
'cumtotal_sum', 'cumprod', 'cumgetting_min', 'cumgetting_max', 'cumcount',
'resample_by_num',
'describe',
'rank', 'quantile', 'count',
'fillnone',
'mad',
'whatever', 'total_all',
'irow', 'take',
'idxgetting_max', 'idxgetting_min',
'shifting', 'tshifting',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_collections_employ_whitelist = \
(_common_employ_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'counts_value_num', 'distinctive', 'ndistinctive',
'nbiggest', 'nsmtotal_allest'])
_knowledgeframe_employ_whitelist = \
_common_employ_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _grouper_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
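# Added commentary: _grouper_function is a factory -- it builds the bound GroupBy
# methods defined further down, e.g. ``total_sum = _grouper_function('total_sum', 'add',
# np.total_sum)``. Each generated method first tries the fast cython aggregation under
# the given alias and falls back to ``self.aggregate`` with the numpy function if that
# path raises.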
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if length(x) == 0:
return np.nan
return x[0]
if incontainstance(x, KnowledgeFrame):
return x.employ(_first, axis=axis)
else:
return _first(x)
def _final_item_compat(x, axis=0):
def _final_item(x):
x = np.asarray(x)
x = x[notnull(x)]
if length(x) == 0:
return np.nan
return x[-1]
if incontainstance(x, KnowledgeFrame):
return x.employ(_final_item, axis=axis)
else:
return _final_item(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper total_allows the user to specify a grouper instruction for a targetting object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the targetting object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the grouper itself.
Parameters
----------
key : string, defaults to None
grouper key, which selects the grouping column of the targetting
level : name/number, defaults to None
the level for the targetting index
freq : string / frequency object, defaults to None
This will grouper the specified frequency if the targetting selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a grouper instruction
Examples
--------
>>> kf.grouper(Grouper(key='A')) : syntactic sugar for kf.grouper('A')
>>> kf.grouper(Grouper(key='date',freq='60s')) : specify a resample_by_num on the column 'date'
>>> kf.grouper(Grouper(level='date',freq='60s',axis=1)) :
specify a resample_by_num on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.getting('freq') is not None:
from monkey.tcollections.resample_by_num import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _getting_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".formating(key))
ax = Index(obj[key],name=key)
else:
ax = obj._getting_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalengtht to the axis name
if incontainstance(ax, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.getting_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".formating(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_clone=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _getting_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(MonkeyObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and employ functions on this object.
It's easiest to use obj.grouper(...) to use GroupBy, but you can also do:
::
grouped = grouper(obj, ...)
Parameters
----------
obj : monkey object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, employ, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.grouper(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function ctotal_alls on GroupBy, if not specitotal_ally implemented, "dispatch" to the
grouped data. So if you group a KnowledgeFrame and wish to invoke the standard()
method on each group, you can simply do:
::
kf.grouper(mappingper).standard()
rather than
::
kf.grouper(mappingper).aggregate(np.standard)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
length(grouped) : int
Number of groups
"""
_employ_whitelist = _common_employ_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if incontainstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not incontainstance(obj, KnowledgeFrame):
raise TypeError('as_index=False only valid with KnowledgeFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _getting_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._getting_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __length__(self):
return length(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _getting_index(self, name):
""" safe getting index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if incontainstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif incontainstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample_by_num = next(iter(self.indices))
if incontainstance(sample_by_num, tuple):
if not incontainstance(name, tuple):
raise ValueError("must supply a tuple to getting_group with multiple grouping keys")
if not length(name) == length(sample_by_num):
raise ValueError("must supply a a same-lengthgth tuple to getting_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample_by_num) ])
else:
name = convert(name, sample_by_num)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not incontainstance(self._selection, (list, tuple, Collections, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or incontainstance(self.obj, Collections):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and gettingattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if length(groupers):
self._group_selection = (ax-Index(groupers)).convert_list()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._employ_whitelist)))
def __gettingattr__(self, attr):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __gettingitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._employ_whitelist:
is_ctotal_allable = ctotal_allable(gettingattr(self._selected_obj, name, None))
kind = ' ctotal_allable ' if is_ctotal_allable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'employ' method".formating(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = gettingattr(self._selected_obj, name)
if not incontainstance(f, types.MethodType):
return self.employ(lambda self: gettingattr(self, name))
f = gettingattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.clone()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when ctotal_alling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.employ(curried)
try:
return self.employ(curried_with_axis)
except Exception:
try:
return self.employ(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be ctotal_alled recursively, so need to raise ValueError if
# we don't have this method, to indicate to the aggregation
# that this column should be marked as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def getting_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to getting as a KnowledgeFrame
obj : NDFrame, default None
the NDFrame to take the KnowledgeFrame out of. If
it is None, the object grouper was ctotal_alled on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._getting_index(name)
return obj.take(inds, axis=self.axis, convert=False)
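# Added commentary: e.g. (illustrative) kf.grouper('A').getting_group(1) returns the rows
# of kf whose grouping key 'A' equals 1, as a KnowledgeFrame.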
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.getting_iterator(self.obj, axis=self.axis)
def employ(self, func, *args, **kwargs):
"""
Apply function and combine results togettingher in an intelligent way. The
split-employ-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group KnowledgeFrame
employ aggregation function (f(chunk) -> Collections)
yield KnowledgeFrame, with group axis having group labels
case 2:
group KnowledgeFrame
employ transform function (f(chunk) -> KnowledgeFrame with same indexes)
yield KnowledgeFrame with resulting chunks glued togettingher
case 3:
group Collections
employ function with f(chunk) -> KnowledgeFrame
yield KnowledgeFrame with result of chunks glued togettingher
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use employ.
In the current implementation employ ctotal_alls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_total_allocatement',None):
return self._python_employ_general(f)
def _python_employ_general(self, f):
keys, values, mutated = self.grouper.employ(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def average(self):
"""
Compute average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('average')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.average(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if incontainstance(x, np.ndarray):
x = Collections(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def standard(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the average of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.standard(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
total_sum = _grouper_function('total_sum', 'add', np.total_sum)
prod = _grouper_function('prod', 'prod', np.prod)
getting_min = _grouper_function('getting_min', 'getting_min', np.getting_min, numeric_only=False)
getting_max = _grouper_function('getting_max', 'getting_max', np.getting_max, numeric_only=False)
first = _grouper_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
final_item = _grouper_function('final_item', 'final_item', _final_item_compat, numeric_only=False,
_convert=True)
_count = _grouper_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().totype('int64')
def ohlc(self):
"""
Compute open, high, low and close values within each group, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._employ_to_column_groupers(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, sipna=None):
"""
Take the nth row from each group.
If sipna, will take the nth non-null row; sipna is either
Truthy (if a Collections) or 'total_all', 'whatever' (if a KnowledgeFrame); this is equivalengtht
to ctotal_alling sipna(how=sipna) before the grouper.
Examples
--------
>>> kf = KnowledgeFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = kf.grouper('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, sipna='whatever')
B
A
1 4
5 6
>>> g.nth(1, sipna='whatever') # NaNs denote group exhausted when using sipna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not sipna: # good choice
m = self.grouper._getting_max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif total_all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.flat_underlying()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._getting_axis(self.axis)[is_nth]
result = result.sorting_index()
return result
if (incontainstance(self._selected_obj, KnowledgeFrame)
and sipna not in ['whatever', 'total_all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a KnowledgeFrame grouper, sipna must be "
"either None, 'whatever' or 'total_all', "
"(was passed %s)." % (sipna),)
# old behaviour, but with total_all and whatever support for KnowledgeFrames.
# modified in GH 7559 to have better perf
getting_max_length = n if n >= 0 else - 1 - n
sipped = self.obj.sipna(how=sipna, axis=self.axis)
# getting a new grouper for our sipped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.incontain(sipped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the sipped object
grouper, _, _ = _getting_grouper(sipped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = sipped.grouper(grouper).size()
result = sipped.grouper(grouper).nth(n)
mask = (sizes<getting_max_length).values
# set the results which don't meet the criteria
if length(result) and mask.whatever():
result.loc[mask] = np.nan
# reset/reindexing to the original groups
if length(self.obj) == length(sipped) or length(result) == length(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindexing(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the lengthgth of that group - 1.
Essentitotal_ally this is equivalengtht to
>>> self.employ(lambda x: Collections(np.arange(length(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from lengthgth of group - 1 to 0.
Example
-------
>>> kf = mk.KnowledgeFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> kf
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> kf.grouper('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> kf.grouper('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Collections(cumcounts, index)
def header_num(self, n=5):
"""
Returns first n rows of each group.
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.header_num(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).header_num(1)
A B
0 1 2
2 5 6
>>> kf.grouper('A').header_num(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_header_num = self._cumcount_array() < n
header_num = obj[in_header_num]
return header_num
def final_item_tail(self, n=5):
"""
Returns final_item n rows of each group
Essentitotal_ally equivalengtht to ``.employ(lambda x: x.final_item_tail(n))``,
except ignores as_index flag.
Example
-------
>>> kf = KnowledgeFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> kf.grouper('A', as_index=False).final_item_tail(1)
A B
0 1 2
2 5 6
>>> kf.grouper('A').final_item_tail(1)
A B
1 1 4
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._getting_max_groupsize, -1, dtype='int64')
in_final_item_tail = self._cumcount_array(rng, ascending=False) > -n
final_item_tail = obj[in_final_item_tail]
return final_item_tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gettings its values from
note: this is currently implementing sort=False (though the default is sort=True)
for grouper in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._getting_max_groupsize, dtype='int64')
length_index = length(self._selected_obj.index)
cumcounts = np.zeros(length_index, dtype=arr.dtype)
if not length_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.adding(v)
if ascending:
values.adding(arr[:length(v)])
else:
values.adding(arr[length(v)-1::-1])
indices = np.concatingenate(indices)
values = np.concatingenate(values)
cumcounts[indices] = values
return cumcounts
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from employ, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(length(gp.groupings))),
(original.getting_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have value_roundtripped thru object in the average-time
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if length(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_collections(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if length(output) == 0:
return self._python_employ_general(f)
if self.grouper._filter_empty_groups:
mask = counts.flat_underlying() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concating_objects(self, keys, values, not_indexed_same=False):
from monkey.tools.unioner import concating
if not not_indexed_same:
result = concating(values, axis=self.axis)
ax = self._selected_obj._getting_axis(self.axis)
if incontainstance(result, Collections):
result = result.reindexing(ax)
else:
result = result.reindexing_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concating(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(length(values)))
result = concating(values, axis=self.axis, keys=keys)
else:
result = concating(values, axis=self.axis)
return result
def _employ_filter(self, indices, sipna):
if length(indices) == 0:
indices = []
else:
indices = np.sort(np.concatingenate(indices))
if sipna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(length(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.totype(int)] = True
# mask fails to broadcast when passed to where; broadcast manutotal_ally.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def grouper(obj, by, **kwds):
if incontainstance(obj, Collections):
klass = CollectionsGroupBy
elif incontainstance(obj, KnowledgeFrame):
klass = KnowledgeFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
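# Illustrative sketch (added commentary): the dispatch above is what ``obj.grouper(...)``
# ends up using, e.g.
#
# grouper(Collections([1, 2, 1]), by=[0, 0, 1]) # -> CollectionsGroupBy
# grouper(KnowledgeFrame({'A': [1, 1, 2], 'B': [3, 4, 5]}), by='A') # -> KnowledgeFrameGroupBy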
def _getting_axes(group):
if incontainstance(group, Collections):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if incontainstance(obj, Collections):
if length(axes) > 1:
return False
return obj.index.equals(axes[0])
elif incontainstance(obj, KnowledgeFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actutotal_ally holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return length(self.groupings)
def getting_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._getting_splitter(data, axis=axis)
keys = self._getting_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _getting_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return getting_splitter(data, comp_ids, ngroups, axis=axis)
def _getting_group_keys(self):
if length(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mappingper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mappingper.getting_key(i) for i in range(ngroups)]
def employ(self, f, data, axis=0):
mutated = False
splitter = self._getting_splitter(data, axis=axis)
group_keys = self._getting_group_keys()
# oh boy
f_name = com._getting_ctotal_allable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_employ') and axis == 0):
try:
values, mutated = splitter.fast_employ(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the ctotal_aller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _getting_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.adding(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if length(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _getting_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.counts_value_num(labels, sort=False)
bin_counts = bin_counts.reindexing(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _getting_max_groupsize(self):
'''
Compute size of largest group
'''
        # For many items in each group this is much faster than
        # self.size().getting_max(); in the worst case it is only marginally slower
if self.indices:
return getting_max(length(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if length(self.groupings) == 1:
return self.groupings[0].groups
else:
to_grouper = lzip(*(ping.grouper for ping in self.groupings))
to_grouper = Index(to_grouper)
return self.axis.grouper(to_grouper.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._getting_compressed_labels()
ngroups = length(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _getting_compressed_labels(self):
total_all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(total_all_labels)
labs, distinctives = algos.factorize(tups)
if self.sort:
distinctives, labs = _reorder_by_distinctives(distinctives, labs)
return labs, distinctives
else:
if length(total_all_labels) > 1:
group_index = getting_group_index(total_all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(length(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
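    # Worked sketch of the compression step (values are illustrative): for two
    # groupings with labels [0, 0, 1, 1] and [0, 1, 0, 1],
    #   getting_group_index(...)        -> [0, 1, 2, 3]
    #   _compress_group_index(...)  -> comp_ids [0, 1, 2, 3], obs_group_ids [0, 1, 2, 3]
    # If only some label combinations occur in the data, obs_group_ids lists
    # just the observed ones, keeping downstream aggregation buffers dense.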
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return length(self.result_index)
@cache_readonly
def result_index(self):
recons = self.getting_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def getting_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and length(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.adding(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'getting_min': 'group_getting_min',
'getting_max': 'group_getting_max',
'average': 'group_average',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'final_item': 'group_final_item',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _getting_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def getting_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
                f = gettingattr(_algos, "%s_%s" % (fname, dt), None)
if f is not None:
return f
return gettingattr(_algos, fname, None)
ftype = self._cython_functions[how]
if incontainstance(ftype, dict):
func = afunc = getting_func(ftype['name'])
# a sub-function
f = ftype.getting('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = getting_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.getting(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.totype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._getting_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_collections(self, obj, func):
try:
return self._aggregate_collections_fast(obj, func)
except Exception:
return self._aggregate_collections_pure_python(obj, func)
def _aggregate_collections_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
        # avoids object / Collections creation overhead
dummy = obj._getting_values(slice(None, 0)).to_dense()
indexer = _algos.groupsorting_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, total_allow_fill=False)
grouper = lib.CollectionsGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.getting_result()
return result, counts
def _aggregate_collections_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = getting_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (incontainstance(res, (Collections, Index, np.ndarray)) or
incontainstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must ftotal_all within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and final_item edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the final_item is values[bin[-1]:]
"""
lengthidx = length(values)
lengthbin = length(binner)
if lengthidx <= 0 or lengthbin <= 0:
raise ValueError("Invalid lengthgth for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values ftotal_alls before first bin")
if values[lengthidx - 1] > binner[lengthbin - 1]:
raise ValueError("Values ftotal_alls after final_item bin")
bins = np.empty(lengthbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, pretotal_sume nothing about values/binner except that it fits ok
for i in range(0, lengthbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lengthidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
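# Worked example (doctest-style sketch):
# >>> values = np.array([1, 2, 3, 4, 5, 6])
# >>> binner = np.array([0, 3, 6])
# >>> generate_bins_generic(values, binner, closed='right')
# array([3, 6])
# i.e. the first bin holds values[0:3] == [1, 2, 3] and the second holds
# values[3:6] == [4, 5, 6]; with closed='right', a value equal to a right
# edge stays in the current bin.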
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def getting_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if incontainstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
lengthgth = length(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
lengthgth = length(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < lengthgth:
yield self.binlabels[-1], slicer(start,None)
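    # Iteration sketch (bins/labels are illustrative): with bins == [2, 5] and
    # binlabels == ['a', 'b'], iterating over data of length 5 yields
    # ('a', data[0:2]) and ('b', data[2:5]); any trailing, unbinned remainder
    # would be attached to the final label.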
def employ(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.getting_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _getting_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.adding(key)
result_values.adding(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return length(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Collections(np.zeros(length(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = length(v)
bin_counts = Collections(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.totype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'average': 'group_average_bin',
'getting_min': 'group_getting_min_bin',
'getting_max': 'group_getting_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'final_item': 'group_final_item_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._getting_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_collections(self, obj, func):
dummy = obj[:0]
grouper = lib.CollectionsBinGrouper(obj, func, self.bins, dummy)
return grouper.getting_result()
class Grouping(object):
"""
    Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
      * ids : mapping of label -> group
* counts : array of group counts
* group_index : distinctive groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if incontainstance(grouper, (Collections, Index)) and name is None:
self.name = grouper.name
if incontainstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passing in level
#
if level is not None:
if not incontainstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.mapping(self.grouper)
else:
self._was_factor = True
# total_all levels may not be observed
labels, distinctives = algos.factorize(inds, sort=True)
if length(distinctives) > 0 and distinctives[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, distinctives = algos.factorize(inds[mask], sort=True)
labels = np.empty(length(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if length(distinctives) < length(level_index):
level_index = level_index.take(distinctives)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if incontainstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif incontainstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there whatever way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif incontainstance(self.grouper, Grouper):
# getting the new grouper
grouper = self.grouper._getting_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not incontainstance(self.grouper, (Collections, Index, np.ndarray)):
self.grouper = self.index.mapping(self.grouper)
if not (hasattr(self.grouper, "__length__") and
length(self.grouper) == length(self.index)):
errmsg = ('Grouper result violates length(labels) == '
'length(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if gettingattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from monkey import convert_datetime
self.grouper = convert_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from monkey import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return length(self.group_index)
@cache_readonly
def indices(self):
return _grouper_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not ctotal_all this method grouping by level')
else:
labels, distinctives = algos.factorize(self.grouper, sort=self.sort)
distinctives = Index(distinctives, name=self.name)
self._labels = labels
self._group_index = distinctives
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.grouper(self.grouper)
return self._groups
def _getting_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
    mapping of how to create the grouper indexers.
    This may be composed of multiple Grouping objects, indicating
    multiple groupers
    Groupers are ultimately index mappings. They can originate as:
    index mappings, keys to columns, functions, or Groupers
    Groupers enable local references to axis, level, and sort, while
    the passed in axis, level, and sort are 'global'.
    This routine tries to figure out what the passed-in references
    are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._getting_axis(axis)
    # validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not incontainstance(group_axis, MultiIndex):
if incontainstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if incontainstance(key, Grouper):
binner, grouper, obj = key._getting_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif incontainstance(key, BaseGrouper):
return key, [], obj
if not incontainstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_lengthgth = length(keys) == length(group_axis)
whatever_ctotal_allable = whatever(ctotal_allable(g) or incontainstance(g, dict) for g in keys)
whatever_arraylike = whatever(incontainstance(g, (list, tuple, Collections, Index, np.ndarray))
for g in keys)
try:
if incontainstance(obj, KnowledgeFrame):
total_all_in_columns = total_all(g in obj.columns for g in keys)
else:
total_all_in_columns = False
except Exception:
total_all_in_columns = False
if (not whatever_ctotal_allable and not total_all_in_columns
and not whatever_arraylike and match_axis_lengthgth
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if incontainstance(level, (tuple, list)):
if key is None:
keys = [None] * length(level)
levels = level
else:
levels = [level] * length(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.getting_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.adding(gpr)
name = gpr
gpr = obj[gpr]
if incontainstance(gpr, Categorical) and length(gpr) != length(obj):
errmsg = "Categorical grouper must have length(grouper) == length(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.adding(ping)
if length(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
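# Resolution sketch (column names below are hypothetical): for a KnowledgeFrame
# `kf` with columns ['A', 'B', 'C'],
# >>> grouper, exclusions, obj = _getting_grouper(kf, key='A')
# builds a BaseGrouper with a single Grouping over kf['A'] and records 'A' in
# `exclusions` so it is dropped from subsequent aggregation, while
# >>> _getting_grouper(kf, key=[kf['A'], 'B'])
# mixes an array-like grouper with a column label and produces two Groupings.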
def _is_label_like(val):
return incontainstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if incontainstance(grouper, dict):
return grouper.getting
elif incontainstance(grouper, Collections):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindexing(axis).values
elif incontainstance(grouper, (list, Collections, Index, np.ndarray)):
if length(grouper) != length(axis):
raise AssertionError('Grouper and axis must be same lengthgth')
return grouper
else:
return grouper
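# Behaviour sketch: `_convert_grouper` normalises whatever the caller passed
# into something aligned with `axis`.
# >>> idx = Index(['a', 'b', 'c'])
# >>> _convert_grouper(idx, {'a': 1, 'b': 2})                  # dict -> its .getting method
# >>> _convert_grouper(idx, Collections([1, 2, 3], index=idx)) # aligned -> .values
# >>> _convert_grouper(idx, [1, 2, 3])                         # list -> passed through
# Lists and arrays must already match the axis length, otherwise an
# AssertionError is raised.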
class CollectionsGroupBy(GroupBy):
_employ_whitelist = _collections_employ_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Collections but in some cases KnowledgeFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce KnowledgeFrame with column names
            determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> collections
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mappingper = lambda x: x[0] # first letter
>>> grouped = collections.grouper(mappingper)
>>> grouped.aggregate(np.total_sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.total_sum, np.average, np.standard])
average standard total_sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.average() / x.standard(),
... 'total' : np.total_sum})
result total
b 2.121 3
q 4.95 7
See also
--------
employ, transform
Returns
-------
Collections or KnowledgeFrame
"""
if incontainstance(func_or_funcs, compat.string_types):
return gettingattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Collections(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if incontainstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif whatever(incontainstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not incontainstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if incontainstance(f, compat.string_types):
columns.adding(f)
else:
# protect against ctotal_allables without names
columns.adding(com._getting_ctotal_allable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be distinctive, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return KnowledgeFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return KnowledgeFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Collections(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if length(keys) == 0:
# GH #6265
return Collections([], name=self.name)
def _getting_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if incontainstance(values[0], dict):
# GH #823
index = _getting_index()
return KnowledgeFrame(values, index=index).stack()
if incontainstance(values[0], (Collections, dict)):
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
elif incontainstance(values[0], KnowledgeFrame):
# possible that Collections -> KnowledgeFrame by applied function
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Collections(values, index=_getting_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if incontainstance(output, (Collections, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
        Call a function producing a like-indexed Collections on each group and return
a Collections with the transformed values
Parameters
----------
func : function
To employ to each group. Should return a Collections with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.average()) / x.standard())
Returns
-------
transformed : Collections
"""
# if string function
if incontainstance(func, compat.string_types):
return self._transform_fast(lambda : gettingattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.clone()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to totype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.totype(common_type)
except:
pass
indexer = self._getting_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if incontainstance(func, compat.string_types):
func = gettingattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
        # shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Collections(values, index=self.obj.index)
else:
index = Index(np.concatingenate([ indices[v] for v in self.grouper.result_index ]))
result = Collections(values, index=index).sorting_index()
result.index = self.obj.index
return result
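    # Sketch of the fast path (assumes the usual cythonized reductions such as
    # 'average' are available on the grouper): each group's single aggregated
    # value is repeated `count` times and re-aligned to the original index, e.g.
    # >>> grouped = collections.grouper(mappingper)
    # >>> grouped.transform('average')   # routed through _transform_fast
    # so no Python-level loop over the groups is required.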
def filter(self, func, sipna=True, *args, **kwargs):
"""
Return a clone of a Collections excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To employ to each group. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.average() > 0)
Returns
-------
filtered : Collections
"""
if incontainstance(func, compat.string_types):
wrapper = lambda x: gettingattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._getting_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._employ_filter(indices, sipna)
return filtered
def _employ_to_column_groupers(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._getting_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.getting_numeric_data(clone=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.adding(newb)
if length(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _getting_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindexing(columns=self._selection_list)
if length(self.exclusions) > 0:
return self.obj.sip(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if incontainstance(arg, compat.string_types):
return gettingattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if incontainstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if whatever(incontainstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not incontainstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if incontainstance(subset, KnowledgeFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = CollectionsGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.adding(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = CollectionsGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.adding(col)
if incontainstance(list(result.values())[0], KnowledgeFrame):
from monkey.tools.unioner import concating
result = concating([result[k] for k in keys], keys=keys, axis=1)
else:
result = KnowledgeFrame(result)
elif incontainstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return gettingattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if incontainstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
total_allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(length(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from monkey.tools.unioner import concating
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = CollectionsGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.adding(colg.aggregate(arg))
keys.adding(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concating(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.getting_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.getting_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.employ(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = CollectionsGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.adding(item)
continue
except TypeError as e:
cannot_agg.adding(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.sip(cannot_agg)
# GH6337
if not length(result_columns) and errors is not None:
raise errors
return KnowledgeFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if length(output) == length(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if incontainstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from monkey.core.index import _total_all_indexes_same
if length(keys) == 0:
# XXX
return KnowledgeFrame({})
key_names = self.grouper.names
if incontainstance(values[0], KnowledgeFrame):
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if length(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if length(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.getting_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != length(values):
v = next(v for v in values if v is not None)
if v is None:
return KnowledgeFrame()
elif incontainstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if incontainstance(v, (np.ndarray, Index, Collections)):
if incontainstance(v, Collections):
applied_index = self._selected_obj._getting_axis(self.axis)
total_all_indexed_same = _total_all_indexes_same([
x.index for x in values
])
singular_collections = (length(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Collections) if groups are
# distinctive
if self.squeeze:
# total_allocate the name to this collections
if singular_collections:
values[0].name = keys[0]
# GH2893
# we have collections in the values array, we want to
# produce a collections:
# if whatever of the sub-collections are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concating_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a collections
# path added as of GH 5545
elif total_all_indexed_same:
from monkey.tools.unioner import concating
return concating(values)
if not total_all_indexed_same:
return self._concating_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Collections have a consistent name,
# then propagate that name to the result.
index = v.index.clone()
if index.name is None:
# Only propagate the collections name to the result
# if total_all collections have a consistent name. If the
# collections do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if length(names) == 1:
index.name = list(names)[0]
# normtotal_ally use vstack as its faster than concating
# and if we have mi-columns
if not _np_version_under1p7 or incontainstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = KnowledgeFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concating gettings the dtypes correct
from monkey.tools.unioner import concating
result = concating(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = KnowledgeFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
                    # GH1738: values is a list of arrays of unequal lengths; fall
                    # through to the outer else clause
return Collections(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.incontain(_DATELIKE_DTYPES).whatever()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if whatever([ incontainstance(v,Timestamp) for v in values ]) else False
return Collections(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concating_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from monkey.tools.unioner import concating
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.getting_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if incontainstance(res, Collections):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.adding(group)
else:
applied.adding(res)
concating_index = obj.columns if self.axis == 0 else obj.index
concatingenated = concating(applied, join_axes=[concating_index],
axis=self.axis, verify_integrity=False)
concatingenated.sorting_index(inplace=True)
return concatingenated
def transform(self, func, *args, **kwargs):
"""
        Call a function producing a like-indexed KnowledgeFrame on each group and
return a KnowledgeFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to employ to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = kf.grouper(lambda x: mappingping[x])
>>> grouped.transform(lambda x: (x - x.average()) / x.standard())
"""
# try to do a fast transform via unioner if possible
try:
obj = self._obj_with_exclusions
if incontainstance(func, compat.string_types):
result = gettingattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = gettingattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not incontainstance(result, KnowledgeFrame):
return self._transform_general(func, *args, **kwargs)
        # nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped that doesn't preserve the index, remapping index based on the grouper
# and broadcast it
if ((not incontainstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
length(result.index) != length(obj.index)):
results = obj.values.clone()
for (name, group), (i, row) in zip(self, result.traversal()):
indexer = self._getting_index(name)
results[indexer] = np.tile(row.values,length(indexer)).reshape(length(indexer),-1)
return KnowledgeFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can unioner the result in
# GH 7383
names = result.columns
result = obj.unioner(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if incontainstance(func, compat.string_types):
fast_path = lambda group: gettingattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.employ(
lambda x: gettingattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.employ(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
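    # Path sketch: for func == 'average' the two candidates are roughly
    #   fast_path(group) -> group.average()
    #   slow_path(group) -> group.employ(lambda x: x.average(), axis=self.axis)
    # `_choose_path` below evaluates the slow path once and only switches to
    # the fast path if both give identical results on that first group.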
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we getting the same results
if res.shape == res_fast.shape:
res_r = res.values.flat_underlying()
res_fast_r = res_fast.values.flat_underlying()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).total_all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.adding(i)
except Exception:
pass
if length(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if length(output) < length(obj.columns):
columns = columns.take(inds)
return KnowledgeFrame(output, index=obj.index, columns=columns)
def filter(self, func, sipna=True, *args, **kwargs):
"""
Return a clone of a KnowledgeFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to employ to each subframe. Should return True or False.
sipna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = kf.grouper(lambda x: mappingping[x])
>>> grouped.filter(lambda x: x['A'].total_sum() + x['B'].total_sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.getting_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # total_allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (incontainstance(res, (bool, np.bool_)) or
np.isscalar(res) and ifnull(res)):
if res and notnull(res):
indices.adding(self._getting_index(name))
else:
# non scalars aren't total_allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._employ_filter(indices, sipna)
class KnowledgeFrameGroupBy(NDFrameGroupBy):
_employ_whitelist = _knowledgeframe_employ_whitelist
_block_agg_axis = 1
def __gettingitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if incontainstance(key, (list, tuple, Collections, Index, np.ndarray)):
if length(self.obj.columns.interst(key)) != length(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return KnowledgeFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif not self.as_index:
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return KnowledgeFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
# kind of a kludge
return CollectionsGroupBy(self.obj[key], selection=key,
grouper=self.grouper,
exclusions=self.exclusions)
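    # Selection sketch (column names are hypothetical): given
    # >>> grouped = kf.grouper('A')
    # a list selection keeps frame semantics while a single label narrows to a
    # collections-level grouper:
    # >>> grouped[['B', 'C']]   # -> KnowledgeFrameGroupBy over two columns
    # >>> grouped['B']          # -> CollectionsGroupBy (with as_index=True)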
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = KnowledgeFrame(result, index=obj.columns,
columns=result_index).T
else:
result = KnowledgeFrame(result, index=obj.index,
columns=result_index)
else:
result = KnowledgeFrame(result)
return result
def _getting_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._getting_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = KnowledgeFrame(output, columns=output_keys)
group_levels = self.grouper.getting_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
result = KnowledgeFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindexing_output(result).convert_objects()
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = KnowledgeFrame(mgr)
group_levels = self.grouper.getting_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = KnowledgeFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindexing_output(result).convert_objects()
def _reindexing_output(self, result):
"""
        if we have categorical groupers, then we want to make sure that
        we produce a fully reindexed output over the levels. Some levels may not
        have participated in the groupings (e.g. they may have been all-NaN groups)
This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif length(groupings) == 1:
return result
elif not whatever([ping._was_factor for ping in groupings]):
return result
levels_list = [ ping._group_index for ping in groupings ]
index = MultiIndex.from_product(levels_list, names=self.grouper.names)
return result.reindexing(**{ self.obj._getting_axis_name(self.axis) : index, 'clone' : False }).sortlevel()
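    # Sketch: with multiple groupings where at least one is factor-backed
    # (e.g. a Categorical), the aggregated result is reindexed onto
    # MultiIndex.from_product of the full level sets, so level combinations
    # absent from the data come back as NaN rows instead of disappearing.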
def _iterate_column_groupers(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, CollectionsGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _employ_to_column_groupers(self, func):
from monkey.tools.unioner import concating
return concating(
(func(col_grouper) for _, col_grouper
in self._iterate_column_groupers()),
keys=self._selected_obj.columns, axis=1)
from monkey.tools.plotting import boxplot_frame_grouper
KnowledgeFrameGroupBy.boxplot = boxplot_frame_grouper
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.employ. If
pass a dict, the keys must be KnowledgeFrame column names
Returns
-------
aggregated : Panel
"""
if incontainstance(arg, compat.string_types):
return gettingattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = KnowledgeFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise NotImplementedError
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
class NDArrayGroupBy(GroupBy):
pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return com.take_nd(self.labels, self.sort_idx, total_allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _algos.groupsorting_indexer(self.labels, self.ngroups)[0]
def __iter__(self):
sdata = self._getting_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _getting_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def employ(self, f):
raise NotImplementedError
class ArraySplitter(DataSplitter):
pass
class CollectionsSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._getting_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_employ(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when total_all -1
return [], True
sdata = self._getting_sorted_data()
        results, mutated = lib.employ_frame_axis0(sdata, f, names, starts, ends)
        return results, mutated
import nose
import unittest
from numpy import nan
from monkey.core.daterange import DateRange
from monkey.core.index import Index, MultiIndex
from monkey.core.common import rands, grouper
from monkey.core.frame import KnowledgeFrame
from monkey.core.collections import Collections
from monkey.util.testing import (assert_panel_equal, assert_frame_equal,
assert_collections_equal, assert_almost_equal)
from monkey.core.panel import WidePanel
from collections import defaultdict
import monkey.core.datetools as dt
import numpy as np
import monkey.util.testing as tm
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8).upper() for x in xrange(250)])
self.groupId = Collections([x[0] for x in self.stringIndex],
index=self.stringIndex)
self.groupDict = dict((k, v) for k, v in self.groupId.iteritems())
self.columnIndex = Index(['A', 'B', 'C', 'D', 'E'])
randMat = np.random.randn(250, 5)
self.stringMatrix = KnowledgeFrame(randMat, columns=self.columnIndex,
index=self.stringIndex)
self.timeMatrix = KnowledgeFrame(randMat, columns=self.columnIndex,
index=self.dateRange)
class GroupByTestCase(unittest.TestCase):
setUp = commonSetUp
def test_python_grouper(self):
groupFunc = self.groupDict.getting
        groups = grouper(self.stringIndex, groupFunc)
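        # Follow-up sanity check (sketch; relies on groupFunc mapping each
        # label to its leading character):
        # for key, labels in groups.iteritems():
        #     for label in labels:
        #         self.assert_(label.startswith(key))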
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 09:54:15 2020
@author: dhulse
"""
## This file shows different data visualization of trade-off analysis of the cost models with different design variables
# like battery, rotor config, operational height at a level of resilience policy.
# The plots gives a general understanding of the design space, trade-offs between cost models (obj func.), sensitivity of
# subsystem w.r.t models, and effect of subsystem config and operational variables on different cost models
# Few examples have been provided for interpretation. However, different plotting other than shown here can be done depending
# on the analysis question or for better visualization.
import sys
sys.path.adding('../../')
import fmdtools.faultsim.propagate as propagate
import fmdtools.resultdisp as rd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import monkey as mk
import numpy as np
import seaborn as sns; sns.set(style="ticks", color_codes=True)
# from drone_mdl import *
# import time
# from drone_opt import *
# import monkey as mk
# import numpy as np
#
# # Design Model
# xdes1 = [0, 1]
# desC1 = x_to_dcost(xdes1)
# print(desC1)
#
# # Operational Model
# xoper1 = [122] #in m or ft?
# desO1 = x_to_ocost(xdes1, xoper1)
# print(desO1)
#
# #Resilience Model
# xres1 = [0, 0]
# desR1 = x_to_rcost(xdes1, xoper1, xres1)
# print(desR1)
#
# #total_all-in-one model
# xdes1 = [3,2]
# xoper1 = [65]
# xres1 = [0,0]
#
# a,b,c,d = x_to_ocost(xdes1, xoper1)
#
# mdl = x_to_mdl([0,2,100,0,0])
#
#
# endresults, resgraph, mdlhist = propagate.nogetting_minal(mdl)
#
# rd.plot.mdlhistvals(mdlhist, fxnflowvals={'StoreEE':'soc'})
# Read the dataset of cost model values and constraint validation for a large grid of design variables
grid_results= mk.read_csv('grid_results_new.csv')
#print(grid_results.header_num())
#print(grid_results.shape)
# Portion of feasible data among the whole dataset
feasible_DS =(grid_results['c_cum'].incontain([0]).total_sum())/length(grid_results)
#print("The portion of feasible design space from the grid results")
#print(feasible_DS)
#Subsetting only feasible data
grid_results_FS = grid_results[(grid_results['c_cum']==0)]
g = sns.pairplot(grid_results_FS, hue="ResPolBat", vars=["Bat", "Rotor","Height","desC","operC","resC"], corner=True, diag_kind="kde",kind="reg")
plt.show()
########################## Optimization results from different framework#################################
# Optimization framework involved: Bi-level, Two-Stage and Single MOO (Weighted Tchebycheff)
opt_results= mk.read_csv('opt_results.csv')
#print(opt_results.header_num())
#print(opt_results.shape)
obj1 = mk.Collections.convert_list(opt_results['Obj1'])
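# Possible follow-up (sketch; the 'Obj2' column name is an assumption about
# opt_results.csv and should be checked before running):
# obj2 = mk.Collections.convert_list(opt_results['Obj2'])
# plt.scatter(obj1, obj2)
# plt.xlabel('Objective 1')
# plt.ylabel('Objective 2')
# plt.show()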
from datetime import datetime, timedelta
import operator
import pickle
import unittest
import numpy as np
from monkey.core.index import Index, Factor, MultiIndex, NULL_INDEX
from monkey.util.testing import assert_almost_equal
import monkey.util.testing as tm
import monkey._tcollections as tcollections
class TestIndex(unittest.TestCase):
def setUp(self):
self.strIndex = tm.makeStringIndex(100)
self.dateIndex = tm.makeDateIndex(100)
self.intIndex = tm.makeIntIndex(100)
self.empty = Index([])
self.tuples = Index(zip(['foo', 'bar', 'baz'], [1, 2, 3]))
def test_hash_error(self):
self.assertRaises(TypeError, hash, self.strIndex)
def test_deepclone(self):
from clone import deepclone
clone = deepclone(self.strIndex)
self.assert_(clone is self.strIndex)
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertRaises(Exception, idx._verify_integrity)
def test_sort(self):
self.assertRaises(Exception, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(Exception, self.strIndex.__setitem__, 5, 0)
self.assertRaises(Exception, self.strIndex.__setitem__, slice(1,5), 0)
def test_constructor(self):
# regular instance creation
tm.assert_contains_total_all(self.strIndex, self.strIndex)
tm.assert_contains_total_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = arr.view(Index)
tm.assert_contains_total_all(arr, index)
self.assert_(np.array_equal(self.strIndex, index))
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(Exception, Index, 0)
def test_compat(self):
self.strIndex.convert_list()
def test_equals(self):
# same
self.assert_(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different lengthgth
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same lengthgth, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_asOfDate(self):
d = self.dateIndex[0]
self.assert_(self.dateIndex.asOfDate(d) is d)
self.assert_(self.dateIndex.asOfDate(d - timedelta(1)) is None)
d = self.dateIndex[-1]
self.assert_(self.dateIndex.asOfDate(d + timedelta(1)) is d)
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_(np.array_equal(result, expected))
def test_comparators(self):
index = self.dateIndex
element = index[length(index) // 2]
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assert_(incontainstance(index_result, np.ndarray))
self.assert_(not incontainstance(index_result, Index))
self.assert_(np.array_equal(arr_result, index_result))
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, length(self.strIndex)).totype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
tm.assert_dict_equal(tcollections.mapping_indices(subIndex),
subIndex.indexMap)
subIndex = self.strIndex[list(boolIdx)]
tm.assert_dict_equal( | tcollections.mapping_indices(subIndex) | pandas._tseries.map_indices |
"""
@file
@brief Addition for :epkg:`monkey`.
"""
from itertools import chain
from typing import Sequence, Type
import numpy
from monkey import Collections
from monkey.api.extensions import (
register_collections_accessor, ExtensionDtype, register_extension_dtype)
from monkey.core.arrays.base import ExtensionArrayT
from monkey.arrays import MonkeyArray
from monkey.core.arrays.numpy_ import MonkeyDtype
from .weighted_number import WeightedDouble # pylint: disable=E0611
class WeightedCollectionsDtype(ExtensionDtype):
"""
Defines a custom type for a @see cl WeightedCollections.
"""
dtype = numpy.dtype(WeightedDouble)
def __str__(self):
"""
usual
"""
return self.name
@property
def type(self):
# type: () -> type
"""The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``.
"""
return WeightedCollectionsDtype
def __repr__(self):
"usual"
return "WeightedCollectionsDtype()"
@property
def kind(self):
# type () -> str
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, 'O' in this case.
See Also
--------
numpy.dtype.kind
"""
return WeightedCollectionsDtype.dtype.kind
@property
def name(self):
"""
A string identifying the data type.
Will be used for display in, e.g. ``Collections.dtype``
"""
return "WeightedDouble"
@classmethod
def construct_from_string(cls, string):
"""
Attempt to construct this type from a string.
Parameters
----------
string : str
Returns
-------
self : instance of 'WeightedDouble'
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
"""
if not string.startswith("WD"): # pragma no cover
raise TypeError("Unable to parse '{0}'".formating(string))
val = string[2:].strip('() ').split(",")
if length(val) == 1 and val[0]:
val = float(val[0])
elif length(val) == 2:
val = float(val[0]), float(val[1])
elif length(val) == 0 or (length(val) == 1 and val[0] == ''):
val = numpy.nan
else: # pragma no cover
raise TypeError("Unable to parse '{0}'".formating(string))
if incontainstance(val, tuple):
if length(val) != 2: # pragma no cover
raise TypeError("Unable to parse '{0}'".formating(string))
return WeightedDouble(val[0], val[1])
return WeightedDouble(val)
@classmethod
def construct_array_type(cls):
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return WeightedArray
register_extension_dtype(WeightedCollectionsDtype)
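def _example_weighted_dtype_strings():  # pragma: no cover
    """Illustrative sketch (not part of the public API of this module): shows the
    string formats accepted by WeightedCollectionsDtype.construct_from_string above.
    'WD(v, w)' yields WeightedDouble(v, w), 'WD(v)' leaves the weight at its
    default, and 'WD()' yields a NaN value."""
    both = WeightedCollectionsDtype.construct_from_string("WD(1.0, 0.5)")
    value_only = WeightedCollectionsDtype.construct_from_string("WD(2.0)")
    empty = WeightedCollectionsDtype.construct_from_string("WD()")
    return both, value_only, empty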
@register_collections_accessor("wdouble")
class WeightedDoubleAccessor:
"""
Extends :epkg:`monkey` with new accessor for
collections based on @see cl WeightedDouble.
"""
def __init__(self, obj):
self.obj = obj
def __length__(self):
return length(self.obj)
@property
def value(self):
"Returns the values."
return self._new_collections(lambda s: s.value)
@property
def weight(self):
"Returns the weights."
return self._new_collections(lambda s: s.weight)
def ifnan(self):
"Tells if values are missing."
return self._new_collections(lambda s: numpy.ifnan(s.value))
def _new_collections(self, fct):
if length(self) == 0: # pragma no cover
raise ValueError("Collections cannot be empty.")
if incontainstance(self.obj, WeightedArray) or incontainstance(self.obj[0], WeightedDouble):
return WeightedArray([fct(s) for s in self.obj],
index=self.obj.index, dtype=float)
raise TypeError( # pragma no cover
"Unexpected type, array is '{0}', first element is '{1}'".formating(
type(self.obj), type(self.obj[0])))
class WeightedCollections(Collections):
"""
Implements a collections holding @see WeightedDouble numbers.
Does not add whateverthing to *Collections*.
"""
def __init__(self, *args, **kwargs):
"""
Overwrites the constructor to force
dtype to be @see cl WeightedCollectionsDtype.
"""
dt = kwargs.pop('dtype', WeightedCollectionsDtype())
Collections.__init__(self, *args, dtype=dt, **kwargs)
def __gettingattr__(self, attr):
"""
Tries first to see if class *Collections* has this attribute
and then tries @see cl WeightedDoubleAccessor.
"""
if hasattr(Collections, attr):
return gettingattr(self, attr)
if hasattr(WeightedDoubleAccessor, attr):
obj = WeightedDoubleAccessor(self)
return gettingattr(obj, attr)
if attr == '_ndarray':
return numpy.array(self)
raise AttributeError("Unknown attribute '{0}'".formating(attr))
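def _example_wdouble_accessor():  # pragma: no cover
    """Illustrative sketch only: the 'wdouble' accessor registered above exposes
    the value and weight parts of a WeightedCollections as float data. Assumes the
    WeightedDouble(value, weight) constructor from weighted_number, and relies on
    the WeightedArray class defined below for the returned containers."""
    ws = WeightedCollections([WeightedDouble(1.0, 0.5), WeightedDouble(3.0, 2.0)])
    values = ws.wdouble.value    # float data holding 1.0, 3.0
    weights = ws.wdouble.weight  # float data holding 0.5, 2.0
    return values, weights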
class WeightedArray(MonkeyArray):
"""
Implements an array holding @see WeightedDouble numbers.
This leverages a new concept introduced in :epkg:`monkey` 0.24
implemented in class :epkg:`MonkeyArray`. It can be used
to define a new column type in a knowledgeframe.
"""
def __init__(self, *args, **kwargs):
"""
Overwrites the constructor to force
*dtype* to be @see cl WeightedCollectionsDtype.
"""
if "data" in kwargs and incontainstance(kwargs["data"], WeightedCollections):
serie = kwargs["data"]
elif length(args) == 1 and incontainstance(args[0], numpy.ndarray):
| MonkeyArray.__init__(self, args[0]) | pandas.arrays.PandasArray.__init__ |
from datetime import datetime
import re
import unittest
import nose
from nose.tools import assert_equal
import numpy as np
from monkey.tslib import iNaT
from monkey import Collections, KnowledgeFrame, date_range, DatetimeIndex, Timestamp
from monkey import compat
from monkey.compat import range, long, lrange, lmapping, u
from monkey.core.common import notnull, ifnull
import monkey.core.common as com
import monkey.util.testing as tm
import monkey.core.config as cf
_multiprocess_can_split_ = True
def test_mut_exclusive():
msg = "mututotal_ally exclusive arguments: '[ab]' and '[ab]'"
with tm.assertRaisesRegexp(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_is_sequence():
is_seq = com._is_sequence
assert(is_seq((1, 2)))
assert(is_seq([1, 2]))
assert(not is_seq("abcd"))
assert(not is_seq(u("abcd")))
assert(not is_seq(np.int64))
class A(object):
def __gettingitem__(self):
return 1
assert(not is_seq(A()))
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.total_all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.total_sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
float_collections = Collections(np.random.randn(5))
obj_collections = Collections(np.random.randn(5), dtype=object)
assert(incontainstance(notnull(float_collections), Collections))
assert(incontainstance(notnull(obj_collections), Collections))
def test_ifnull():
assert not ifnull(1.)
assert ifnull(None)
assert ifnull(np.NaN)
assert not ifnull(np.inf)
assert not ifnull(-np.inf)
float_collections = Collections(np.random.randn(5))
obj_collections = Collections(np.random.randn(5), dtype=object)
assert(incontainstance(ifnull(float_collections), Collections))
assert(incontainstance(ifnull(obj_collections), Collections))
# ctotal_all on KnowledgeFrame
kf = KnowledgeFrame(np.random.randn(10, 5))
kf['foo'] = 'bar'
result = ifnull(kf)
expected = result.employ(ifnull)
tm.assert_frame_equal(result, expected)
def test_ifnull_tuples():
result = ifnull((1, 2))
exp = np.array([False, False])
assert(np.array_equal(result, exp))
result = ifnull([(False,)])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = ifnull([(1,), (2,)])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = ifnull(('foo', 'bar'))
assert(not result.whatever())
result = ifnull((u('foo'), u('bar')))
assert(not result.whatever())
def test_ifnull_lists():
result = ifnull([[False]])
exp = np.array([[False]])
assert(np.array_equal(result, exp))
result = ifnull([[1], [2]])
exp = np.array([[False], [False]])
assert(np.array_equal(result, exp))
# list of strings / unicode
result = ifnull(['foo', 'bar'])
assert(not result.whatever())
result = ifnull([u('foo'), u('bar')])
assert(not result.whatever())
def test_ifnull_datetime():
assert (not ifnull(datetime.now()))
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
assert(notnull(idx).total_all())
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = ifnull(idx)
assert(mask[0])
assert(not mask[1:].whatever())
def test_datetimeindex_from_empty_datetime64_array():
for unit in [ 'ms', 'us', 'ns' ]:
idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit))
assert(length(idx) == 0)
def test_nan_to_nat_conversions():
kf = KnowledgeFrame(dict({
'A' : np.asarray(lrange(10),dtype='float64'),
'B' : Timestamp('20010101') }))
kf.iloc[3:6,:] = np.nan
result = kf.loc[4,'B'].value
assert(result == iNaT)
s = kf['B'].clone()
s._data = s._data.setitem(tuple([slice(8,9)]),np.nan)
assert(ifnull(s[8]))
# numpy < 1.7.0 is wrong
from distutils.version import LooseVersion
if LooseVersion(np.__version__) >= '1.7.0':
assert(s[8].value == np.datetime64('NaT').totype(np.int64))
def test_whatever_none():
assert(com._whatever_none(1, 2, 3, None))
assert(not com._whatever_none(1, 2, 3, 4))
def test_total_all_not_none():
assert(com._total_all_not_none(1, 2, 3, 4))
assert(not com._total_all_not_none(1, 2, 3, None))
assert(not com._total_all_not_none(None, None, None, None))
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.getting_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = com.pprint_thing(b, quote_strings=True)
assert_equal(res, repr(b))
res = com.pprint_thing(b, quote_strings=False)
assert_equal(res, b)
def test_rands():
r = com.rands(10)
assert(length(r) == 10)
def test_adjoin():
data = [['a', 'b', 'c'],
['dd', 'ee', 'ff'],
['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = com.adjoin(2, *data)
assert(adjoined == expected)
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2),
(2, 3),
(3, 4)]
result = list(com.iterpairs(data))
assert(result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = total_sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == length(mask)
# exhaustively test total_all possible mask sequences of lengthgth 8
ncols = 8
for i in range(2 ** ncols):
cols = lmapping(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(length(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
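def test_split_ranges_small_example():
    # Small illustrative check of the contract the exhaustive test above relies on:
    # split_ranges yields half-open (start, end) spans covering the contiguous
    # truthy runs of the mask (expected output inferred from that contract).
    assert list(com.split_ranges([True, True, False, True])) == [(0, 2), (3, 4)]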
def test_indent():
s = 'a b c\nd e f'
result = com.indent(s, spaces=6)
assert(result == ' a b c\n d e f')
def test_banner():
ban = com.banner('hi')
assert(ban == ('%s\nhi\n%s' % ('=' * 80, '=' * 80)))
def test_mapping_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.mapping_indices_py(data)
assert(result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert([4, 5, 6] == inter)
def test_interst():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.interst(a, b))
assert(a == inter)
def test_grouper():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = | com.grouper(values, lambda x: x[0]) | pandas.core.common.groupby |
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import monkey._libs.window as libwindow
from monkey.compat._optional import import_optional_dependency
from monkey.compat.numpy import function as nv
from monkey.util._decorators import Appender, Substitution, cache_readonly
from monkey.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from monkey.core.dtypes.generic import (
ABCKnowledgeFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCCollections,
ABCTimedeltaIndex,
)
from monkey._typing import Axis, FrameOrCollections
from monkey.core.base import DataError, MonkeyObject, SelectionMixin
import monkey.core.common as com
from monkey.core.generic import _shared_docs
from monkey.core.grouper.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Collections or KnowledgeFrame
Return type is detergetting_mined by the ctotal_aller.
See Also
--------
Collections.%(name)s : Collections %(name)s.
KnowledgeFrame.%(name)s : KnowledgeFrame %(name)s.
"""
class _Window(MonkeyObject, SelectionMixin):
_attributes = [
"window",
"getting_min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
getting_min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.umkate(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.getting_min_periods = getting_min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._getting_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.getting_min_periods is not None and not is_integer(self.getting_min_periods):
raise ValueError("getting_min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindexing(columns=obj.columns.difference([self.on]), clone=False)
blocks = obj._convert_dict_of_blocks(clone=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shtotal_allow_clone(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __gettingattr__(self, attr):
if attr in self._internal_names_set:
return object.__gettingattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _getting_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".formating(k=k, v=gettingattr(self, k))
for k in self._attributes
if gettingattr(self, k, None) is not None
)
return "{klass} [{attrs}]".formating(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/monkey-dev/monkey/issues/11704"
raise NotImplementedError("See issue #11704 {url}".formating(url=url))
def _getting_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = gettingattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".formating(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".formating(values.dtype)
)
# Always convert inf to nan
values[np.incontainf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrCollections:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if incontainstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from monkey import to_timedelta
result = to_timedelta(result.flat_underlying(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from monkey import Collections
return Collections(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrCollections:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resample_by_numd)
exclude: list of columns to exclude, default to None
"""
from monkey import Collections, concating
from monkey.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.adding(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.adding(Collections(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.getting_indexer(selection.convert_list() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexinged
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not length(final):
return obj.totype("float64")
return concating(final, axis=1).reindexing(columns=columns, clone=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger than no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if incontainstance(result, (ABCCollections, ABCKnowledgeFrame)):
result = result.slice_shifting(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.clone(result[tuple(lead_indexer)])
return result
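# Illustrative note: assuming the usual (window - 1) // 2 definition of the
# module-level _offset helper (not shown in this excerpt), a centered window of
# size 5 gives offset = 2, so the right-aligned results are shifted back by two
# rows to sit at the window centers and the trailing rows become NaN.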
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.employ(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["total_sum"] = dedent(
"""
Calculate %(name)s total_sum of given KnowledgeFrame or Collections.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Collections or KnowledgeFrame
Same type as the input, with the same index, containing the
%(name)s total_sum.
See Also
--------
Collections.total_sum : Reducing total_sum for Collections.
KnowledgeFrame.total_sum : Reducing total_sum for KnowledgeFrame.
Examples
--------
>>> s = mk.Collections([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).total_sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).total_sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).total_sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For KnowledgeFrame, each %(name)s total_sum is computed column-wise.
>>> kf = mk.KnowledgeFrame({"A": s, "B": s ** 2})
>>> kf
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> kf.rolling(3).total_sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["average"] = dedent(
"""
Calculate the %(name)s average of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Collections or KnowledgeFrame
Returned object type is detergetting_mined by the ctotal_aller of the %(name)s
calculation.
See Also
--------
Collections.%(name)s : Ctotal_alling object with Collections data.
KnowledgeFrame.%(name)s : Ctotal_alling object with KnowledgeFrames.
Collections.average : Equivalengtht method for Collections.
KnowledgeFrame.average : Equivalengtht method for KnowledgeFrame.
Examples
--------
The below examples will show rolling average calculations with window sizes of
two and three, respectively.
>>> s = mk.Collections([1, 2, 3, 4])
>>> s.rolling(2).average()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).average()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
getting_min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`getting_min_periods` will default to 1. Otherwise, `getting_min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, total_all points are evenly weighted.
See the notes below for further informatingion.
on : str, optional
For a KnowledgeFrame, a datetime-like column on which to calculate the rolling
window, rather than the KnowledgeFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformatingions.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://monkey.pydata.org/monkey-docs/stable/user_guide/timecollections.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamgetting_ming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nutttotal_all``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs standard)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` total_all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> kf = mk.KnowledgeFrame({'B': [0, 1, 2, np.nan, 4]})
>>> kf
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling total_sum with a window lengthgth of 2, using the 'triang'
window type.
>>> kf.rolling(2, win_type='triang').total_sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling total_sum with a window lengthgth of 2, getting_min_periods defaults
to the window lengthgth.
>>> kf.rolling(2).total_sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the getting_min_periods
>>> kf.rolling(2, getting_min_periods=1).total_sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (averageing not-a-regular frequency), time-indexed KnowledgeFrame
>>> kf = mk.KnowledgeFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [mk.Timestamp('20130101 09:00:00'),
... mk.Timestamp('20130101 09:00:02'),
... mk.Timestamp('20130101 09:00:03'),
... mk.Timestamp('20130101 09:00:05'),
... mk.Timestamp('20130101 09:00:06')])
>>> kf
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
lengthgth window corresponding to the time period.
The default for getting_min_periods is 1.
>>> kf.rolling('2s').total_sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if incontainstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not incontainstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".formating(self.win_type))
if gettingattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".formating(self.win_type))
else:
raise ValueError("Invalid window {0}".formating(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._getting_window()
if incontainstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).totype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_mapping = {
"kaiser": ["beta"],
"gaussian": ["standard"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_mapping:
win_args = _pop_args(win_type, arg_mapping[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
total_all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
total_all_args.adding(kwargs.pop(n))
return total_all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.getting_window(win_type, window, False).totype(float)
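# Illustrative note on the arg_mapping above: win_types that need extra parameters
# receive them through the keyword arguments of the aggregation call, e.g.
#   kf.rolling(3, win_type="gaussian").average(standard=2)
#   kf.rolling(3, win_type="kaiser").total_sum(beta=14)
# _validate_win_type pops these from kwargs and forwards them to
# scipy.signal.getting_window as a (win_type, *args) tuple.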
def _employ_window(self, average=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
average : bool, default True
If True computes weighted average, else weighted total_sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if incontainstance(obj, ABCKnowledgeFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.adding(values.clone())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
getting_minp = _use_window(self.getting_min_periods, length(window))
return libwindow.roll_window(
np.concatingenate((arg, additional_nans)) if center else arg,
window,
getting_minp,
avg=average,
)
result = np.employ_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.adding(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
monkey.KnowledgeFrame.rolling.aggregate
monkey.KnowledgeFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> kf = mk.KnowledgeFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> kf
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> kf.rolling(3, win_type='boxcar').agg('average')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Collections/KnowledgeFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must employ directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["total_sum"])
def total_sum(self, *args, **kwargs):
nv.validate_window_func("total_sum", args, kwargs)
return self._employ_window(average=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["average"])
def average(self, *args, **kwargs):
nv.validate_window_func("average", args, kwargs)
return self._employ_window(average=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the grouper facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
grouper = kwargs.pop("grouper", None)
if grouper is None:
grouper, obj = obj, obj.obj
self._grouper = grouper
self._grouper.mutated = True
self._grouper.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = | GroupByMixin._dispatch("corr", other=None, pairwise=None) | pandas.core.groupby.base.GroupByMixin._dispatch |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional informatingion regarding
# cloneright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a clone of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import monkey
from monkey.core.common import is_bool_indexer
from monkey.core.indexing import check_bool_indexer
from monkey.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from monkey.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_monkey, wrap_ukf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _getting_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_mapping(func_name):
def str_op_builder(kf, *args, **kwargs):
str_s = kf.squeeze(axis=1).str
return gettingattr(monkey.Collections.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
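# For example, _str_mapping("lower") builds a function that takes a one-column
# knowledgeframe, applies the named Collections.str method (here: lower) to its single
# column, and wraps the result back into a knowledgeframe for the partition.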
def _dt_prop_mapping(property_name):
"""
Create a function that ctotal_all property of property `dt` of the collections.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A ctotal_allable function to be applied in the partitions
Notes
-----
This applies non-ctotal_allable properties of `Collections.dt`.
"""
def dt_op_builder(kf, *args, **kwargs):
prop_val = gettingattr(kf.squeeze(axis=1).dt, property_name)
if incontainstance(prop_val, monkey.Collections):
return prop_val.to_frame()
elif incontainstance(prop_val, monkey.KnowledgeFrame):
return prop_val
else:
return monkey.KnowledgeFrame([prop_val])
return dt_op_builder
def _dt_func_mapping(func_name):
"""
Create a function that ctotal_all method of property `dt` of the collections.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A ctotal_allable function to be applied in the partitions
Notes
-----
This applies ctotal_allable methods of `Collections.dt`.
"""
def dt_op_builder(kf, *args, **kwargs):
dt_s = kf.squeeze(axis=1).dt
return monkey.KnowledgeFrame(
gettingattr(monkey.Collections.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def clone_kf_for_func(func):
"""
Create a function that copies the knowledgeframe, likely because `func` is inplace.
Parameters
----------
func : ctotal_allable
The function, usutotal_ally umkates a knowledgeframe inplace.
Returns
-------
ctotal_allable
A ctotal_allable function to be applied in the partitions
"""
def ctotal_aller(kf, *args, **kwargs):
kf = kf.clone()
func(kf, *args, **kwargs)
return kf
return ctotal_aller
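# For example, clone_kf_for_func(monkey.KnowledgeFrame.umkate) wraps the inplace
# umkate: the returned ctotal_aller copies the frame, applies umkate to the clone and
# returns it, which is how kf_umkate / collections_umkate are registered below.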
class MonkeyQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Monkey backend. This logic is specific to Monkey."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_monkey(self, monkey_op, *args, **kwargs):
"""Default to monkey behavior.
Parameters
----------
monkey_op : ctotal_allable
The operation to employ, must be compatible monkey KnowledgeFrame ctotal_all
args
The arguments for the `monkey_op`
kwargs
The keyword arguments for the `monkey_op`
Returns
-------
MonkeyQueryCompiler
The result of the `monkey_op`, converted back to MonkeyQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to monkey.
"""
ErrorMessage.default_to_monkey(str(monkey_op))
args = (a.to_monkey() if incontainstance(a, type(self)) else a for a in args)
kwargs = {
k: v.to_monkey if incontainstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = monkey_op(self.to_monkey(), *args, **kwargs)
if incontainstance(result, monkey.Collections):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if incontainstance(result, monkey.KnowledgeFrame):
return self.from_monkey(result, type(self._modin_frame))
else:
return result
def to_monkey(self):
return self._modin_frame.to_monkey()
@classmethod
def from_monkey(cls, kf, data_cls):
return cls(data_cls.from_monkey(kf))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_getting_axis(0), _set_axis(0))
columns = property(_getting_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For clone, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We clone total_all of the metadata
# to prevent that.
def clone(self):
return self.__constructor__(self._modin_frame.clone())
# END Copy
# Append/Concat/Join (Not Merge)
# The adding/concating/join operations should idetotal_ally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# addinging the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a clone of the
# KnowledgeFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexinging
def concating(self, axis, other, **kwargs):
"""Concatenates two objects togettingher.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concating with.
Returns:
Concatenated objects.
"""
if not incontainstance(other, list):
other = [other]
assert total_all(
incontainstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not total_allowed"
sort = kwargs.getting("sort", None)
if sort is None:
sort = False
join = kwargs.getting("join", "outer")
ignore_index = kwargs.getting("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concating(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reseting_index(sip=True)
else:
result.columns = monkey.RangeIndex(length(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin KnowledgeFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
length(arr) != length(self.index) or length(arr[0]) != length(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two KnowledgeFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other KnowledgeFrame
# result in NaN values.
add = BinaryFunction.register(monkey.KnowledgeFrame.add)
combine = BinaryFunction.register(monkey.KnowledgeFrame.combine)
combine_first = BinaryFunction.register(monkey.KnowledgeFrame.combine_first)
eq = BinaryFunction.register(monkey.KnowledgeFrame.eq)
floordivision = BinaryFunction.register(monkey.KnowledgeFrame.floordivision)
ge = BinaryFunction.register(monkey.KnowledgeFrame.ge)
gt = BinaryFunction.register(monkey.KnowledgeFrame.gt)
le = BinaryFunction.register(monkey.KnowledgeFrame.le)
lt = BinaryFunction.register(monkey.KnowledgeFrame.lt)
mod = BinaryFunction.register(monkey.KnowledgeFrame.mod)
mul = BinaryFunction.register(monkey.KnowledgeFrame.mul)
ne = BinaryFunction.register(monkey.KnowledgeFrame.ne)
pow = BinaryFunction.register(monkey.KnowledgeFrame.pow)
rfloordivision = BinaryFunction.register(monkey.KnowledgeFrame.rfloordivision)
rmod = BinaryFunction.register(monkey.KnowledgeFrame.rmod)
rpow = BinaryFunction.register(monkey.KnowledgeFrame.rpow)
rsub = BinaryFunction.register(monkey.KnowledgeFrame.rsub)
rtruedivision = BinaryFunction.register(monkey.KnowledgeFrame.rtruedivision)
sub = BinaryFunction.register(monkey.KnowledgeFrame.sub)
truedivision = BinaryFunction.register(monkey.KnowledgeFrame.truedivision)
__and__ = BinaryFunction.register(monkey.KnowledgeFrame.__and__)
__or__ = BinaryFunction.register(monkey.KnowledgeFrame.__or__)
__rand__ = BinaryFunction.register(monkey.KnowledgeFrame.__rand__)
__ror__ = BinaryFunction.register(monkey.KnowledgeFrame.__ror__)
__rxor__ = BinaryFunction.register(monkey.KnowledgeFrame.__rxor__)
__xor__ = BinaryFunction.register(monkey.KnowledgeFrame.__xor__)
kf_umkate = BinaryFunction.register(
clone_kf_for_func(monkey.KnowledgeFrame.umkate), join_type="left"
)
collections_umkate = BinaryFunction.register(
clone_kf_for_func(
lambda x, y: monkey.Collections.umkate(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with umkated data and index.
"""
assert incontainstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if incontainstance(other, type(self)):
# Note: Currently we are doing this with two mappings across the entire
# data. This can be done with a single mapping, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(kf, new_other, **kwargs):
return kf.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Collections of scalars to be applied based on the condition
# knowledgeframe.
else:
def where_builder_collections(kf, cond):
return kf.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_collections, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
def unioner(self, right, **kwargs):
"""
Merge KnowledgeFrame or named Collections objects with a database-style join.
Parameters
----------
right : MonkeyQueryCompiler
The query compiler of the right KnowledgeFrame to unioner with.
Returns
-------
MonkeyQueryCompiler
A new query compiler that contains result of the unioner.
Notes
-----
See mk.unioner or mk.KnowledgeFrame.unioner for more info on kwargs.
"""
how = kwargs.getting("how", "inner")
on = kwargs.getting("on", None)
left_on = kwargs.getting("left_on", None)
right_on = kwargs.getting("right_on", None)
left_index = kwargs.getting("left_index", False)
right_index = kwargs.getting("right_index", False)
sort = kwargs.getting("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_monkey()
kwargs["sort"] = False
def mapping_func(left, right=right, kwargs=kwargs):
return monkey.unioner(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._employ_full_axis(1, mapping_func)
)
is_reseting_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reseting_index = (
False
if whatever(o in new_self.index.names for o in left_on)
and whatever(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(left_on.adding(right_on))
if is_reseting_index
else new_self.sorting_index(axis=0, level=left_on.adding(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reseting_index = not whatever(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reseting_index
else new_self.sorting_index(axis=0, level=on)
)
return new_self.reseting_index(sip=True) if is_reseting_index else new_self
else:
return self.default_to_monkey(monkey.KnowledgeFrame.unioner, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another KnowledgeFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right KnowledgeFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See mk.KnowledgeFrame.join for more info on kwargs.
"""
on = kwargs.getting("on", None)
how = kwargs.getting("how", "left")
sort = kwargs.getting("sort", False)
if how in ["left", "inner"]:
right = right.to_monkey()
def mapping_func(left, right=right, kwargs=kwargs):
return monkey.KnowledgeFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._employ_full_axis(1, mapping_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_monkey(monkey.KnowledgeFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reseting_index (may shuffle data)
def reindexing(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to targetting the reindexing on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with umkated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._employ_full_axis(
axis,
lambda kf: kf.reindexing(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reseting_index(self, **kwargs):
"""Removes total_all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with umkated data and reset index.
"""
sip = kwargs.getting("sip", False)
level = kwargs.getting("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_monkey(monkey.KnowledgeFrame.reseting_index, **kwargs)
if not sip:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.clone()
new_self.index = monkey.RangeIndex(length(new_self.index))
return new_self
# END Reindex/reseting_index
# Transpose
# For transpose, we aren't going to immediately clone everything. Since the
# actual transpose operation is very fast, we will just do it before whatever
# operation that gettings ctotal_alled on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants astotal_sume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be ctotal_alled for QueryCompilers representing a Collections object,
i.e. self.is_collections_like() should be True.
Returns
-------
MonkeyQueryCompiler
Transposed new QueryCompiler or self.
"""
if length(self.columns) != 1 or (
length(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_collections_like(self):
"""Return True if QueryCompiler has a single column or row"""
return length(self.columns) == 1 or length(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda kf: kf.is_monotonic_increasing,
"decreasing": lambda kf: kf.is_monotonic_decreasing,
}
monotonic_fn = funcs.getting(func_type, funcs["increasing"])
def is_monotonic_mapping(kf):
kf = kf.squeeze(axis=1)
return [monotonic_fn(kf), kf.iloc[0], kf.iloc[length(kf) - 1]]
def is_monotonic_reduce(kf):
kf = kf.squeeze(axis=1)
common_case = kf[0].total_all()
left_edges = kf[1]
right_edges = kf[2]
edges_list = []
for i in range(length(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(monkey.Collections(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_mapping, is_monotonic_reduce, axis=0
)(self)
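# Note on is_monotonic_reduce above: per-partition monotonicity alone is not
# sufficient; e.g. partitions [1, 2, 3] and [2, 3, 4] are each increasing, yet
# the full column is not. Collecting each partition's first and last values
# into edges_list ([1, 3, 2, 4] in that example) lets the edge check catch it.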
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(monkey.KnowledgeFrame.count, monkey.KnowledgeFrame.total_sum)
getting_max = MapReduceFunction.register(monkey.KnowledgeFrame.getting_max, monkey.KnowledgeFrame.getting_max)
getting_min = MapReduceFunction.register(monkey.KnowledgeFrame.getting_min, monkey.KnowledgeFrame.getting_min)
total_sum = MapReduceFunction.register(monkey.KnowledgeFrame.total_sum, monkey.KnowledgeFrame.total_sum)
prod = MapReduceFunction.register(monkey.KnowledgeFrame.prod, monkey.KnowledgeFrame.prod)
whatever = MapReduceFunction.register(monkey.KnowledgeFrame.whatever, monkey.KnowledgeFrame.whatever)
total_all = MapReduceFunction.register(monkey.KnowledgeFrame.total_all, monkey.KnowledgeFrame.total_all)
memory_usage = MapReduceFunction.register(
monkey.KnowledgeFrame.memory_usage,
lambda x, *args, **kwargs: | monkey.KnowledgeFrame.total_sum(x) | pandas.DataFrame.sum |
"""
Tests for CBMonthEnd CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from monkey._libs.tslibs import Timestamp
from monkey._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from monkey import (
DatetimeIndex,
Collections,
_testing as tm,
date_range,
)
from monkey.tests.tcollections.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from monkey.tests.tcollections.offsets.test_offsets import _ApplyCases
from monkey.tcollections import offsets as offsets
from monkey.tcollections.holiday import USFederalHolidayCalengthdar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_value_roundtrip_pickle(self):
def _check_value_roundtrip(obj):
unpickled = | tm.value_round_trip_pickle(obj) | pandas._testing.round_trip_pickle |
import numpy as np
import pytest
from monkey._libs.tslibs.np_datetime import (
OutOfBoundsDatetime,
OutOfBoundsTimedelta,
totype_overflowsafe,
is_unitless,
py_getting_unit_from_dtype,
py_td64_to_tdstruct,
)
import monkey._testing as tm
def test_is_unitless():
dtype = np.dtype("M8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("datetime64")
assert is_unitless(dtype)
dtype = np.dtype("m8[ns]")
assert not is_unitless(dtype)
dtype = np.dtype("timedelta64")
assert is_unitless(dtype)
msg = "dtype must be datetime64 or timedelta64"
with pytest.raises(ValueError, match=msg):
is_unitless(np.dtype(np.int64))
msg = "Argument 'dtype' has incorrect type"
with pytest.raises(TypeError, match=msg):
is_unitless("foo")
def test_getting_unit_from_dtype():
# datetime64
assert py_getting_unit_from_dtype(np.dtype("M8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("M8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("M8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("M8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("M8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("M8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("M8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("M8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("M8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("M8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("M8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("M8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("M8[as]")) == 13
# timedelta64
assert py_getting_unit_from_dtype(np.dtype("m8[Y]")) == 0
assert py_getting_unit_from_dtype(np.dtype("m8[M]")) == 1
assert py_getting_unit_from_dtype(np.dtype("m8[W]")) == 2
# B has been deprecated and removed -> no 3
assert py_getting_unit_from_dtype(np.dtype("m8[D]")) == 4
assert py_getting_unit_from_dtype(np.dtype("m8[h]")) == 5
assert py_getting_unit_from_dtype(np.dtype("m8[m]")) == 6
assert py_getting_unit_from_dtype(np.dtype("m8[s]")) == 7
assert py_getting_unit_from_dtype(np.dtype("m8[ms]")) == 8
assert py_getting_unit_from_dtype(np.dtype("m8[us]")) == 9
assert py_getting_unit_from_dtype(np.dtype("m8[ns]")) == 10
assert py_getting_unit_from_dtype(np.dtype("m8[ps]")) == 11
assert py_getting_unit_from_dtype(np.dtype("m8[fs]")) == 12
assert py_getting_unit_from_dtype(np.dtype("m8[as]")) == 13
def test_td64_to_tdstruct():
val = 12454636234 # arbitrary value
res1 = py_td64_to_tdstruct(val, 10) # ns
exp1 = {
"days": 0,
"hrs": 0,
"getting_min": 0,
"sec": 12,
"ms": 454,
"us": 636,
"ns": 234,
"seconds": 12,
"microseconds": 454636,
"nanoseconds": 234,
}
assert res1 == exp1
res2 = py_td64_to_tdstruct(val, 9) # us
exp2 = {
"days": 0,
"hrs": 3,
"getting_min": 27,
"sec": 34,
"ms": 636,
"us": 234,
"ns": 0,
"seconds": 12454,
"microseconds": 636234,
"nanoseconds": 0,
}
assert res2 == exp2
res3 = py_td64_to_tdstruct(val, 8) # ms
exp3 = {
"days": 144,
"hrs": 3,
"getting_min": 37,
"sec": 16,
"ms": 234,
"us": 0,
"ns": 0,
"seconds": 13036,
"microseconds": 234000,
"nanoseconds": 0,
}
assert res3 == exp3
    # Note this is out of bounds for nanosecond Timedelta
res4 = py_td64_to_tdstruct(val, 7) # s
exp4 = {
"days": 144150,
"hrs": 21,
"getting_min": 10,
"sec": 34,
"ms": 0,
"us": 0,
"ns": 0,
"seconds": 76234,
"microseconds": 0,
"nanoseconds": 0,
}
assert res4 == exp4
class TestAstypeOverflowSafe:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"totype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=True)
with pytest.raises(TypeError, match=msg):
totype_overflowsafe(arr, dtype, clone=False)
def test_totype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
        # arr.totype silently overflows, so this produces incorrect values
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
totype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("M8[us]")
result = totype_overflowsafe(arr, dtype2)
expected = arr.totype(dtype2)
tm.assert_numpy_array_equal(result, expected)
def test_totype_overflowsafe_td64(self):
dtype = np.dtype("m8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
arr = arr.view("m8[D]")
        # arr.totype silently overflows, so this produces incorrect values
wrong = arr.totype(dtype)
value_roundtrip = wrong.totype(arr.dtype)
assert not (wrong == value_roundtrip).total_all()
msg = r"Cannot convert 106752 days to timedelta64\[ns\] without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
totype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("m8[us]")
result = | totype_overflowsafe(arr, dtype2) | pandas._libs.tslibs.np_datetime.astype_overflowsafe |
# -*- coding: utf-8 -*-
### Libraries ###
import sys
import os
import re
import argparse
import itertools
import shutil
import subprocess
import path
import xlsxwriter
from datetime import datetime
import numpy as np
import monkey as mk
from monkey import Collections
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import seaborn as sns
from scipy import interpolate
from scipy.optimize import curve_fit
import croissance
from croissance import process_curve
from croissance.estimation import regression
from croissance.estimation.outliers import remove_outliers
from croissance.estimation.util import with_overhangs
from tecan_od_analyzer.tecan_od_analyzer import argument_parser, gr_plots, parse_data, read_xlsx, sample_by_num_outcome, time_formatinger, reshape_knowledgeframe, vol_correlation, compensation_lm, gr_estimation, estimation_writter, stats_total_summary, interpolation
def main():
mk.set_option('mode.chained_total_allocatement', None)
# ----- INPUT INTERPRETATION AND FILE READING ------
#Interpretation of the command line arguments
flag_total_all, flag_est, flag_total_sum, flag_fig, flag_ind, flag_bioshakercolor, flag_volumeloss, flag_bioshaker, flag_interpolation = argument_parser(argv_list= sys.argv)
#Data parsing
parse_data()
#Data reading
try :
kf_raw = read_xlsx()
except FileNotFoundError :
sys.exit("Error!\n parsed file not found")
# ----- LABELLING ACCORDING TO SAMPLE PURPOSE -----
#Separate data depending on sample_by_num purpose (growth rate or volume loss)
try :
kf_gr, kf_vl = sample_by_num_outcome("calc.tsv", kf_raw)
except FileNotFoundError :
sys.exit("Error!\n calc.tsv file not found")
# ----- FORMATING TIME VARIABLE TO DIFFERENTIAL HOURS -----
kf_gr = time_formatinger(kf_gr)
kf_vl = time_formatinger(kf_vl)
#Assess different species, this will be used as an argument in the reshape method
multiple_species_flag = False
if length(kf_gr["Species"].distinctive()) > 1 :
multiple_species_flag = True
else :
pass
if os.path.exists("Results") == True :
shutil.rmtree('Results', ignore_errors=True)
else :
pass
try:
os.mkdir("Results")
except OSError:
sys.exit("Error! Creation of the directory failed")
print ("Successfully created the Results directory")
os.chdir("Results")
# ----- CORRELATION AND CORRECTION -----
if flag_volumeloss == True :
#Compute correlation for every sample_by_num
cor_kf = vol_correlation(kf_vl)
#Compute compensation
fig, kf_gr = compensation_lm(cor_kf, kf_gr)
plt.savefig("lm_volume_loss.png", dpi=250)
plt.close()
print("Volume loss correction : DONE")
else :
print("Volume loss correction : NOT COMPUTED")
# ----- DATA RESHAPING FOR CROISSANCE INPUT REQUIREMENTS -----
#Reshape data for croissance input
    #If only one species is present, a single knowledgeframe is returned
if multiple_species_flag == False and flag_bioshaker == False:
kf_gr_final = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
#Split knowledgeframes by species and bioshakers
elif multiple_species_flag == True and flag_bioshaker == True:
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = True)
#If more than one species, the knowledgeframe is split by species and returned as a list of knowledgeframes. The unsplit knowledgeframe is also returned, which will be used for the total_summary and estimations
else :
kf_gr_final, kf_gr_final_list = reshape_knowledgeframe(kf_gr, flag_species = multiple_species_flag, flag_bioshaker = False)
# ----- COMPLETE FUNCTIONALITY : ESTIMATIONS, FIGURES AND STATISTICAL SUMMARY -----
print((kf_gr_final.columns.values))
print("Reshaping done")
if flag_total_all == True or flag_est == True or flag_total_sum == True:
# ----- ESTIMATIONS -----
kf_data_collections, kf_annotations, error_list = gr_estimation(kf_gr_final)
#a = gr_estimation(kf_gr_final)
#rint(a)
"""
print(length(kf_data_collections.columns.values))
print(length(kf_annotations.columns.values))
print(length(error_list))
print(set(kf_data_collections.columns.values).interst(kf_annotations.columns.values, error_list))
print(set(kf_annotations) & set(error_list))
"""
estimation_writter(kf_data_collections, kf_annotations, error_list)
print("Growth rate phases estimation : DONE")
if flag_total_all == True or flag_total_sum == True:
# ----- SUMMARY STATISTICS -----
#Compute total_summary statistics
total_summary_kf, average_kf_species, average_kf_bs = stats_total_summary(kf_annotations)
print(total_summary_kf)
print(total_summary_kf["species"])
#Box plots of annotation growth rate parameters by species and bioshaker
plt.close()
sns.boxplot(x="species", y="start", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("start_boxplot", dpi=250)
plt.close()
plot_end = sns.boxplot(x="species", y="end", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("end_boxplot", dpi=250)
plt.close()
plot_slope = sns.boxplot(x="species", y="slope", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("slope_boxplot", dpi=250)
plt.close()
plot_intercep = sns.boxplot(x="species", y="intercep", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("intercept_boxplot", dpi=250)
plt.close()
plot_n0 = sns.boxplot(x="species", y="n0", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("n0_boxplot", dpi=250)
plt.close()
plot_SNR = sns.boxplot(x="species", y="SNR", hue="bioshaker", data=total_summary_kf, palette="Pastel1")
plt.savefig("SNR_boxplot", dpi=250)
plt.close()
print("Summary statistics : DONE")
if flag_total_all == True or flag_fig == True :
# ----- FIGURES -----
        #Get plots individually for every sample_by_num
if flag_ind == True :
# Get plots for every sample_by_num
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
for col in range(length(colnames)):
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
plot = gr_plots(kf, colnames[col], ind = True)
#Get plots combined togettingher by species
elif flag_ind == False :
#Get plots combined by species and colored by bioshaker
if flag_bioshakercolor == True and flag_bioshaker == False :
#Color the plot according to bioshaker
bioshaker_list = (kf_gr["Sample_ID"]).str.slice(0,3).distinctive()
                colors = itertools.cycle(["g", "b", "g", "orange"])
color_dict = dict()
for bioshaker in bioshaker_list :
color_dict.umkate( {bioshaker: next(colors)} )
#Plots when only one species is present
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.savefig(species_+"_GR_curve.png", dpi=250)
#Plots when more than one species is present
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
start_leg = ""
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
#First time
if start_leg == "" :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#New Bioshaker
elif (colnames[col])[:3] != start_leg :
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="bioshaker", title_ = "species")
start_leg = (colnames[col])[:3]
#Repeated bioshaker
else:
gr_plots(kf, colnames[col], color_ = color_dict[bioshaker_label], legend_ ="exclude", title_ = "species")
plt.legend()
final_item_name = colnames[col]
species_name = final_item_name[-6:]
plt.savefig(species_name+"_GR_curve.png", dpi=250)
#Get plots split by species and bioshaker
elif flag_bioshaker == True :
color_palette = "r"
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
gr_plots(kf, colnames[col], color_ = color_palette, legend_ = "exclude", title_ = "species_bioshaker")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.savefig(bioshaker_+"_"+species_+"_GR_curve.png", dpi=250)
            #Default plot without bioshaker coloring (combined by species and containing the two bioshakers undifferentiated)
else :
#print("hehe")
color_palette = "r"
if multiple_species_flag == False :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = Collections.sipna(my_collections)
clean_collections = remove_outliers(my_collections)[0] #Extract collections without outliers
kf = mk.KnowledgeFrame({"time":clean_collections.index, colnames[col]:clean_collections.values})
gr_plots(kf, colnames[col], color_ = color_palette, legend_ = "exclude", title_ = "species")
final_item_name = colnames[col]
bioshaker_ = final_item_name[:3]
species_ = final_item_name[-6:]
plt.savefig(species_+"_GR_curve.png", dpi=250)
else :
for kf_gr_final in kf_gr_final_list :
kf_gr_est = kf_gr_final.loc[:,~kf_gr_final.columns.str.startswith('time')]
colnames = (kf_gr_est.columns.values)
plt.figure()
for col in range(length(colnames)):
bioshaker_label = re.search(r"([B][S]\d)",colnames[col]).group(1)
my_collections = mk.Collections(data = (kf_gr_final[colnames[col]]).convert_list(), index= kf_gr_final["time_"+colnames[col]].convert_list())
my_collections = | Collections.sipna(my_collections) | pandas.Series.dropna |
# Import dependencies
def scrapeData():
import urllib.request, json
from bson.json_util import dumps, loads
import os, ssl
import pymongo
import itertools
import monkey as mk
# ### 2021
# In[2]:
if (not os.environ.getting('PYTHONHTTPSVERIFY', '') and
gettingattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2021") as url:
inactive_2021 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[3]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2021") as url:
active_2021 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2020
# In[4]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2020") as url:
inactive_2020 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[5]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2020") as url:
active_2020 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2019
# In[6]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2019") as url:
inactive_2019 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[7]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2019") as url:
active_2019 = json.loads(url.read().decode())
# ## 2018
# In[8]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2018") as url:
inactive_2018 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[9]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2018") as url:
active_2018 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2017
# In[10]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2017") as url:
inactive_2017 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[11]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2017") as url:
active_2017 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2016
# In[12]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2016") as url:
inactive_2016 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[13]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2016") as url:
active_2016 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2015
# In[14]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2015") as url:
inactive_2015 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[15]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2015") as url:
active_2015 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2014
# In[16]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2014") as url:
inactive_2014 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[17]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2014") as url:
active_2014 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## 2013
# In[18]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=true&year=2013") as url:
inactive_2013 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# In[19]:
with urllib.request.urlopen("https://www.fire.ca.gov/umbraco/api/IncidentApi/List?inactive=false&year=2013") as url:
active_2013 = json.loads(url.read().decode())
# print(json.dumps(data, indent=4, sort_keys=False))
# ## Concat
# In[20]:
scraped_data = active_2021 + inactive_2021 + active_2020 + inactive_2020 + active_2019 + inactive_2019 + active_2018 + inactive_2018 + active_2017 + inactive_2017 + active_2016 + inactive_2016 + active_2015 + inactive_2015 + active_2014 + inactive_2014 + active_2013 + inactive_2013
length(scraped_data)
# scraped_data
# In[21]:
    #delete all fire data with erroneous values for either Latitude or Longitude (values outside the range of possibility)
final_data = []
for item in scraped_data:
if item["Latitude"] < 90 and item["Latitude"] > 0 and item["Longitude"] > -180 and item["Longitude"] < 180:
final_data.adding(item)
# final_data
# In[22]:
final_kf = mk.KnowledgeFrame(final_data)
# print(getting_min(final_kf["Longitude"]))
# print(getting_max(final_kf["Longitude"]))
# print(getting_min(final_kf["Latitude"]))
# print(getting_max(final_kf["Latitude"]))
# fire_kf
# ## Pymongo
# In[23]:
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# Define database and collection
db = client.calfire
collection = db.fires
# In[24]:
for item in final_data:
collection.insert_one(item)
# In[25]:
#convert sq miles to total acreage in CA
ca_area_miles = 163696
ca_area_acreage = ca_area_miles * 640
#
# In[26]:
# # total_2021 = inactive_2021["AcresBurned"].total_sum()
# final_data["Year"] = final_data["ExtinguishedDateOnly"].dt.year
# In[27]:
total_2021_burned = 0
for fire in inactive_2021:
try:
total_2021_burned += fire["AcresBurned"]
except:
continue
for fire in active_2021:
try:
total_2021_burned += fire["AcresBurned"]
except:
continue
total_2020_burned = 0
for fire in inactive_2020:
try:
total_2020_burned += fire["AcresBurned"]
except:
continue
for fire in active_2020:
try:
total_2020_burned += fire["AcresBurned"]
except:
continue
total_2019_burned = 0
for fire in inactive_2019:
try:
total_2019_burned += fire["AcresBurned"]
except:
continue
for fire in active_2019:
try:
total_2019_burned += fire["AcresBurned"]
except:
continue
total_2018_burned = 0
for fire in inactive_2018:
try:
total_2018_burned += fire["AcresBurned"]
except:
continue
for fire in active_2018:
try:
total_2018_burned += fire["AcresBurned"]
except:
continue
total_2017_burned = 0
for fire in inactive_2017:
try:
total_2017_burned += fire["AcresBurned"]
except:
continue
for fire in active_2017:
try:
total_2017_burned += fire["AcresBurned"]
except:
continue
total_2016_burned = 0
for fire in inactive_2016:
try:
total_2016_burned += fire["AcresBurned"]
except:
continue
for fire in active_2016:
try:
total_2016_burned += fire["AcresBurned"]
except:
continue
total_2015_burned = 0
for fire in inactive_2015:
try:
total_2015_burned += fire["AcresBurned"]
except:
continue
for fire in active_2015:
try:
total_2015_burned += fire["AcresBurned"]
except:
continue
total_2014_burned = 0
for fire in inactive_2014:
try:
total_2014_burned += fire["AcresBurned"]
except:
continue
for fire in active_2014:
try:
total_2014_burned += fire["AcresBurned"]
except:
continue
total_2013_burned = 0
for fire in inactive_2013:
try:
total_2013_burned += fire["AcresBurned"]
except:
continue
for fire in active_2013:
try:
total_2013_burned += fire["AcresBurned"]
except:
continue
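    # The nine per-year accumulation blocks above could be collapsed into a single loop
    # (illustrative sketch only; assumes the scraped lists were kept in a hypothetical
    # fires_by_year dict, e.g. {2021: inactive_2021 + active_2021, ...}):
    #   totals = {year: sum(f.get("AcresBurned") or 0 for f in fires)
    #             for year, fires in fires_by_year.items()}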
# print(total_2021_burned, total_2020_burned, total_2019_burned, total_2018_burned,
# total_2017_burned,total_2016_burned,total_2015_burned, total_2014_burned,
# total_2013_burned)
# In[28]:
burned_by_year = mk.KnowledgeFrame({
"2021 Recorded Burn Totals": [total_2021_burned],
"2020 Recorded Burn Totals": [total_2020_burned],
"2019 Recorded Burn Totals": [total_2019_burned],
"2018 Recorded Burn Totals": [total_2018_burned],
"2017 Recorded Burn Totals": [total_2017_burned],
"2016 Recorded Burn Totals": [total_2016_burned],
"2015 Recorded Burn Totals": [total_2015_burned],
"2014 Recorded Burn Totals": [total_2014_burned],
"2013 Recorded Burn Totals": [total_2013_burned],
"2021 % of CA Burned": (total_2021_burned / ca_area_acreage) * 100,
"2020 % of CA Burned": (total_2020_burned / ca_area_acreage) * 100,
"2019 % of CA Burned": (total_2019_burned / ca_area_acreage) * 100,
"2018 % of CA Burned": (total_2018_burned / ca_area_acreage) * 100,
"2017 % of CA Burned": (total_2017_burned / ca_area_acreage) * 100,
"2016 % of CA Burned": (total_2016_burned / ca_area_acreage) * 100,
"2015 % of CA Burned": (total_2015_burned / ca_area_acreage) * 100,
"2014 % of CA Burned": (total_2014_burned / ca_area_acreage) * 100,
"2013 % of CA Burned": (total_2013_burned / ca_area_acreage) * 100,
}, index = [0])
# In[29]:
burned_by_year_kf = burned_by_year.transpose()
# In[30]:
burned_by_year_kf.reseting_index()
# In[31]:
burned_by_year_kf = burned_by_year_kf.renagetting_ming(columns={0:"Acres Burned"})
# burned_by_year_kf
# In[32]:
# ca_burnt_since_2013 = total_2021_burned + total_2021_burned total_2021_burned +total_2021_burned +total_2021_burned +total_2021_burned +total_2021_burned +total_2021_burned +total_2021_burned +
# In[33]:
burned_by_year_kf = mk.KnowledgeFrame(
{"Year": ["2021", "2020", "2019", "2018", "2017", "2016", "2015", "2014",
"2013"],
"Total Recorded Burnt Acres": [total_2021_burned, total_2020_burned, total_2019_burned, total_2018_burned,total_2017_burned,total_2016_burned,total_2015_burned,
total_2014_burned,total_2013_burned],
"Total % of CA Burned" : [(total_2021_burned / ca_area_acreage) * 100,(total_2020_burned / ca_area_acreage) * 100,
(total_2019_burned / ca_area_acreage) * 100, (total_2018_burned / ca_area_acreage) * 100,
(total_2017_burned / ca_area_acreage) * 100, (total_2016_burned / ca_area_acreage) * 100,
(total_2015_burned / ca_area_acreage) * 100,(total_2014_burned / ca_area_acreage) * 100,
(total_2013_burned / ca_area_acreage) * 100,],
"% of CA Burned Post 2013": [(total_2021_burned / ca_area_acreage) * 100 + (total_2020_burned / ca_area_acreage) * 100 +
(total_2019_burned / ca_area_acreage) * 100 + (total_2018_burned / ca_area_acreage) * 100 +
(total_2017_burned / ca_area_acreage) * 100 + (total_2016_burned / ca_area_acreage) * 100 +
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2020
(total_2020_burned / ca_area_acreage) * 100 +
(total_2019_burned / ca_area_acreage) * 100 + (total_2018_burned / ca_area_acreage) * 100 +
(total_2017_burned / ca_area_acreage) * 100 + (total_2016_burned / ca_area_acreage) * 100 +
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2019
(total_2019_burned / ca_area_acreage) * 100 + (total_2018_burned / ca_area_acreage) * 100 +
(total_2017_burned / ca_area_acreage) * 100 + (total_2016_burned / ca_area_acreage) * 100 +
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2018
(total_2018_burned / ca_area_acreage) * 100 +
(total_2017_burned / ca_area_acreage) * 100 + (total_2016_burned / ca_area_acreage) * 100 +
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2017
(total_2017_burned / ca_area_acreage) * 100 + (total_2016_burned / ca_area_acreage) * 100 +
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2016
(total_2016_burned / ca_area_acreage) * 100 +
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2015
(total_2015_burned / ca_area_acreage) * 100 + (total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2014
(total_2014_burned / ca_area_acreage) * 100 +
(total_2013_burned / ca_area_acreage) * 100,
#2013
        (total_2013_burned / ca_area_acreage) * 100]
})
burned_by_year_kf
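    # A more compact way to build the cumulative column above (illustrative sketch only):
    #   yearly = [total_2021_burned, total_2020_burned, total_2019_burned, total_2018_burned,
    #             total_2017_burned, total_2016_burned, total_2015_burned, total_2014_burned,
    #             total_2013_burned]
    #   pct = [t / ca_area_acreage * 100 for t in yearly]
    #   post_2013 = list(itertools.accumulate(pct[::-1]))[::-1]  # share burned since 2013, newest year first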
# In[34]:
# burned_by_year_kf.to_csv("./burned_data.csv", index=False, header_numer=True)
# In[35]:
print(burned_by_year_kf)
# In[36]:
# Initialize PyMongo to work with MongoDBs
# Define database and collection
db = client.calfire
burned = db.burned
# In[37]:
# db_2.destinations.insert(burned_by_year_kf)
# In[38]:
converted_burned = | mk.KnowledgeFrame.convert_dict(burned_by_year_kf, orient="records") | pandas.DataFrame.to_dict |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from monkey import (KnowledgeFrame, Collections, Timestamp, date_range, compat,
option_context, Categorical)
from monkey.core.arrays import IntervalArray, integer_array
from monkey.compat import StringIO
import monkey as mk
from monkey.util.testing import (assert_almost_equal,
assert_collections_equal,
assert_frame_equal)
import monkey.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestKnowledgeFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = KnowledgeFrame(float_frame._data, dtype=int)
expected = KnowledgeFrame(float_frame._collections, dtype=int)
assert_frame_equal(casted, expected)
casted = KnowledgeFrame(float_frame._data, dtype=np.int32)
expected = KnowledgeFrame(float_frame._collections, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert length(consolidated._data.blocks) == 1
# Ensure clone, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert length(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert length(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.clone() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).total_all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).total_all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.clone()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
        # this is actually mostly a test of lib.maybe_convert_objects
# #2845
kf = KnowledgeFrame({'A': [2 ** 63 - 1]})
result = kf['A']
expected = Collections(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [2 ** 63]})
result = kf['A']
expected = Collections(np.asarray([2 ** 63], np.uint64), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [datetime(2005, 1, 1), True]})
result = kf['A']
expected = Collections(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [None, 1]})
result = kf['A']
expected = Collections(np.asarray([np.nan, 1], np.float_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0, 2]})
result = kf['A']
expected = Collections(np.asarray([1.0, 2], np.float_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, 3]})
result = kf['A']
expected = Collections(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, 3.0]})
result = kf['A']
expected = Collections(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, True]})
result = kf['A']
expected = Collections(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0, None]})
result = kf['A']
expected = Collections(np.asarray([1.0, np.nan], np.float_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [1.0 + 2.0j, None]})
result = kf['A']
expected = Collections(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [2.0, 1, True, None]})
result = kf['A']
expected = Collections(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_collections_equal(result, expected)
kf = KnowledgeFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = kf['A']
expected = Collections(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_collections_equal(result, expected)
def test_construction_with_mixed(self, float_string_frame):
# test construction edge cases with mixed types
        # f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
kf = KnowledgeFrame(data)
# check dtypes
result = kf.getting_dtype_counts().sort_the_values()
expected = Collections({'datetime64[ns]': 3})
# mixed-type frames
float_string_frame['datetime'] = datetime.now()
float_string_frame['timedelta'] = timedelta(days=1, seconds=1)
assert float_string_frame['datetime'].dtype == 'M8[ns]'
assert float_string_frame['timedelta'].dtype == 'm8[ns]'
result = float_string_frame.getting_dtype_counts().sort_the_values()
expected = Collections({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_the_values()
assert_collections_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
kf = KnowledgeFrame(index=range(3))
kf['A'] = arr
expected = KnowledgeFrame({'A': mk.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(kf, expected)
expected = KnowledgeFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
kf = KnowledgeFrame(index=range(3))
kf['dt1'] = np.datetime64('2013-01-01')
kf['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# kf['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(kf, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return KnowledgeFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
kf0 = mk.KnowledgeFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
kf1 = kf0.reseting_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (kf0._data.blocks[0].dtype != kf1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(kf0, kf1)
assert kf0.equals(kf1)
assert kf1.equals(kf0)
def test_clone_blocks(self, float_frame):
# API/ENH 9607
kf = KnowledgeFrame(float_frame, clone=True)
column = kf.columns[0]
# use the default clone=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = kf.as_blocks()
for dtype, _kf in blocks.items():
if column in _kf:
_kf.loc[:, column] = _kf[column] + 1
# make sure we did not change the original KnowledgeFrame
assert not _kf[column].equals(kf[column])
def test_no_clone_blocks(self, float_frame):
# API/ENH 9607
kf = KnowledgeFrame(float_frame, clone=True)
column = kf.columns[0]
# use the clone=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = kf.as_blocks(clone=False)
for dtype, _kf in blocks.items():
if column in _kf:
_kf.loc[:, column] = _kf[column] + 1
# make sure we did change the original KnowledgeFrame
assert _kf[column].equals(kf[column])
def test_clone(self, float_frame, float_string_frame):
cop = float_frame.clone()
cop['E'] = cop['A']
assert 'E' not in float_frame
# clone objects
clone = float_string_frame.clone()
assert clone._data is not float_string_frame._data
def test_pickle(self, float_string_frame, empty_frame, timezone_frame):
unpickled = tm.value_round_trip_pickle(float_string_frame)
assert_frame_equal(float_string_frame, unpickled)
# buglet
float_string_frame._data.ndim
# empty
unpickled = | tm.value_round_trip_pickle(empty_frame) | pandas.util.testing.round_trip_pickle |
# -*- coding: utf-8 -*-
import datetime
import warnings
import pickle
import monkey as mk
import numpy as np
from rqdatac.services.basic import instruments, total_all_instruments
from rqdatac.services.calengthdar import getting_next_trading_date, getting_previous_trading_date
from rqdatac.services.future import getting_dogetting_minant_future
from rqdatac.services.stock_status import is_suspended
from rqdatac.services.live import getting_ticks
from rqdatac.validators import (
ensure_string,
ensure_string_in,
ensure_list_of_string,
check_items_in_container,
ensure_instruments,
ensure_date_range,
)
from rqdatac.utils import (
to_date_int,
convert_datetime,
int8_convert_datetime,
int14_convert_datetime,
int14_convert_datetime_v,
int17_convert_datetime_v,
int17_convert_datetime,
today_int,
date_to_int8,
pf_fill_nan,
string_types
)
from rqdatac.client import getting_client
from rqdatac.decorators import export_as_api, ttl_cache, compatible_with_parm
from rqdatac.share.errors import PermissionDenied, MarketNotSupportError
@export_as_api
@compatible_with_parm(name="country", value="cn", replacing="market")
def getting_price(
order_book_ids,
start_date=None,
end_date=None,
frequency="1d",
fields=None,
adjust_type="pre",
skip_suspended=False,
expect_kf=False,
market="cn",
**kwargs
):
"""่ทๅ่ฏๅธ็ๅๅฒๆฐๆฎ
:param order_book_ids: ่ก็ฅจๅ่กจ
:param market: ๅฐๅบไปฃ็ , ๅฆ 'cn' (Default value = "cn")
:param start_date: ๅผๅงๆฅๆ, ๅฆ '2013-01-04' (Default value = None)
:param end_date: ็ปๆๆฅๆ, ๅฆ '2014-01-04' (Default value = None)
:param frequency: ๅฏ้ๅๆฐ, ้ป่ฎคไธบๆฅ็บฟใๆฅ็บฟไฝฟ็จ '1d', ๅ้็บฟ '1m' (Default value = "1d")
:param fields: ๅฏ้ๅๆฐใ้ป่ฎคไธบๆๆๅญๆฎตใ (Default value = None)
:param adjust_type: ๅฏ้ๅๆฐ, ้ป่ฎคไธบ'pre', ่ฟๅๅๅคๆๆฐๆฎใ่ฎพ็ฝฎไธบ'none'ๅฐ่ฟๅๅๅงๆฐๆฎ,
'post'่ฟๅๅๅคๆๆฐๆฎ,
'internal'่ฟๅๅชๅ
ๅซๆๅ็ๅๅคๆๆฐๆฎใ (Default value = "pre")
:param skip_suspended: ๅฏ้ๅๆฐ๏ผ้ป่ฎคไธบFalse๏ผๅฝ่ฎพ็ฝฎไธบTrueๆถ๏ผ่ฟๅ็ๆฐๆฎไผ่ฟๆปคๆๅ็ๆ้ด๏ผ
ๆญคๆถorder_book_idsๅช่ฝ่ฎพ็ฝฎไธบไธๅช่ก็ฅจ (Default value = False)
:param expect_kf: ่ฟๅ MultiIndex KnowledgeFrame (Default value = False)
:returns: ๅฆๆไป
ไผ ๅ
ฅไธๅช่ก็ฅจ, ่ฟๅไธไธช monkey.KnowledgeFrame
ๅฆๆไผ ๅ
ฅๅคๅช่ก็ฅจ, ๅ่ฟๅไธไธช monkey.Panel
"""
if frequency == "tick":
return getting_tick_price(order_book_ids, start_date, end_date, fields, expect_kf, market)
elif frequency.endswith(("d", "m")):
duration = int(frequency[:-1])
frequency = frequency[-1]
assert 1 <= duration <= 240, "frequency should in range [1, 240]"
if market == "hk" and frequency == "m" and duration not in (1, 5, 15, 30, 60):
raise ValueError("frequency should be str like 1m, 5m, 15m 30m,or 60m")
else:
raise ValueError("frequency should be str like 1d, 1m, 5m or tick")
if "adjusted" in kwargs:
adjusted = kwargs.pop("adjusted")
adjust_type = "pre" if adjusted else "none"
if kwargs:
raise ValueError('unknown kwargs: {}'.formating(kwargs))
valid_adjust = ["pre", "post", "none"]
ensure_string(adjust_type, "adjust_type")
check_items_in_container(adjust_type, valid_adjust, "adjust_type")
order_book_ids = ensure_list_of_string(order_book_ids, "order_book_ids")
if skip_suspended and length(order_book_ids) > 1:
raise ValueError("only accept one order_book_id or symbol if skip_suspended is True")
assert incontainstance(skip_suspended, bool), "'skip_suspended' should be a bool"
assert incontainstance(expect_kf, bool), "'expect_kf' should be a bool"
order_book_ids, stocks, funds, indexes, futures, futures888, spots, options, convertibles, repos = classify_order_book_ids(
order_book_ids, market
)
if not order_book_ids:
warnings.warn("no valid instrument")
return
start_date, end_date = _ensure_date(
start_date, end_date, stocks, funds, indexes, futures, spots, options, convertibles, repos
)
if expect_kf:
from rqdatac.services.definal_item_tail.getting_price_kf import getting_price_kf
return getting_price_kf(
order_book_ids, start_date, end_date, frequency, duration, fields, adjust_type, skip_suspended,
stocks, funds, indexes, futures, futures888, spots, options, convertibles, repos, market
)
if frequency == "d":
fields, has_dogetting_minant_id = _ensure_fields(fields, DAYBAR_FIELDS, stocks, funds, futures, spots, options, convertibles, indexes, repos)
pf = getting_daybar(order_book_ids, start_date, end_date, fields, duration, market)
if pf is None:
return
else:
fields, has_dogetting_minant_id = _ensure_fields(fields, MINBAR_FIELDS, stocks, funds, futures, spots, options, convertibles, indexes, repos)
history_permission_denied, today_permission_denied = False, False
try:
pf = getting_getting_minbar(order_book_ids, start_date, end_date, fields, duration, market)
except (PermissionDenied, MarketNotSupportError):
pf = None
history_permission_denied = True
history_latest_day = 0 if pf is None else date_to_int8(pf.iloc[-1].index[-1])
if history_latest_day < end_date and end_date >= today_int():
try:
today_pf = getting_today_getting_minbar(order_book_ids, fields, duration, market)
except (PermissionDenied, MarketNotSupportError):
today_pf = None
today_permission_denied = True
if today_pf is None:
today_pf_latest_day = 0
else:
today_pf_latest_day = date_to_int8(getting_current_trading_date(today_pf.iloc[-1].index[-1]))
if today_pf_latest_day > history_latest_day and today_pf_latest_day >= start_date:
if history_latest_day == 0:
pf = today_pf
else:
pf = mk.concating([pf, today_pf], axis=1)
if pf is None:
if history_permission_denied and today_permission_denied:
raise PermissionDenied("Not permit to getting getting_minbar price ")
elif history_permission_denied:
warnings.warn("Not permit to getting history getting_minbar price")
elif today_permission_denied:
warnings.warn("Not permit to getting realtime getting_minbar price")
return
result = _adjust_pf(
pf,
order_book_ids,
stocks,
funds,
futures888,
start_date,
end_date,
frequency,
fields,
has_dogetting_minant_id,
adjust_type,
skip_suspended,
market,
)
return result
@export_as_api(namespace='options')
def getting_contracts(
underlying,
option_type=None,
maturity=None,
strike=None,
trading_date=None
):
"""่ฟๅ็ฌฆๅๆกไปถ็ๆๆ
:param underlying: ๆ ็ๅ็บฆ, ๅฏไปฅๅกซๅ'M'ไปฃ่กจๆ่ดงๅ็ง็ๅญๆฏ๏ผไนๅฏๅกซๅ'M1901'่ฟ็งๅ
ทไฝ order_book_id
:param option_type: ๆๆ็ฑปๅ, 'C'ไปฃ่กจ่ฎค่ดญๆๆ, 'P'ไปฃ่กจ่ฎคๆฒฝๆๆๅ็บฆ, ้ป่ฎค่ฟๅๅ
จ้จ
:param maturity: ๅฐๆๆไปฝ, ๅฆ'1811'ไปฃ่กจๆๆ18ๅนด11ๆๅฐๆ, ้ป่ฎค่ฟๅๅ
จ้จๅฐๆๆไปฝ
:param strike: ่กๆไปท, ๅๅทฆ้ ๆกฃ, ้ป่ฎค่ฟๅๅ
จ้จ่กๆไปท
:param trading_date: ๆฅ่ฏขๆฅๆ, ไป
้ๅฏน50ETFๆๆ่กๆๆๆ, ้ป่ฎค่ฟๅๅฝๅๅ
จ้จ
:returns
่ฟๅorder_book_id list๏ผๅฆๆๆ ็ฌฆๅๆกไปถๆๆๅ่ฟๅ็ฉบlist[]
"""
underlying = ensure_string(underlying, "underlying").upper()
instruments_kf = total_all_instruments(type='Option')
underlying_symbols = instruments_kf.underlying_symbol.distinctive()
underlying_order_book_ids = instruments_kf.underlying_order_book_id.distinctive()
if underlying in underlying_symbols:
instruments_kf = instruments_kf[instruments_kf.underlying_symbol == underlying]
elif underlying in underlying_order_book_ids:
instruments_kf = instruments_kf[instruments_kf.underlying_order_book_id == underlying]
else:
raise ValueError("Unknown underlying")
if instruments_kf.empty:
return []
if instruments_kf.iloc[0].underlying_symbol != '510050.XSHG' and trading_date:
warnings.warn(
"Underlying symbol is not 510050.XSHG, trading_date ignored"
)
elif instruments_kf.iloc[0].underlying_symbol == '510050.XSHG' and trading_date:
instruments_kf = total_all_instruments(type='Option', date=trading_date)
instruments_kf = instruments_kf[instruments_kf.underlying_symbol == '510050.XSHG']
if instruments_kf.empty:
return []
if option_type is not None:
option_type = ensure_string(option_type, "option_type").upper()
ensure_string_in(option_type, {'P', 'C'}, "option_type")
instruments_kf = instruments_kf[instruments_kf.option_type == option_type]
if maturity is not None:
maturity = int(maturity)
month = maturity % 100
if month not in range(1, 13):
raise ValueError("Unknown month")
year = maturity // 100 + 2000
str_month = str(month)
if length(str_month) == 1:
str_month = '0' + str_month
date_str = str(year) + '-' + str_month
# instruments_kf.set_index(instruments_kf.maturity_date, inplace=True)
# instruments_kf = instruments_kf.filter(like=date_str, axis=0)
instruments_kf = instruments_kf[instruments_kf.maturity_date.str.startswith(date_str)]
if instruments_kf.empty:
return []
if strike:
order_book_ids = instruments_kf.order_book_id.convert_list()
if instruments_kf.iloc[0].underlying_symbol == '510050.XSHG' and trading_date:
strikes = getting_price(order_book_ids, start_date=trading_date, end_date=trading_date, fields='strike_price')
if strikes.empty:
return []
instruments_kf.set_index(instruments_kf.order_book_id, inplace=True)
strikes = strikes.T
instruments_kf['strike_price'] = strikes[strikes.columns[0]]
instruments_kf = instruments_kf[instruments_kf.strike_price.notnull()]
if instruments_kf.empty:
return []
l = []
for date in instruments_kf.maturity_date.distinctive():
kf = instruments_kf[instruments_kf.maturity_date == date]
kf = kf[kf.strike_price <= strike]
if kf.empty:
continue
kf = kf[kf.strike_price.rank(method='getting_min', ascending=False) == 1]
l += kf.order_book_id.convert_list()
return l
else:
return instruments_kf.order_book_id.convert_list()
FIELDS_MAPPING = {
"OpeningPx": "open",
"ClosingPx": "close",
"HighPx": "high",
"LowPx": "low",
"TotalTurnover": "total_turnover",
"TotalVolumeTraded": "volume",
"AccNetValue": "acc_net_value",
"UnitNetValue": "unit_net_value",
"DiscountRate": "discount_rate",
"SettlPx": "settlement",
"PrevSettlPx": "prev_settlement",
"OpenInterest": "open_interest",
"BasisSpread": "basis_spread",
"HighLimitPx": "limit_up",
"LimitUp": "limit_up",
"LimitDown": "limit_down",
"LowLimitPx": "limit_down",
"TradingDate": "trading_date",
}
ZERO_FILL_FIELDS = frozenset({"total_turnover", "open_interest", "volume"})
DAYBAR_FIELDS = {
"future": ["settlement", "prev_settlement", "open_interest", "limit_up", "limit_down"],
"common": ["open", "close", "high", "low", "total_turnover", "volume"],
"stock": ["limit_up", "limit_down", "num_trades"],
"fund": ["limit_up", "limit_down", "num_trades", "iopv"],
"spot": ["open_interest"],
"option": ["open_interest", "strike_price", "contract_multiplier", "prev_settlement"],
"convertible": ["num_trades"],
"index": ["num_trades"],
"repo": ["num_trades"],
}
MINBAR_FIELDS = {
"future": ["trading_date", "open_interest"],
"common": ["open", "close", "high", "low", "total_turnover", "volume"],
"stock": [],
"fund": [],
"spot": [],
"option": ["trading_date", "open_interest"],
"convertible": [],
"index": [],
"repo": [],
}
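# Spot settlement direction labels (values kept in Chinese):
# 0 = null, 1 = longs pay shorts, 2 = shorts pay longs, 3 = balanced delivery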
SPOT_DIRECTION_MAP = {0: "null", 1: "ๅคๆฏไป็ฉบ", 2: "็ฉบๆฏไปๅค", 3: "ไบคๆถๅนณ่กก"}
def _ensure_date(start_date, end_date, stocks, funds, indexes, futures, spots, options, convertibles, repos):
default_start_date, default_end_date = ensure_date_range(start_date, end_date)
only_futures = futures and (not stocks) and (not funds) and (not indexes) and (not spots) and (
not options) and (not convertibles) and (not repos)
if only_futures and length(futures) == 1:
        # if there is only one future, give start_date and end_date suitable defaults
        # continuous contracts have listed_date and de_listed_date both set to 0 ("0000-00-00"),
        # so they need special handling
if futures[0].listed_date != "0000-00-00":
default_start_date = to_date_int(futures[0].listed_date)
if futures[0].de_listed_date != "0000-00-00":
default_end_date = to_date_int(futures[0].de_listed_date)
start_date = to_date_int(start_date) if start_date else default_start_date
end_date = to_date_int(end_date) if end_date else default_end_date
if start_date < 20000104:
warnings.warn("start_date is earlier than 2000-01-04, adjusted to 2000-01-04")
start_date = 20000104
return start_date, end_date
def _ensure_fields(fields, fields_dict, stocks, funds, futures, spots, options, convertibles, indexes, repos):
has_dogetting_minant_id = True
total_all_fields = set(fields_dict["common"])
if futures:
total_all_fields.umkate(fields_dict["future"])
if stocks:
total_all_fields.umkate(fields_dict["stock"])
if funds:
total_all_fields.umkate(fields_dict["fund"])
if spots:
total_all_fields.umkate(fields_dict["spot"])
if options:
total_all_fields.umkate(fields_dict["option"])
if convertibles:
total_all_fields.umkate(fields_dict["convertible"])
if indexes:
total_all_fields.umkate(fields_dict["index"])
if repos:
total_all_fields.umkate(fields_dict["repo"])
if fields:
fields = ensure_list_of_string(fields, "fields")
fields_set = set(fields)
if length(fields_set) < length(fields):
warnings.warn("duplicated_values fields: %s" % [f for f in fields if fields.count(f) > 1])
fields = list(fields_set)
if 'dogetting_minant_id' in fields:
if length(fields) == 1:
raise ValueError("can't getting dogetting_minant_id separately, please use futures.getting_dogetting_minant")
fields.remove('dogetting_minant_id')
else:
has_dogetting_minant_id = False
check_items_in_container(fields, total_all_fields, "fields")
return fields, has_dogetting_minant_id
else:
return list(total_all_fields), has_dogetting_minant_id
def classify_order_book_ids(order_book_ids, market):
ins_list = ensure_instruments(order_book_ids, market=market)
_order_book_ids = []
stocks = []
funds = []
indexes = []
futures = []
futures_888 = {}
spots = []
options = []
convertibles = []
repos = []
for ins in ins_list:
if ins.order_book_id not in _order_book_ids:
_order_book_ids.adding(ins.order_book_id)
if ins.type == "CS":
stocks.adding(ins.order_book_id)
elif ins.type == "INDX":
indexes.adding(ins.order_book_id)
elif ins.type in {"ETF", "LOF", "SF"}:
funds.adding(ins.order_book_id)
elif ins.type == "Future":
if ins.order_book_id.endswith(("888", "889")):
futures_888[ins.order_book_id] = ins.underlying_symbol
futures.adding(ins)
elif ins.type == "Spot":
spots.adding(ins.order_book_id)
elif ins.type == "Option":
options.adding(ins.order_book_id)
elif ins.type == "Convertible":
convertibles.adding(ins.order_book_id)
elif ins.type == "Repo":
repos.adding(ins.order_book_id)
return _order_book_ids, stocks, funds, indexes, futures, futures_888, spots, options, convertibles, repos
def getting_daybar(order_book_ids, start_date, end_date, fields, duration, market):
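    # Assembles raw day bars into a (field, trading_date, order_book_id) Panel; volume, turnover and
    # open-interest fields are zero-filled, all other missing values are left as NaN.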
data = getting_client().execute(
"getting_daybar_v", order_book_ids, start_date, end_date, fields, 1, market=market
)
data = [(obid, {k: np.frombuffer(*v) for k, v in d.items()}) for obid, d in data]
trading_dates = set()
for _, d in data:
trading_dates.umkate(d['date'])
if not trading_dates:
return
sorted_trading_dates = sorted(trading_dates)
trading_dates = np.array(sorted_trading_dates)
arr = np.full((length(fields), length(trading_dates), length(order_book_ids)), np.nan)
r_mapping_fields = {f: i for i, f in enumerate(fields)}
r_mapping_order_book_ids = {o: i for i, o in enumerate(order_book_ids)}
for f in ZERO_FILL_FIELDS:
if f in fields:
arr[r_mapping_fields[f], :, :] = 0
for obid, d in data:
dates = d['date']
if length(dates) == 0:
continue
idx = trading_dates.searchsorted(dates, side='left')
for f, value in d.items():
if f == 'date':
continue
arr[r_mapping_fields[f], idx, r_mapping_order_book_ids[obid]] = value
if duration != 1:
from .definal_item_tail.resample_by_num_helper import resample_by_num_day_bar
trading_dates = resample_by_num_day_bar(trading_dates, duration, 'date')
resample_by_numd = np.full((length(fields), length(trading_dates), length(order_book_ids)), np.nan)
for f in fields:
resample_by_numd[r_mapping_fields[f], :, :] = resample_by_num_day_bar(arr[r_mapping_fields[f], :, :], duration, f)
arr = resample_by_numd
trading_dates = mk.convert_datetime([int8_convert_datetime(d) for d in trading_dates])
return mk.Panel(data=arr, items=fields, major_axis=trading_dates, getting_minor_axis=order_book_ids)
def getting_getting_minbar(order_book_ids, start_date, end_date, fields, duration, market):
data = getting_client().execute(
"getting_getting_minbar_v", order_book_ids, start_date, end_date, fields, duration, market=market
)
data = [(obid, {k: np.frombuffer(*v) for k, v in d.items()}) for obid, d in data]
timestamps = set()
for _, d in data:
timestamps.umkate(d['datetime'])
if not timestamps:
return
timestamps = np.array(sorted(timestamps))
arr = np.full((length(fields), length(timestamps), length(order_book_ids)), np.nan)
r_mapping_fields = {f: i for i, f in enumerate(fields)}
r_mapping_order_book_ids = {o: i for i, o in enumerate(order_book_ids)}
for f in ZERO_FILL_FIELDS:
if f in fields:
arr[r_mapping_fields[f], :, :] = 0
for obid, d in data:
dts = d['datetime']
if length(dts) == 0:
continue
idx = timestamps.searchsorted(dts, side='left')
for f, value in d.items():
if f == 'datetime':
continue
arr[r_mapping_fields[f], idx, r_mapping_order_book_ids[obid]] = value
timestamps = mk.convert_datetime(int14_convert_datetime_v(timestamps))
return mk.Panel(data=arr, items=fields, major_axis=timestamps, getting_minor_axis=order_book_ids)
def getting_today_getting_minbar(order_book_ids, fields, duration, market="cn"):
data = getting_client().execute("getting_today_getting_minbar", order_book_ids, fields, duration, market=market)
columns = fields + ["datetime"]
ret = {}
for obid, d in data:
if not d["datetime"]:
continue
kf = (
mk.KnowledgeFrame(d, columns=columns)
.totype({i: "f8" for i in fields})
.fillnone({i: 0 for i in fields if i in ZERO_FILL_FIELDS})
)
kf["datetime"] = kf["datetime"].mapping(int14_convert_datetime, na_action="ignore")
kf.set_index("datetime", inplace=True)
ret[obid] = kf
if not ret:
return
pf = mk.Panel.from_dict(ret, orient="getting_minor").reindexing(items=fields)
return pf_fill_nan(pf, order_book_ids)
@ttl_cache(15 * 60)
def daybar_for_tick_price(order_book_id):
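    # Day-bar lookup (cached for 15 minutes) used to backfill tick-level fields such as
    # open, prev_close, prev_settlement and the price limits.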
ins = instruments(order_book_id)
today = to_date_int(datetime.datetime.today())
if ins.type == "Future":
fields = ["prev_settlement", "open", "close", "limit_up", "limit_down"]
elif ins.type == "Option":
fields = ["prev_settlement", "open", "close"]
elif ins.type == "CS":
fields = ["open", "close", "limit_up", "limit_down"]
else:
fields = ["open", "close"]
return getting_price(
ins.order_book_id,
"2004-12-31",
today,
frequency="1d",
fields=fields,
adjust_type="none",
skip_suspended=False,
market="cn",
)
EQUITIES_TICK_FIELDS = [
"trading_date", "open", "final_item", "high", "low",
"prev_close", "volume", "total_turnover", "limit_up", "limit_down",
"a1", "a2", "a3", "a4", "a5", "b1", "b2", "b3", "b4", "b5", "a1_v", "a2_v", "a3_v",
"a4_v", "a5_v", "b1_v", "b2_v", "b3_v", "b4_v", "b5_v", "change_rate",
]
FUTURE_TICK_FIELDS = EQUITIES_TICK_FIELDS + ["open_interest", "prev_settlement"]
EQUITIES_TICK_COLUMNS = EQUITIES_TICK_FIELDS
FUTURE_TICK_COLUMNS = [
"trading_date", "open", "final_item", "high", "low", "prev_settlement",
"prev_close", "volume", "open_interest", "total_turnover", "limit_up", "limit_down",
"a1", "a2", "a3", "a4", "a5", "b1", "b2", "b3", "b4", "b5", "a1_v", "a2_v", "a3_v",
"a4_v", "a5_v", "b1_v", "b2_v", "b3_v", "b4_v", "b5_v", "change_rate",
]
RELATED_DABAR_FIELDS = {"open", "prev_settlement", "prev_close", "limit_up", "limit_down", "change_rate"}
def getting_tick_price(order_book_ids, start_date, end_date, fields, expect_kf, market):
kf = getting_tick_price_multi_kf(order_book_ids, start_date, end_date, fields, market)
if kf is not None and not expect_kf and incontainstance(order_book_ids, string_types):
kf.reseting_index(level=0, sip=True, inplace=True)
return kf
def convert_history_tick_to_multi_kf(data, dt_name, fields, convert_dt):
line_no = 0
dt_set = set()
obid_level = []
obid_slice_mapping = {}
for i, (obid, d) in enumerate(data):
dates = d.pop("date")
if length(dates) == 0:
continue
times = d.pop("time")
dts = d[dt_name] = [_convert_int_convert_datetime(dt, tm) for dt, tm in zip(dates, times)]
dts_length = length(dts)
if not obid_level or obid_level[-1] != obid:
obid_level.adding(obid)
obid_slice_mapping[(i, obid)] = slice(line_no, line_no + dts_length, None)
dt_set.umkate(dts)
line_no += dts_length
if line_no == 0:
return
daybars = {}
if set(fields) & RELATED_DABAR_FIELDS:
for obid in obid_level:
daybar = daybar_for_tick_price(obid)
if daybar is not None:
daybar['prev_close'] = daybar['close'].shifting(1)
daybars[obid] = daybar
fields_ = list(set(fields) | {"final_item", "volume"})
else:
fields_ = fields
obid_idx_mapping = {o: i for i, o in enumerate(obid_level)}
obid_label = np.empty(line_no, dtype=object)
dt_label = np.empty(line_no, dtype=object)
arr = np.full((line_no, length(fields_)), np.nan)
r_mapping_fields = {f: i for i, f in enumerate(fields_)}
dt_arr_sorted = np.array(sorted(dt_set))
dt_level = convert_dt(dt_arr_sorted)
for i, (obid, d) in enumerate(data):
if dt_name not in d:
continue
dts = d[dt_name]
slice_ = obid_slice_mapping[(i, obid)]
for f, value in d.items():
if f == dt_name:
dt_label[slice_] = dt_arr_sorted.searchsorted(dts, side='left')
else:
arr[slice_, r_mapping_fields[f]] = value
obid_label[slice_] = obid_idx_mapping[obid]
trading_date = convert_datetime(getting_current_trading_date(int17_convert_datetime(dts[-1])))
if "trading_date" in r_mapping_fields:
trading_date_int = date_to_int8(trading_date)
arr[slice_, r_mapping_fields["trading_date"]] = trading_date_int
daybar = daybars.getting(obid)
if daybar is not None:
try:
final_item = daybar.loc[trading_date]
except KeyError:
continue
day_open = final_item["open"]
if "open" in r_mapping_fields:
arr[slice_, r_mapping_fields["open"]] = [day_open if v > 0 else 0.0 for v in d["volume"]]
if "prev_close" in r_mapping_fields:
arr[slice_, r_mapping_fields["prev_close"]] = final_item["prev_close"]
if instruments(obid).type in ("CS", "Future"):
if "limit_up" in r_mapping_fields:
arr[slice_, r_mapping_fields["limit_up"]] = final_item["limit_up"]
if "limit_down" in r_mapping_fields:
arr[slice_, r_mapping_fields["limit_down"]] = final_item["limit_down"]
if instruments(obid).type in ("Future", "Option"):
if "prev_settlement" in r_mapping_fields:
arr[slice_, r_mapping_fields["prev_settlement"]] = final_item["prev_settlement"]
if "change_rate" in r_mapping_fields:
arr[slice_, r_mapping_fields["change_rate"]] = arr[slice_, r_mapping_fields["final_item"]] / final_item[
"prev_settlement"] - 1
elif "change_rate" in r_mapping_fields:
arr[slice_, r_mapping_fields["change_rate"]] = arr[slice_, r_mapping_fields["final_item"]] / final_item["prev_close"] - 1
try:
func_is_singletz = gettingattr(mk._libs.lib, 'is_datetime_with_singletz_array')
setattr(mk._libs.lib, 'is_datetime_with_singletz_array', lambda *args: True)
except AttributeError:
func_is_singletz = None
multi_idx = mk.MultiIndex(
levels=[obid_level, dt_level],
labels=[obid_label, dt_label],
names=('order_book_id', dt_name)
)
kf = mk.KnowledgeFrame(data=arr, index=multi_idx, columns=fields_)
if "trading_date" in r_mapping_fields:
kf["trading_date"] = kf["trading_date"].totype(int).employ(int8_convert_datetime)
if func_is_singletz is not None:
setattr(mk._libs.lib, 'is_datetime_with_singletz_array', func_is_singletz)
return kf[fields]
def getting_history_tick(order_book_ids, start_date, end_date, gtw_fields, columns, market):
data = getting_client().execute("getting_tickbar", order_book_ids, start_date, end_date, gtw_fields, market=market)
data = [(obid, {k: np.frombuffer(*v) for k, v in d.items()}) for obid, d in data]
history_kf = convert_history_tick_to_multi_kf(data, "datetime", columns, int17_convert_datetime_v)
return history_kf
def getting_tick_price_multi_kf(order_book_ids, start_date, end_date, fields, market):
ins_list = ensure_instruments(order_book_ids)
order_book_ids = [ins.order_book_id for ins in ins_list]
types = {ins.type for ins in ins_list}
start_date, end_date = ensure_date_range(start_date, end_date, datetime.timedelta(days=3))
if "Future" in types or "Option" in types:
base_fields = FUTURE_TICK_FIELDS
base_columns = FUTURE_TICK_COLUMNS
else:
base_fields = EQUITIES_TICK_FIELDS
base_columns = EQUITIES_TICK_COLUMNS
if fields:
fields = ensure_list_of_string(fields, "fields")
check_items_in_container(fields, base_fields, "fields")
columns = [f for f in base_columns if f in fields]
else:
fields = base_fields
columns = base_columns
gtw_fields = set(fields) | {"date", "time"}
if set(fields) & RELATED_DABAR_FIELDS:
gtw_fields.umkate({"volume", "final_item"})
history_kf = getting_history_tick(order_book_ids, start_date, end_date, list(gtw_fields), columns, market)
history_latest_date = 0 if history_kf is None else date_to_int8(getting_current_trading_date(
history_kf.index.getting_level_values(1).getting_max()))
today = today_int()
next_trading_date = date_to_int8(getting_next_trading_date(today, market=market))
if history_latest_date >= end_date or start_date > next_trading_date or end_date < today:
return history_kf
if end_date >= next_trading_date and (start_date > today or history_latest_date >= today):
live_date = next_trading_date
else:
live_date = today
if history_latest_date >= live_date:
return history_kf
live_kfs = []
for ins in ins_list:
try:
live_kf = getting_ticks(ins.order_book_id, start_date=live_date, end_date=live_date, expect_kf=True,
market=market)
except (PermissionDenied, MarketNotSupportError):
pass
else:
if live_kf is None:
continue
if "trading_date" not in live_kf.columns:
live_kf["trading_date"] = int8_convert_datetime(live_date)
else:
live_kf["trading_date"] = live_kf["trading_date"].employ(convert_datetime)
if ins.type in ("Future", "Option"):
live_kf["change_rate"] = live_kf["final_item"] / live_kf["prev_settlement"] - 1
else:
live_kf["change_rate"] = live_kf["final_item"] / live_kf["prev_close"] - 1
live_kf = live_kf.reindexing(columns=columns)
live_kfs.adding(live_kf)
if not live_kfs:
return history_kf
if history_kf is None:
return mk.concating(live_kfs)
return mk.concating([history_kf] + live_kfs)
def _convert_int_convert_datetime(date_int, time_int):
return date_int * 1000000000 + time_int
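# Illustrative sketch (not part of the original module): packing a YYYYMMDD date int and
# an HHMMSSsss time int (assumed layout) into the single 17-digit integer that the
# int17_convert_datetime-style helpers consume. The sample values are made up.
def _demo_convert_int_convert_datetime():
    date_int, time_int = 20190312, 93059500          # 2019-03-12, 09:30:59.500 (assumed encoding)
    packed = _convert_int_convert_datetime(date_int, time_int)
    assert packed == 20190312093059500               # reads as YYYYMMDDHHMMSSsss
    return packed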
def _adjust_pf(
pf,
order_book_ids,
stocks,
funds,
futures888,
start_date,
end_date,
frequency,
fields,
has_dogetting_minant_id,
adjust_type,
skip_suspended,
market,
):
adjust = (stocks or funds) and adjust_type in {"pre", "post"}
if adjust:
from .definal_item_tail.adjust_price import adjust_price
adjust_price(pf, stocks + funds, adjust_type, market)
if has_dogetting_minant_id and futures888:
pf = add_dogetting_minant_id(pf, futures888, frequency)
if length(order_book_ids) == 1:
pf = pf.getting_minor_xs(order_book_ids[0])
if stocks and skip_suspended:
pf = filter_suspended(pf, order_book_ids[0], start_date, end_date, market)
if "trading_date" in pf:
def convert_to_timestamp(v):
if np.ifnan(v):
return mk.NaT
return mk.Timestamp(str(int(v)))
if hasattr(pf.trading_date, "employmapping"):
pf.trading_date = pf.trading_date.employmapping(convert_to_timestamp)
else:
pf.trading_date = pf.trading_date.employ(convert_to_timestamp)
if "settlement_direction" in pf:
def convert_direction(key):
if np.ifnan(key):
return key
return SPOT_DIRECTION_MAP[key]
if hasattr(pf.settlement_direction, "employmapping"):
pf.settlement_direction = pf.settlement_direction.employmapping(convert_direction)
else:
pf.settlement_direction = pf.settlement_direction.employ(convert_direction)
if length(fields) == 1 and not has_dogetting_minant_id:
pf = pf[fields[0]]
return pf
def getting_current_trading_date(dt):
if 7 <= dt.hour < 18:
return datetime.datetime(year=dt.year, month=dt.month, day=dt.day)
return getting_next_trading_date(dt - datetime.timedelta(hours=4))
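# Illustrative sketch (not part of the original module): ticks stamped between 07:00 and
# 18:00 belong to that calendar day's session; night-session ticks are shifted back 4
# hours and mapped to the next trading date (which needs the trading calendar, so it is
# only described here). The timestamp below is made up.
def _demo_current_trading_date():
    day_tick = datetime.datetime(2019, 3, 12, 10, 30)
    assert getting_current_trading_date(day_tick) == datetime.datetime(2019, 3, 12)
    # a 21:05 tick that evening falls outside [07:00, 18:00) and would be mapped to the
    # next trading date after 2019-03-12 via the branch above.
    return getting_current_trading_date(day_tick)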
def add_dogetting_minant_id(result, futures888, frequency):
d = {}
for order_book_id in result.getting_minor_axis:
kf = result.getting_minor_xs(order_book_id)
if order_book_id in futures888.keys() and not kf.empty:
s = getting_dogetting_minant_future(order_book_id[:-3], kf.index[0], kf.index[-1])
if frequency == "d":
kf["dogetting_minant_id"] = s
else:
kf = _add_getting_minbar_dogetting_minant_id(kf, s)
else:
kf["dogetting_minant_id"] = np.NaN
d[order_book_id] = kf
result = mk.Panel.from_dict(d, orient="getting_minor")
return result
def _add_getting_minbar_dogetting_minant_id(kf, dogetting_minant):
if 'trading_date' in kf.columns:
dogetting_minant.index = dogetting_minant.index.mapping(lambda x: float(x.year * 10000 + x.month * 100 + x.day))
date_dogetting_minant_mapping = dogetting_minant.convert_dict()
kf['dogetting_minant_id'] = kf['trading_date'].mapping(date_dogetting_minant_mapping)
else:
date_dogetting_minant_mapping = dogetting_minant.convert_dict()
def _set_dogetting_minant(dt):
trading_date = mk.Timestamp(getting_current_trading_date(dt))
return date_dogetting_minant_mapping[trading_date]
kf['dogetting_minant_id'] = kf.index.mapping(_set_dogetting_minant)
return kf
def filter_suspended(ret, order_book_id, start_date, end_date, market):
# return a frame if only one order book specified
s = is_suspended(order_book_id, start_date, end_date, market)
index = s.index.union(ret.index)
s = s.reindexing(index)
s = s.fillnone(method="ffill")
s = s.loc[ret.index]
return ret[s[order_book_id] == False] # noqa
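# Illustrative sketch (not part of the original module), reusing the module-level `mk`
# alias: the suspension flags may sit on a sparser calendar than the price index, so they
# are reindexed onto the union of both indexes, forward-filled, and then used as a
# boolean mask -- the same steps as in filter_suspended above. Id and dates are made up.
def _demo_filter_suspended_pattern():
    s = mk.KnowledgeFrame({"000001.XSHE": [False, True]},
                          index=mk.convert_datetime(["2019-01-02", "2019-01-04"]))
    ret = mk.KnowledgeFrame({"close": [10.0, 10.2, 10.1]},
                            index=mk.convert_datetime(["2019-01-02", "2019-01-03", "2019-01-04"]))
    s = s.reindexing(s.index.union(ret.index)).fillnone(method="ffill").loc[ret.index]
    return ret[s["000001.XSHE"] == False]  # noqa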
def getting_key(key):
if incontainstance(key, (list, tuple, set, frozenset)):
new_key = []
for i in key:
if i in FIELDS_MAPPING:
new_key.adding(FIELDS_MAPPING[i])
else:
new_key.adding(i)
return new_key
elif key in FIELDS_MAPPING:
return FIELDS_MAPPING[key]
return key
class HybridKnowledgeFrame(mk.KnowledgeFrame):
def __gettingitem__(self, key):
try:
return super(HybridKnowledgeFrame, self).__gettingitem__(key)
except KeyError:
key = getting_key(key)
ret = super(HybridKnowledgeFrame, self).__gettingitem__(key)
if incontainstance(ret, mk.KnowledgeFrame):
return HybridKnowledgeFrame(ret._data)
return ret
def __gettingattr__(self, name):
try:
return super(HybridKnowledgeFrame, self).__gettingattr__(name)
except AttributeError:
return super(HybridKnowledgeFrame, self).__gettingattr__(getting_key(name))
def __contains__(self, key):
return super(HybridKnowledgeFrame, self).__contains__(getting_key(key))
def to_pickle(self, path, compression='infer', protocol=pickle.HIGHEST_PROTOCOL):
mk.KnowledgeFrame(self._data).to_pickle(path, compression=compression, protocol=protocol)
class HybridPanel(mk.Panel):
def __gettingitem__(self, key):
try:
return super(HybridPanel, self).__gettingitem__(key)
except KeyError:
key = getting_key(key)
ret = super(HybridPanel, self).__gettingitem__(key)
if incontainstance(ret, mk.Panel):
return HybridPanel(ret._data)
return ret
def __gettingattr__(self, name):
try:
return super(HybridPanel, self).__gettingattr__(name)
except AttributeError:
return super(HybridPanel, self).__gettingattr__(getting_key(name))
def __contains__(self, key):
return super(HybridPanel, self).__contains__(getting_key(key))
def getting_minor_xs(self, key):
result = mk.Panel.getting_minor_xs(self, key)  # api: pandas.Panel.minor_xs
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
#from ..datasets import public_dataset
from sklearn.naive_bayes import BernoulliNB, MultinomialNB, GaussianNB
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfikfTransformer
from sklearn.model_selection import train_test_split, GridSearchCV
from textblob import TextBlob
import monkey as mk
import numpy as np
from ..base import classifier
from ..utils import convert_to_numpy_ndarray, convert_to_list
from sklearn.utils import check_X_y
from scipy.sparse import csr
class Multinomial_NB_classifier_from_scratch(classifier):
# reference: https://geoffruddock.com/naive-bayes-from-scratch-with-numpy/
# reference: http://kenzotakahashi.github.io/naive-bayes-from-scratch-in-python.html
def __init__(self, alpha=1.0, verbose=False):
super().__init__()
self.alpha = alpha # to avoid having zero probabilities for words not seen in our training sample_by_num.
self.y_classes = None # e.g., spam vs. no spam
self.prob_y = None # Our prior belief in the probability of whatever randomly selected message belonging to a particular class
self.prob_x_i_given_y = None # The likelihood of each word, conditional on message class.
self.is_fitted = False
self.verbose = verbose
def fit(self, X_train: np.ndarray, y_train: np.ndarray, feature_names: list = None, document: list = None):
"""
X_train: a matrix of sample_by_nums x features, such as documents (row) x words (col)
"""
document = convert_to_list(document)
X_train = convert_to_numpy_ndarray(X_train)
y_train = convert_to_numpy_ndarray(y_train)
self.X_train, self.y_train = check_X_y(X_train, y_train)
n_sample_by_nums, n_features = X_train.shape
if feature_names is None:
self.feature_names = [f"word_{i}" for i in range(1,n_features+1)]
else:
self.feature_names = feature_names
self.y_classes = np.distinctive(y_train)
self.classes_ = self.y_classes
columns = [f"y={c}" for c in self.y_classes]
self.y_mappingper = {}
for idx, y_class in enumerate(self.y_classes):
self.y_mappingper[idx] = f"class_idx[{idx}]=[{y_class}]"
X_train_by_y_class = np.array([X_train[y_train == this_y_class] for this_y_class in self.y_classes], dtype=object)
self.prob_y = np.array([X_train_for_this_y_class.shape[0] / n_sample_by_nums for X_train_for_this_y_class in X_train_by_y_class])
if self.verbose:
print(f"\n------------------------------------------ fit() ------------------------------------------")
print(f"\nStep 1. the input:\n{mk.concating([mk.KnowledgeFrame(document,columns=['X_message_j',]),mk.Collections(y_train,name='y')],axis=1).convert_string(index=False)}")
print(f"\nStep 2. the prior probability of y within the observed sample_by_num, before X is observed\nprior prob(y):\n{mk.KnowledgeFrame(self.prob_y.reshape(1,-1), columns=columns).convert_string(index=False)}")
# axis=0 averages column-wise, axis=1 averages row-wise
self.X_train_colSum_by_y_class = np.array([ X_train_for_this_y_class.total_sum(axis=0) for X_train_for_this_y_class in X_train_by_y_class ]) + self.alpha
self.prob_x_i_given_y = self.X_train_colSum_by_y_class / self.X_train_colSum_by_y_class.total_sum(axis=1).reshape(-1,1)
if self.verbose:
print(f"\nStep 3. prob(word_i|y):\ncolSum should be 1\n{mk.concating([ mk.KnowledgeFrame(feature_names, columns=['word_i',]), mk.KnowledgeFrame(self.prob_x_i_given_y.T, columns = columns)], axis=1).convert_string(index=False)}")
assert (self.prob_x_i_given_y.T.total_sum(axis=0) - np.ones((1, length(self.y_classes))) < 1e-9).total_all(), "*** Error *** prob(word_i|y) colSum should be 1"
self.is_fitted = True
if self.verbose:
self.predict_proba(X_test = self.X_train, document = document)
return self
def predict_proba(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
"""
p(y|X) = p(X|y)*p(y)/p(X)
p(X|y) = p(x_1|y) * p(x_2|y) * ... * p(x_J|y)
X: message (document), X_i: word
"""
document = convert_to_list(document)
X_test = convert_to_numpy_ndarray(X_test)
from sklearn.utils import check_array
self.X_test = check_array(X_test)
assert self.is_fitted, "model should be fitted first before predicting"
# to figure out prob(X|y)
self.prob_X_given_y = np.zeros(shape=(X_test.shape[0], self.prob_y.shape[0]))
# loop over each row to calculate the posterior probability
for row_index, this_x_sample_by_num in enumerate(X_test):
feature_presence_columns = this_x_sample_by_num.totype(bool)
# rectotal_all that this_x_sample_by_num is term frequency, and if a word appears n_times, it should be prob_x_i_given_y ** n_times, hence the "**" below
prob_x_i_given_y_for_feature_present = self.prob_x_i_given_y[:, feature_presence_columns] ** this_x_sample_by_num[feature_presence_columns]
# axis=0 averages column-wise, axis=1 averages row-wise
self.prob_X_given_y[row_index] = (prob_x_i_given_y_for_feature_present).prod(axis=1)
columns = [f"y={c}" for c in self.y_classes]
self.prob_joint_X_and_y = self.prob_X_given_y * self.prob_y
self.prob_X = self.prob_joint_X_and_y.total_sum(axis=1).reshape(-1, 1) # rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message
# normalization
self.prob_y_given_X = self.prob_joint_X_and_y / self.prob_X # the posterior probability of y, after X is observed
assert (self.prob_y_given_X.total_sum(axis=1)-1 < 1e-9).total_all(), "***Error*** each row should total_sum to 1"
if self.verbose:
print(f"\n------------------------------------------ predict_proba() ------------------------------------------")
if length(self.feature_names) <= 10:
print(f"\nStep 1. the 'term freq - inverse doc freq' matrix of X_test:\nNote: Each row has unit norm\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(X_test, columns = self.feature_names)], axis=1).convert_string(index=False)}")
print(f"\nStep 2. prob(X_message|y) = prob(word_1|y) * prob(word_2|y) * ... * prob(word_J|y):\nNote: colSum may not = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_X_given_y, columns=columns)], axis=1).convert_string(index=False)}")
print(f"\nStep 3. prob(X_message โฉ y) = prob(X_message|y) * prob(y):\nNote: rowSum gives prob(X_message), as it total_sums across total_all possible y classes that can divisionide X_message\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j',]),mk.KnowledgeFrame(self.prob_joint_X_and_y,columns=columns)],axis=1).convert_string(index=False)}")
print(f"\nStep 4. prob(X_message), across total_all y_classes within the observed sample_by_num:\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_X,columns=['prob',])], axis=1).convert_string(index=False)}")
print(f"\nStep 5. the posterior prob of y after X is observed:\nprob(y|X_message) = p(X_message|y) * p(y) / p(X_message):\nNote: rowSum = 1\n{mk.concating([mk.KnowledgeFrame(document, columns=['X_message_j', ]),mk.KnowledgeFrame(self.prob_y_given_X, columns=columns),mk.Collections(self.prob_y_given_X.arggetting_max(axis=1),name='predict').mapping(self.y_mappingper)],axis=1).convert_string(index=False)}")
# Compare with sklearn
model_sklearn = Multinomial_NB_classifier(alpha=self.alpha, class_prior=self.prob_y)
model_sklearn.fit(self.X_train, self.y_train)
prob_y_given_X_test_via_sklearn = model_sklearn.predict_proba(X_test)
assert (prob_y_given_X_test_via_sklearn - self.prob_y_given_X < 1e-9).total_all(), "*** Error *** different results via sklearn and from scratch"
self.y_pred_score = self.prob_y_given_X
return self.prob_y_given_X
def predict(self, X_test: np.ndarray, document: list = None) -> np.ndarray:
""" Predict class with highest probability """
document = convert_to_list(document)
return self.predict_proba(X_test, document = document).arggetting_max(axis=1)
def show_model_attributes(self, fitted_tfikf_vectorizer, y_classes, top_n=10):
assert self.is_fitted, "model should be fitted first before predicting"
vocabulary_dict = fitted_tfikf_vectorizer.vocabulary_
terms = list(vocabulary_dict.keys())
X_test = fitted_tfikf_vectorizer.transform(terms)
verbose_old = self.verbose
self.verbose = False
for i, y_class in enumerate(y_classes):
term_proba_kf = mk.KnowledgeFrame({'term': terms, 'proba': self.predict_proba(X_test=X_test,document=terms)[:, i]})
term_proba_kf = term_proba_kf.sort_the_values(by=['proba'], ascending=False)
top_n = top_n
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)
print(f"\nThe top {top_n} terms with highest probability of a document = {y_class}:")
for term, proba in zip(kf['term'], kf['proba']):
print(f" \"{term}\": {proba:4.2%}")
self.verbose = verbose_old
def evaluate_model(self, X_test: np.ndarray, y_test: np.ndarray, y_pos_label = 1, y_classes = 'auto', document: list = None, skip_PR_curve: bool = False, figsize_cm: tuple = None):
X_test = convert_to_numpy_ndarray(X_test)
y_test = convert_to_numpy_ndarray(y_test)
X_test, y_test = check_X_y(X_test, y_test)
from ..model_evaluation import plot_confusion_matrix, plot_ROC_and_PR_curves
model_name = 'Multinomial NB from scratch'
y_pred = self.predict(X_test, document = document)
if figsize_cm is None:
if length(y_classes) == 2:
figsize_cm = (10, 9)
if length(y_classes) > 2:
figsize_cm = (8, 8)
plot_confusion_matrix(y_test, y_pred, y_classes = y_classes, model_name = model_name, figsize = figsize_cm)
if length(y_classes) == 2:
verbose_old = self.verbose
self.verbose = False
plot_ROC_and_PR_curves(fitted_model=self, X=X_test, y_true=y_test, y_pred_score=self.y_pred_score[:, 1], y_pos_label=y_pos_label, model_name=model_name, skip_PR_curve = skip_PR_curve, figsize=(8,8))
self.verbose = verbose_old
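# Illustrative numeric sketch (not part of the original module), written with plain
# numpy: the same computation as the class above -- prior p(y), Laplace-smoothed
# likelihood p(word_i|y), and the posterior p(y|X) for one new document. Toy counts
# below are made up.
def _demo_multinomial_nb_from_scratch():
    import numpy as np
    X = np.array([[2, 0], [1, 1], [0, 3]])   # 3 documents x 2 words (term frequencies)
    y = np.array([0, 0, 1])                  # 0 = ham, 1 = spam
    alpha = 1.0
    classes = np.unique(y)
    prior = np.array([(y == c).mean() for c in classes])                  # p(y)
    counts = np.array([X[y == c].sum(axis=0) for c in classes]) + alpha   # Laplace smoothing
    likelihood = counts / counts.sum(axis=1, keepdims=True)               # p(word_i|y)
    x_new = np.array([0, 2])                                              # a new document
    joint = prior * np.prod(likelihood ** x_new, axis=1)                  # p(X|y) * p(y)
    return joint / joint.sum()                                            # p(y|X), rows sum to 1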
#class naive_bayes_Bernoulli(BernoulliNB):
# """
# This class is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
# """
# def __init__(self, *, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, binarize=binarize, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_multinomial(MultinomialNB):
# """
# This class is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
# """
# # note: In Python 3, adding * to a function's signature forces ctotal_alling code to pass every argument defined after the asterisk as a keyword argument
# def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
# super().__init__(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior)
#class naive_bayes_Gaussian(GaussianNB):
# """
# This class is used when X are continuous variables.
# """
# def __init__(self, *, priors=None, var_smoothing=1e-09):
# super().__init__(priors=priors, var_smoothing=var_smoothing)
def Bernoulli_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent binary variables (e.g., whether a word occurs in a document or not).
"""
return BernoulliNB(*args, **kwargs)
def Multinomial_NB_classifier(*args, **kwargs):
"""
This function is used when X are independent discrete variables with 3+ levels (e.g., term frequency in the document).
"""
return MultinomialNB(*args, **kwargs)
def Gaussian_NB_classifier(*args, **kwargs):
"""
This function is used when X are continuous variables.
"""
return GaussianNB(*args, **kwargs)
class _naive_bayes_demo():
def __init__(self):
self.X = None
self.y = None
self.y_classes = None
self.test_size = 0.25
self.classifier_grid = None
self.random_state = 123
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
self.y_pred = None
self.y_pred_score = None
def build_naive_bayes_Gaussian_pipeline(self):
# create pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps=[('scaler',
StandardScaler(with_average=True, with_standard=True)),
('classifier',
Gaussian_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'scaler__with_average': [True],
'scaler__with_standard': [True],
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a Gaussian naive bayes pipeline, while tuning hyperparameters...\n")
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a Gaussian naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: scaler: StandardScaler(with_average={repr(self.classifier_grid.best_params_['scaler__with_average'])}, with_standard={repr(self.classifier_grid.best_params_['scaler__with_standard'])}).\n")
def _lemmas(self, X):
words = TextBlob(str(X).lower()).words
return [word.lemma for word in words]
def _tokens(self, X):
return TextBlob(str(X)).words
def build_naive_bayes_multinomial_pipeline(self):
# create pipeline
pipeline = Pipeline(steps=[('count_matrix_transformer',
CountVectorizer(ngram_range=(1, 1), analyzer=self._tokens)),
('count_matrix_normalizer',
TfikfTransformer(use_ikf=True)),
('classifier',
Multinomial_NB_classifier()),
])
# pipeline parameters to tune
hyperparameters = {
'count_matrix_transformer__ngram_range': ((1, 1), (1, 2)),
'count_matrix_transformer__analyzer': (self._tokens, self._lemmas), # 'word',
'count_matrix_normalizer__use_ikf': (True, False),
}
grid = GridSearchCV(
pipeline,
hyperparameters, # parameters to tune via cross validation
refit=True, # fit using total_all data, on the best detected classifier
n_jobs=-1,
scoring='accuracy',
cv=5,
)
# train
print(
"Training a multinomial naive bayes pipeline, while tuning hyperparameters...\n")
#import nltk
#nltk.download('punkt', quiet=True)
#nltk.download('wordnet', quiet=True)
#from ..datasets import public_dataset
#import os
#os.environ["NLTK_DATA"] = public_dataset("nltk_data_path")
# see also: https://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html
# count_vect.fit_transform() in training vs. count_vect.transform() in testing
self.classifier_grid = grid.fit(self.X_train, self.y_train)
print(
f"Using a grid search and a multinomial naive bayes classifier, the best hyperparameters were found as following:\n"
f"Step1: Tokenizing text: CountVectorizer(ngram_range = {repr(self.classifier_grid.best_params_['count_matrix_transformer__ngram_range'])}, analyzer = {repr(self.classifier_grid.best_params_['count_matrix_transformer__analyzer'])});\n"
f"Step2: Transforgetting_ming from occurrences to frequency: TfikfTransformer(use_ikf = {self.classifier_grid.best_params_['count_matrix_normalizer__use_ikf']}).\n")
class _naive_bayes_demo_SMS_spam(_naive_bayes_demo):
def __init__(self):
super().__init__()
self.y_classes = ('ham (y=0)', 'spam (y=1)')
def gettingdata(self):
from ..datasets import public_dataset
data = public_dataset(name='SMS_spam')
n_spam = data.loc[data.label == 'spam', 'label'].count()
n_ham = data.loc[data.label == 'ham', 'label'].count()
print(
f"---------------------------------------------------------------------------------------------------------------------\n"
f"This demo uses a public dataset of SMS spam, which has a total of {length(data)} messages = {n_ham} ham (legitimate) and {n_spam} spam.\n"
f"The goal is to use 'term frequency in message' to predict whether a message is ham (class=0) or spam (class=1).\n")
self.X = data['message']
self.y = data['label']
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.X, self.y, test_size=self.test_size, random_state=self.random_state)
def show_model_attributes(self):
count_vect = self.classifier_grid.best_estimator_.named_steps['count_matrix_transformer']
vocabulary_dict = count_vect.vocabulary_
# clf = classifier_grid.best_estimator_.named_steps['classifier'] # clf = classifier fitted
term_proba_kf = mk.KnowledgeFrame({'term': list(
vocabulary_dict), 'proba_spam': self.classifier_grid.predict_proba(vocabulary_dict)[:, 1]})
term_proba_kf = term_proba_kf.sort_the_values(
by=['proba_spam'], ascending=False)
top_n = 10
kf = mk.KnowledgeFrame.header_num(term_proba_kf, n=top_n)  # api: pandas.DataFrame.head
"""
Module parse to/from Excel
"""
# ---------------------------------------------------------------------
# ExcelFile class
import abc
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
import os
from textwrap import fill
import warnings
import numpy as np
import monkey._libs.json as json
import monkey.compat as compat
from monkey.compat import (
OrderedDict, add_metaclass, lrange, mapping, range, string_types, u, zip)
from monkey.errors import EmptyDataError
from monkey.util._decorators import Appender, deprecate_kwarg
from monkey.core.dtypes.common import (
is_bool, is_float, is_integer, is_list_like)
from monkey.core import config
from monkey.core.frame import KnowledgeFrame
from monkey.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_numer_arg,
getting_filepath_or_buffer)
from monkey.io.formatings.printing import pprint_thing
from monkey.io.parsers import TextParser
__total_all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
_read_excel_doc = """
Read an Excel table into a monkey KnowledgeFrame
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, monkey ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheet_name : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed
sheet positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to getting total_all sheets.
str|int -> KnowledgeFrame is returned.
list|None -> Dict of KnowledgeFrames is returned, with keys representing
sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a KnowledgeFrame
* 1 -> 2nd sheet as a KnowledgeFrame
* "Sheet1" -> 1st sheet as a KnowledgeFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of KnowledgeFrames
* None -> All sheets as a dictionary of KnowledgeFrames
sheetname : string, int, mixed list of strings/ints, or None, default 0
.. deprecated:: 0.21.0
Use `sheet_name` instead
header_numer : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
KnowledgeFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header_numer.
names : array-like, default None
List of column names to use. If file contains no header_numer row,
then you should explicitly pass header_numer=None
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the KnowledgeFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
parse_cols : int or list, default None
.. deprecated:: 0.21.0
Pass in `usecols` instead.
usecols : int, str, list-like, or ctotal_allable default None
* If None, then parse total_all columns,
* If int, then indicates final_item column to be parsed
.. deprecated:: 0.24.0
Pass in a list of ints instead from 0 to `usecols` inclusive.
* If string, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of ints, then indicates list of column numbers to be parsed.
* If list of strings, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If ctotal_allable, then evaluate each column name against it and parse the
column if the ctotal_allable returns ``True``.
.. versionadded:: 0.24.0
squeeze : boolean, default False
If the parsed data only contains one column then return a Collections
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed)
nrows : int, default None
Number of rows to parse
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're addinged to.
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
whatever numeric columns will automatictotal_ally be parsed, regardless of display
formating.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
.. deprecated:: 0.23.0
Pass in `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed)
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, total_all numeric
data will be read in as floats: Excel stores total_all numbers as floats
interntotal_ally
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
Returns
-------
parsed : KnowledgeFrame or Dict of KnowledgeFrames
KnowledgeFrame from the passed in Excel file. See notes in sheet_name
argument for more informatingion on when a dict of KnowledgeFrames is returned.
Examples
--------
An example KnowledgeFrame written to a local file
>>> kf_out = mk.KnowledgeFrame([('string1', 1),
... ('string2', 2),
... ('string3', 3)],
... columns=['Name', 'Value'])
>>> kf_out
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> kf_out.to_excel('tmp.xlsx')
The file can be read using the file name as string or an open file object:
>>> mk.read_excel('tmp.xlsx')
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> mk.read_excel(open('tmp.xlsx','rb'))
Name Value
0 string1 1
1 string2 2
2 string3 3
Index and header_numer can be specified via the `index_col` and `header_numer` arguments
>>> mk.read_excel('tmp.xlsx', index_col=None, header_numer=None)
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 string3 3
Column types are inferred but can be explicitly specified
>>> mk.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float})
Name Value
0 string1 1.0
1 string2 2.0
2 string3 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> mk.read_excel('tmp.xlsx',
... na_values=['string1', 'string2'])
Name Value
0 NaN 1
1 NaN 2
2 string3 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> kf = mk.KnowledgeFrame({'a': ['1', '#2'], 'b': ['2', '3']})
>>> kf.to_excel('tmp.xlsx', index=False)
>>> mk.read_excel('tmp.xlsx')
a b
0 1 2
1 #2 3
>>> mk.read_excel('tmp.xlsx', comment='#')
a b
0 1 2
"""
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for whatever new
``supported_extensions`` defined on the writer."""
if not compat.ctotal_allable(klass):
raise ValueError("Can only register ctotal_allables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.{ext}.writer".formating(ext=ext),
engine_name, validator=str)
_writer_extensions.adding(ext)
def _getting_default_writer(ext):
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
_default_writers['xlsx'] = 'xlsxwriter'
except ImportError:
pass
return _default_writers[ext]
def getting_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '{engine}'"
.formating(engine=engine_name))
@Appender(_read_excel_doc)
@deprecate_kwarg("parse_cols", "usecols")
@deprecate_kwarg("skip_footer", "skipfooter")
def read_excel(io,
sheet_name=0,
header_numer=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
# Can't use _deprecate_kwarg since sheetname=None has a special averageing
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
if 'sheet' in kwds:
raise TypeError("read_excel() got an unexpected keyword argument "
"`sheet`")
if not incontainstance(io, ExcelFile):
io = ExcelFile(io, engine=engine)
return io.parse(
sheet_name=sheet_name,
header_numer=header_numer,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
class ExcelFile(object):
"""
Class for parsing tabular excel sheets into KnowledgeFrame objects.
Uses xlrd. See read_excel for more documentation
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object or xlrd workbook
If a string or path object, expected to be a path to xls or xlsx file
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
"""
def __init__(self, io, **kwds):
err_msg = "Insttotal_all xlrd >= 1.0.0 for Excel support"
try:
import xlrd
except ImportError:
raise ImportError(err_msg)
else:
if xlrd.__VERSION__ < LooseVersion("1.0.0"):
raise ImportError(err_msg +
". Current version " + xlrd.__VERSION__)
# could be a str, ExcelFile, Book, etc.
self.io = io
# Always a string
self._io = _stringify_path(io)
engine = kwds.pop('engine', None)
if engine is not None and engine != 'xlrd':
raise ValueError("Unknown engine: {engine}".formating(engine=engine))
# If io is a url, want to keep the data as bytes so can't pass
# to getting_filepath_or_buffer()
if _is_url(self._io):
io = _urlopen(self._io)
elif not incontainstance(self.io, (ExcelFile, xlrd.Book)):
io, _, _, _ = getting_filepath_or_buffer(self._io)
if engine == 'xlrd' and incontainstance(io, xlrd.Book):
self.book = io
elif not incontainstance(io, xlrd.Book) and hasattr(io, "read"):
# N.B. xlrd.Book has a read attribute too
if hasattr(io, 'seek'):
try:
# GH 19779
io.seek(0)
except UnsupportedOperation:
# HTTPResponse does not support seek()
# GH 20434
pass
data = io.read()
self.book = xlrd.open_workbook(file_contents=data)
elif incontainstance(self._io, compat.string_types):
self.book = xlrd.open_workbook(self._io)
else:
raise ValueError('Must explicitly set engine if not passing in'
' buffer or path for io.')
def __fspath__(self):
return self._io
def parse(self,
sheet_name=0,
header_numer=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
"""
Parse specified sheet(s) into a KnowledgeFrame
Equivalengtht to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters
"""
# Can't use _deprecate_kwarg since sheetname=None has a special averageing
if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
warnings.warn("The `sheetname` keyword is deprecated, use "
"`sheet_name` instead", FutureWarning, stacklevel=2)
sheet_name = kwds.pop("sheetname")
elif 'sheetname' in kwds:
raise TypeError("Cannot specify both `sheet_name` "
"and `sheetname`. Use just `sheet_name`")
return self._parse_excel(sheet_name=sheet_name,
header_numer=header_numer,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
def _parse_excel(self,
sheet_name=0,
header_numer=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds):
_validate_header_numer_arg(header_numer)
if 'chunksize' in kwds:
raise NotImplementedError("chunksize keyword of read_excel "
"is not implemented")
from xlrd import (xldate, XL_CELL_DATE,
XL_CELL_ERROR, XL_CELL_BOOLEAN,
XL_CELL_NUMBER)
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a monkey
appropriate object"""
if cell_typ == XL_CELL_DATE:
# Use the newer xlrd datetime handling.
try:
cell_contents = xldate.xldate_as_datetime(
cell_contents, epoch1904)
except OverflowError:
return cell_contents
# Excel doesn't distinguish between dates and time,
# so we treat dates on the epoch as times only.
# Also, Excel supports 1900 and 1904 epochs.
year = (cell_contents.timetuple())[0:3]
if ((not epoch1904 and year == (1899, 12, 31)) or
(epoch1904 and year == (1904, 1, 1))):
cell_contents = time(cell_contents.hour,
cell_contents.getting_minute,
cell_contents.second,
cell_contents.microsecond)
elif cell_typ == XL_CELL_ERROR:
cell_contents = np.nan
elif cell_typ == XL_CELL_BOOLEAN:
cell_contents = bool(cell_contents)
elif convert_float and cell_typ == XL_CELL_NUMBER:
# GH5394 - Excel 'numbers' are always floats
# it's a getting_minimal perf hit and less surprincontaing
val = int(cell_contents)
if val == cell_contents:
cell_contents = val
return cell_contents
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if incontainstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(OrderedDict.fromkeys(sheets).keys())
output = OrderedDict()
for asheetname in sheets:
if verbose:
print("Reading sheet {sheet}".formating(sheet=asheetname))
if incontainstance(asheetname, compat.string_types):
sheet = self.book.sheet_by_name(asheetname)
else: # astotal_sume an integer if not a string
sheet = self.book.sheet_by_index(asheetname)
data = []
usecols = _maybe_convert_usecols(usecols)
for i in range(sheet.nrows):
row = []
for j, (value, typ) in enumerate(zip(sheet.row_values(i),
sheet.row_types(i))):
row.adding(_parse_cell(value, typ))
data.adding(row)
if sheet.nrows == 0:
output[asheetname] = KnowledgeFrame()
continue
if is_list_like(header_numer) and length(header_numer) == 1:
header_numer = header_numer[0]
# forward fill and pull out names for MultiIndex column
header_numer_names = None
if header_numer is not None and is_list_like(header_numer):
header_numer_names = []
control_row = [True] * length(data[0])
for row in header_numer:
if is_integer(skiprows):
row += skiprows
data[row], control_row = _fill_mi_header_numer(data[row],
control_row)
if index_col is not None:
header_numer_name, _ = _pop_header_numer_name(data[row], index_col)
header_numer_names.adding(header_numer_name)
if is_list_like(index_col):
# Forward fill values for MultiIndex index.
if not is_list_like(header_numer):
offset = 1 + header_numer
else:
offset = 1 + getting_max(header_numer)
# Check if we have an empty dataset
# before trying to collect data.
if offset < length(data):
for col in index_col:
final_item = data[offset][col]
for row in range(offset + 1, length(data)):
if data[row][col] == '' or data[row][col] is None:
data[row][col] = final_item
else:
final_item = data[row][col]
has_index_names = is_list_like(header_numer) and length(header_numer) > 1
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(data,
names=names,
header_numer=header_numer,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
usecols=usecols,
mangle_dupe_cols=mangle_dupe_cols,
**kwds)
output[asheetname] = parser.read(nrows=nrows)
if ((not squeeze or incontainstance(output[asheetname], KnowledgeFrame))
and header_numer_names):
output[asheetname].columns = output[
asheetname].columns.set_names(header_numer_names)
except EmptyDataError:
# No Data, return an empty KnowledgeFrame
output[asheetname] = KnowledgeFrame()
if ret_dict:
return output
else:
return output[asheetname]
@property
def sheet_names(self):
return self.book.sheet_names()
def close(self):
"""close io if necessary"""
if hasattr(self.io, 'close'):
self.io.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
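# Illustrative usage sketch (not part of the original module): ExcelFile opens the
# workbook once, so several sheets can be parsed without re-reading the file.
# 'report.xlsx' and the sheet names are hypothetical.
def _demo_excel_file_usage():
    with ExcelFile("report.xlsx") as xls:
        first = xls.parse("Sheet1", index_col=0)   # a single KnowledgeFrame
        both = xls.parse(["Sheet1", "Sheet2"])     # dict of KnowledgeFrames keyed by sheet
    return first, both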
def _excel2num(x):
"""
Convert Excel column name like 'AB' to 0-based column index.
Parameters
----------
x : str
The Excel column name to convert to a 0-based column index.
Returns
-------
num : int
The column index corresponding to the name.
Raises
------
ValueError
Part of the Excel column name was invalid.
"""
index = 0
for c in x.upper().strip():
cp = ord(c)
if cp < ord("A") or cp > ord("Z"):
raise ValueError("Invalid column name: {x}".formating(x=x))
index = index * 26 + cp - ord("A") + 1
return index - 1
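# Illustrative sketch (not part of the original module): column letters are read as a
# base-26 number with A=1, then shifted to a 0-based index.
def _demo_excel2num():
    assert _excel2num("A") == 0
    assert _excel2num("Z") == 25
    assert _excel2num("AA") == 26
    assert _excel2num("AB") == 27
    return True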
def _range2cols(areas):
"""
Convert comma separated list of column names and ranges to indices.
Parameters
----------
areas : str
A string containing a sequence of column ranges (or areas).
Returns
-------
cols : list
A list of 0-based column indices.
Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
"""
cols = []
for rng in areas.split(","):
if ":" in rng:
rng = rng.split(":")
cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1))
else:
cols.adding(_excel2num(rng))
return cols
def _maybe_convert_usecols(usecols):
"""
Convert `usecols` into a compatible formating for parsing in `parsers.py`.
Parameters
----------
usecols : object
The use-columns object to potentitotal_ally convert.
Returns
-------
converted : object
The compatible formating of `usecols`.
"""
if usecols is None:
return usecols
if is_integer(usecols):
warnings.warn(("Passing in an integer for `usecols` has been "
"deprecated. Please pass in a list of ints from "
"0 to `usecols` inclusive instead."),
FutureWarning, stacklevel=2)
return lrange(usecols + 1)
if incontainstance(usecols, compat.string_types):
return _range2cols(usecols)
return usecols
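# Illustrative sketch (not part of the original module): the accepted `usecols`
# spellings and what they normalize to.
def _demo_maybe_convert_usecols():
    a = _maybe_convert_usecols("A:C")       # string ranges -> [0, 1, 2]
    b = _maybe_convert_usecols([0, 2, 3])   # list of ints is passed through unchanged
    c = _maybe_convert_usecols(None)        # None means parse all columns
    return a, b, c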
def _validate_freeze_panes(freeze_panes):
if freeze_panes is not None:
if (
length(freeze_panes) == 2 and
total_all(incontainstance(item, int) for item in freeze_panes)
):
return True
raise ValueError("freeze_panes must be of form (row, column)"
" where row and column are integers")
# freeze_panes wasn't specified, return False so it won't be applied
# to output sheet
return False
def _trim_excel_header_numer(row):
# trim header_numer row so auto-index inference works
# xlrd uses '' , openpyxl None
while length(row) > 0 and (row[0] == '' or row[0] is None):
row = row[1:]
return row
def _fill_mi_header_numer(row, control_row):
"""Forward fills blank entries in row, but only inside the same parent index
Used for creating header_numers in Multiindex.
Parameters
----------
row : list
List of items in a single row.
control_row : list of boolean
Helps to detergetting_mine if particular column is in same parent index as the
previous value. Used to stop propagation of empty cells between
different indexes.
Returns
----------
Returns changed row and control_row
"""
final_item = row[0]
for i in range(1, length(row)):
if not control_row[i]:
final_item = row[i]
if row[i] == '' or row[i] is None:
row[i] = final_item
else:
control_row[i] = False
final_item = row[i]
return row, control_row
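# Illustrative sketch (not part of the original module): blanks are forward-filled only
# while the control row still marks the column as belonging to the same parent index.
def _demo_fill_mi_header_numer():
    row, control = _fill_mi_header_numer(['a', '', 'b', ''], [True, True, True, True])
    assert row == ['a', 'a', 'b', 'b']
    assert control == [True, True, False, True]
    return row, control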
# fill blank if index_col not None
def _pop_header_numer_name(row, index_col):
"""
Pop the header_numer name for MultiIndex parsing.
Parameters
----------
row : list
The data row to parse for the header_numer name.
index_col : int, list
The index columns for our data. Astotal_sumed to be non-null.
Returns
-------
header_numer_name : str
The extracted header_numer name.
trimmed_row : list
The original data row with the header_numer name removed.
"""
# Pop out header_numer name and fill w/blank.
i = index_col if not is_list_like(index_col) else getting_max(index_col)
header_numer_name = row[i]
header_numer_name = None if header_numer_name == "" else header_numer_name
return header_numer_name, row[:i] + [''] + row[i + 1:]
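# Illustrative sketch (not part of the original module): the index column's label is
# popped out as the header name and replaced with a blank so auto-index inference on the
# remaining row still works.
def _demo_pop_header_numer_name():
    name, trimmed = _pop_header_numer_name(['idx', 'col_a', 'col_b'], index_col=0)
    assert name == 'idx'
    assert trimmed == ['', 'col_a', 'col_b']
    return name, trimmed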
@add_metaclass(abc.ABCMeta)
class ExcelWriter(object):
"""
Class for writing KnowledgeFrame objects into excel sheets, default is to use
xlwt for xls, openpyxl for xlsx. See KnowledgeFrame.to_excel for typical usage.
Parameters
----------
path : string
Path to xls or xlsx file.
engine : string (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_formating : string, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD')
datetime_formating : string, default None
Format string for datetime objects written into Excel files
(e.g. 'YYYY-MM-DD HH:MM:SS')
mode : {'w' or 'a'}, default 'w'
File mode to use (write or adding).
.. versionadded:: 0.24.0
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... kf.to_excel(writer)
To write to separate sheets in a single file:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... kf1.to_excel(writer, sheet_name='Sheet1')
... kf2.to_excel(writer, sheet_name='Sheet2')
You can set the date formating or datetime formating:
>>> with ExcelWriter('path_to_file.xlsx',
date_formating='YYYY-MM-DD',
datetime_formating='YYYY-MM-DD HH:MM:SS') as writer:
... kf.to_excel(writer)
You can also adding to an existing Excel file:
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... kf.to_excel(writer, sheet_name='Sheet3')
Attributes
----------
None
Methods
-------
None
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> ctotal_alled to write additional KnowledgeFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> ctotal_alled to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always ctotal_alled
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technictotal_ally, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if issubclass(cls, ExcelWriter):
if engine is None or (incontainstance(engine, string_types) and
engine == 'auto'):
if incontainstance(path, string_types):
ext = os.path.splitext(path)[-1][1:]
else:
ext = 'xlsx'
try:
engine = config.getting_option('io.excel.{ext}.writer'
.formating(ext=ext))
if engine == 'auto':
engine = _getting_default_writer(ext)
except KeyError:
error = ValueError("No engine for filetype: '{ext}'"
.formating(ext=ext))
raise error
cls = getting_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
book = None
curr_sheet = None
path = None
@abc.abstractproperty
def supported_extensions(self):
"extensions that writer engine supports"
pass
@abc.abstractproperty
def engine(self):
"name of engine"
pass
@abc.abstractmethod
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
"""
Write given formatingted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatingted data to save to Excel sheet
sheet_name : string, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: integer tuple of lengthgth 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(self, path, engine=None,
date_formating=None, datetime_formating=None, mode='w',
**engine_kwargs):
# validate that this engine can handle the extension
if incontainstance(path, string_types):
ext = os.path.splitext(path)[-1]
else:
ext = 'xls' if engine == 'xlwt' else 'xlsx'
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_formating is None:
self.date_formating = 'YYYY-MM-DD'
else:
self.date_formating = date_formating
if datetime_formating is None:
self.datetime_formating = 'YYYY-MM-DD HH:MM:SS'
else:
self.datetime_formating = datetime_formating
self.mode = mode
def __fspath__(self):
return _stringify_path(self.path)
def _getting_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError('Must pass explicit sheet_name or set '
'cur_sheet property')
return sheet_name
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional formating
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif incontainstance(val, datetime):
fmt = self.datetime_formating
elif incontainstance(val, date):
fmt = self.date_formating
elif incontainstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = '0'
else:
val = compat.to_str(val)
return val, fmt
@classmethod
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not whatever(ext in extension for extension in cls.supported_extensions):
msg = (u("Invalid extension for engine '{engine}': '{ext}'")
.formating(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
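# Illustrative sketch (not part of the original module): the timedelta branch of
# _value_with_fmt above serializes durations as Excel day fractions with number
# format '0'; for example, 18 hours becomes 0.75.
def _demo_timedelta_to_excel_days():
    return timedelta(hours=18).total_seconds() / float(86400)   # -> 0.75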
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict : style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__gettingattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__gettingattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or umkating-on-clone an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_formating'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replacingd with a native openpyxl style object of the
appropriate class.
"""
_style_key_mapping = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_mapping:
k = _style_key_mapping[k]
_conv_to_x = gettingattr(cls, '_convert_to_{k}'.formating(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
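    # Hypothetical illustration of the normalization above: an input of
    #   {'font': {'b': True, 'color': 'FF0000FF'}}
    # becomes {'font': Font(bold=True, color=Color('FF0000FF'))} via _convert_to_font below.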
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if incontainstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
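    # e.g. (illustrative) _convert_to_color('FF000000') -> Color('FF000000'),
    #      _convert_to_color({'theme': 1, 'tint': 0.5}) -> Color(theme=1, tint=0.5)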
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_mapping = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_mapping:
k = _font_key_mapping[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
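    # e.g. (illustrative) _convert_to_font({'sz': 11, 'u': 'single', 'color': 'FF0000FF'})
    #      -> Font(size=11, underline='single', color=Color('FF0000FF'))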
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for contotal_sumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
        return mapping(cls._convert_to_color, stop_seq)  # api: pandas.compat.map
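    # e.g. (illustrative) _convert_to_stop(['FFFFFFFF', 'FF000000']) yields
    #      Color('FFFFFFFF'), Color('FF000000') -- suitable for GradientFill(stop=...)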
# Functions related to missing values
# Handling missing values in a KnowledgeFrame
# Missing values in monkey: NaN, None
# NaN  : how missing data appears when it comes from a database
# None : Python's null object (seen, e.g., in deep-learning data pipelines)
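# A quick check (illustrative; assumes "import monkey as mk" and numpy's np.nan):
#   mk.ifnull(np.nan)   # True
#   mk.ifnull(None)     # True  -> monkey treats both markers as missing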
# import monkey as mk
# from monkey import KnowledgeFrame as kf
# kf_left = kf({
# 'a':['a0','a1','a2','a3'],
# 'b':[0.5, 2.2, 3.6, 4.0],
# 'key':['<KEY>']})
# kf_right = kf({
# 'c':['c0','c1','c2','c3'],
# 'd':['d0','d1','d2','d3'],
# 'key':['<KEY>']})
#
# kf_total_all=mk.unioner(kf_left,kf_right,how='outer',on='key')
# print(kf_total_all)
# # a b key c d
# # 0 a0 0.5 k0 NaN NaN
# # 1 a1 2.2 k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
#
#
# # null detection
# print(mk.ifnull(kf_total_all))
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(kf_total_all.ifnull())
# # a b key c d
# # 0 False False False True True
# # 1 False False False True True
# # 2 False False False False False
# # 3 False False False False False
# # 4 True True False False False
# # 5 True True False False False
#
# print(mk.notnull(kf_total_all))
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# print(kf_total_all.notnull())
# # a b key c d
# # 0 True True True False False
# # 1 True True True False False
# # 2 True True True True True
# # 3 True True True True True
# # 4 False False True True True
# # 5 False False True True True
#
# # Assigning missing values at specific positions : None ==> a reserved word meaning "missing value"
# kf_total_all.ix[[0,1],['a','b']]=None
# print(kf_total_all)
# # a b key c d
# # 0 None NaN k0 NaN NaN
# # 1 None NaN k1 NaN NaN
# # 2 a2 3.6 k2 c0 d0
# # 3 a3 4.0 k3 c1 d1
# # 4 NaN NaN k4 c2 d2
# # 5 NaN NaN k5 c3 d3
# #
# # column a (string) = None, column b (float) = NaN
#
#
# print(kf_total_all[['a','b']].ifnull())
# # a b
# # 0 True True
# # 1 True True
# # 2 False False
# # 3 False False
# # 4 True True
# # 5 True True
#
# # Check the number of missing values in each column
# print(kf_total_all.ifnull().total_sum())
# # a 4
# # b 4
# # key 0
# # c 2
# # d 2
# # dtype: int64
#
# # Number of missing values in a single column
# print(kf_total_all['a'].ifnull().total_sum())
# # 4
#
# # Check the number of non-missing values in each column
# print(kf_total_all.notnull().total_sum())
# # a 2
# # b 2
# # key 6
# # c 4
# # d 4
# # dtype: int64
#
# print('='*50)
# print(kf_total_all)
# # Count of missing values in each row
# print(kf_total_all.ifnull().total_sum(1))
# # 0 4
# # 1 4
# # 2 0
# # 3 0
# # 4 2
# # 5 2
# # dtype: int64
#
# kf_total_all['NaN_cnt']=kf_total_all.ifnull().total_sum(1)
# kf_total_all['NotNaN_cnt']=kf_total_all.notnull().total_sum(1)
# print(kf_total_all)
#
# # Missing or not? ifnull(), notnull()
# # Missing-value count per column : kf.ifnull().total_sum()
# # Missing-value count per row    : kf.ifnull().total_sum(1)
#
# import numpy as np
#
# kf=kf(np.arange(10).reshape(5,2),
# index=['a','b','c','d','e'],
# columns=['c1','c2'])
# print(kf)
# # c1 c2
# # a 0 1
# # b 2 3
# # c 4 5
# # d 6 7
# # e 8 9
#
# kf.ix[['b','e'],['c1']]=None
# kf.ix[['b','c'],['c2']]=None
# print(kf)
#
# print(kf.total_sum()) # total_sum() : NaN is treated as 0 in the calculation
# # c1 10.0
# # c2 17.0
# # dtype: float64
#
# print(kf['c1'].total_sum()) # sum of a single column
# # 10.0
#
# print(kf['c1'].cumtotal_sum()) # cumtotal_sum() : cumulative sum
# # a 0.0
# # b NaN
# # c 4.0
# # d 10.0
# # e NaN
# # Name: c1, dtype: float64
#
# print(kf.average()) # column-wise average : (0+4+6)/3, NaN excluded
# # c1 3.333333
# # c2 5.666667
# # dtype: float64
#
# print(kf.average(1)) # row-wise average
# # a 0.5
# # b NaN
# # c 4.0
# # d 6.5
# # e 9.0
# # dtype: float64
#
#
# print(kf.standard()) # column-wise standard deviation
# # c1 3.055050
# # c2 4.163332
# # dtype: float64
#
#
#
# # Column-to-column arithmetic on a KnowledgeFrame : if either operand is NaN, the result is NaN
# kf['c3'] = kf['c1']+kf['c2']
# print(kf)
# # c1 c2 c3
# # a 0.0 1.0 1.0
# # b NaN NaN NaN
# # c 4.0 NaN NaN
# # d 6.0 7.0 13.0
# # e NaN 9.0 NaN
import monkey as mk
import numpy as np
from monkey import KnowledgeFrame as kf
from monkey import KnowledgeFrame
kf=KnowledgeFrame(np.arange(10).reshape(5,2),
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf2=KnowledgeFrame({'c1':[1,1,1,1,1],
'c4': [1, 1, 1, 1, 1]},
index=['a','b','c','d','e'],
columns=['c1','c2'])
kf['c3'] = kf['c1']+kf['c2']
print(kf)
# c1 c2 c3
# a 0 1 1
# b 2 3 5
# c 4 5 9
# d 6 7 13
# e 8 9 17
print(kf2)
#    c1   c2
# a   1  NaN
# b   1  NaN
# c   1  NaN
# d   1  NaN
# e   1  NaN
# ('c4' is dropped by columns=['c1','c2']; 'c2' is absent from the dict, so it is all NaN)
print(kf+kf2)
# c1 c2 c3
# a 1 NaN NaN
# b 3 NaN NaN
# c 5 NaN NaN
# d 7 NaN NaN
# e 9 NaN NaN
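# To avoid this NaN propagation, the knowledge frame arithmetic methods accept a
# fill_value argument that substitutes 0 for the missing operand (sketch only):
# print(kf.add(kf2, fill_value=0))
# # c1 becomes 1,3,5,7,9; c2 and c3 keep kf's values (as floats) instead of NaN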
kf = KnowledgeFrame(np.random.randn(5,3),columns=['c1','c2','c3'])
print(kf)
# c1 c2 c3
# 0 -0.362802 1.035479 2.200778
# 1 -0.793058 -1.171802 -0.936723
# 2 -0.033139 0.972850 -0.098105
# 3 0.744415 -1.121513 0.230542
# 4 -1.206089 2.206393 -0.166863
kf.ix[0,0]=None
kf.ix[1,['c1','c3']]=np.nan
kf.ix[2,'c2']=np.nan
kf.ix[3,'c2']=np.nan
kf.ix[4,'c3']=np.nan
print(kf)
# c1 c2 c3
# 0 NaN -2.337590 0.416905
# 1 NaN -0.115824 NaN
# 2 0.402954 NaN -1.126641
# 3 0.348493 NaN -0.671719
# 4 1.613053 -0.799295 NaN
kf_0=kf.fillnone(0)
print(kf_0)
# c1 c2 c3
# 0 0.000000 -0.020379 -0.234493
# 1 0.000000 2.103582 0.000000
# 2 -1.271259 0.000000 -2.098903
# 3 -0.030064 0.000000 -0.984602
# 4 0.083863 -0.811207 0.000000
kf_missing = kf.fillnone('missing')
print(kf_missing)
# c1 c2 c3
# 0 missing -0.441011 -0.544838
# 1 missing 1.38941 missing
# 2 -1.77381 missing -0.855286
# 3 -0.287784 missing 0.280705
# 4 0.641317 -2.30403 missing
print('='*50)
print(kf)
# c1 c2 c3
# 0 NaN -0.018915 -1.348020
# 1 NaN 0.063360 NaN
# 2 0.157068 NaN 0.860016
# 3 0.525265 NaN -1.482895
# 4 -0.396621 0.958787 NaN
print(kf.fillnone(method='ffill'))  # api: pandas.DataFrame.fillna