hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
474e39f932f09701d13fdc237d0528baa6a57177 | 13,299 | py | Python | quant/utils/web.py | guantau/thenextquant | 99942f4c0b70c24921f17eeba7beb76b4ac62930 | ["MIT"] | 1 | 2022-02-21T15:08:23.000Z | 2022-02-21T15:08:23.000Z | quant/utils/web.py | mrganer/thenextquant | 52fb22f5df20d43cb275a08adad81dc97f25a712 | ["MIT"] | null | null | null | quant/utils/web.py | mrganer/thenextquant | 52fb22f5df20d43cb275a08adad81dc97f25a712 | ["MIT"] | 2 | 2020-07-18T17:03:59.000Z | 2021-02-11T00:28:13.000Z |
# -*- coding:utf-8 -*-
"""
Web module.
Author: HuangTao
Date: 2018/08/26
Email: [email protected]
"""
import json
import base64
import aiohttp
from aiohttp import web
from urllib.parse import urlparse
from quant.utils import tools
from quant.utils import logger
from quant.config import config
from quant.utils import exceptions
from quant.tasks import LoopRunTask, SingleTask
__all__ = ("routes", "WebViewBase", "AuthToken", "auth_middleware", "error_middleware", "options_middleware",
"Websocket", "AsyncHttpRequests")
routes = web.RouteTableDef()
class WebViewBase(web.View):
""" Web view base.
"""
@property
def query_params(self):
if not hasattr(self, "_query_params"):
self._query_params = dict(self.request.query)
return self._query_params
@classmethod
def success(cls, data=None, msg="success", headers=None):
return cls.write(0, data, msg, headers)
@classmethod
def error(cls, code=400, msg="error", data=None):
return cls.write(code, data, msg, status_code=code, reason=msg)
@classmethod
def write(cls, code, data, msg, headers=None, status_code=200, reason=None):
if config.http_server.get("cors"): # Cross domain.
headers = headers if headers else {}
headers["Access-Control-Allow-Origin"] = "*"
headers["Access-Control-Allow-Headers"] = "*"
headers["Access-Control-Allow-Methods"] = "*"
result = {
"code": code,
"msg": msg,
"data": data
}
return web.json_response(result, headers=headers, status=status_code, reason=reason)
class AuthToken(object):
""" Token encode & decode.
"""
@classmethod
def encode(cls, user_id, username):
info = {
"user_id": user_id,
"username": username,
"timestamp": tools.get_cur_timestamp()
}
bdata = json.dumps(info).encode("ascii")
token = base64.b64encode(bdata)
return token.decode()
@classmethod
def decode(cls, token):
sdata = base64.b64decode(token).decode("utf-8")
items = json.loads(sdata)
user_id = items["user_id"]
username = items["username"]
timestamp = items["timestamp"]
return user_id, username, timestamp
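# Illustrative sketch (not part of the original module): a token round trip.
# It assumes tools.get_cur_timestamp() returns an integer UNIX timestamp.
def _example_auth_token_roundtrip():
    token = AuthToken.encode(user_id=1, username="alice")
    user_id, username, _timestamp = AuthToken.decode(token)
    assert (user_id, username) == (1, "alice")
    return token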
@web.middleware
async def auth_middleware(request, handler):
""" Authentication middleware.
"""
ext_uri = config.http_server.get("ext_uri", [])
if request.path not in ext_uri:
token = request.headers.get("Token")
if not token:
token = request.query.get("Token")
if not token:
raise exceptions.AuthenticationFailed(msg="Token miss.")
try:
user_id, username, timestamp = AuthToken.decode(token)
except:
raise exceptions.AuthenticationFailed(msg="Token error.")
if tools.get_cur_timestamp() - timestamp > 60 * 60 * 24: # expire time.
raise exceptions.AuthenticationFailed(msg="Token expired.")
request.user_id = user_id
response = await handler(request)
return response
@web.middleware
async def error_middleware(request, handler):
""" Catch custom exception in http handler.
"""
try:
response = await handler(request)
except Exception as e:
logger.exception("Error:", e)
if isinstance(e, exceptions.CustomException):
response = WebViewBase.error(e.code, e.msg, e.data)
else:
response = WebViewBase.error(500, "INTERNAL SERVER ERROR")
return response
@web.middleware
async def options_middleware(request, handler):
""" All OPTION method.
"""
if request.method == "OPTIONS":
response = WebViewBase.success()
else:
response = await handler(request)
return response
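# Illustrative sketch (not part of the original module): assembling an aiohttp
# application with the route table and middlewares defined above.
def _example_create_app():
    app = web.Application(middlewares=[options_middleware, auth_middleware, error_middleware])
    app.add_routes(routes)
    return app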
class Websocket:
""" Websocket connection.
Attributes:
url: Websocket connection url.
connected_callback: Asynchronous callback invoked once the connection to the Websocket server is established.
process_callback: Asynchronous callback invoked for every `text/json` message received from the Websocket
connection. e.g.
async def process_callback(json_message): pass
process_binary_callback: Asynchronous callback invoked for every `binary` message received from the Websocket
connection. e.g.
async def process_binary_callback(binary_message): pass
check_conn_interval: Interval (seconds) at which the Websocket connection is checked, default is 10s.
"""
def __init__(self, url, connected_callback=None, process_callback=None, process_binary_callback=None,
check_conn_interval=10):
"""Initialize."""
self._url = url
self._connected_callback = connected_callback
self._process_callback = process_callback
self._process_binary_callback = process_binary_callback
self._check_conn_interval = check_conn_interval
self._ws = None # Websocket connection object.
@property
def ws(self):
return self._ws
def initialize(self):
LoopRunTask.register(self._check_connection, self._check_conn_interval)
SingleTask.run(self._connect)
async def _connect(self):
logger.info("url:", self._url, caller=self)
proxy = config.proxy
session = aiohttp.ClientSession()
try:
self._ws = await session.ws_connect(self._url, proxy=proxy)
except aiohttp.client_exceptions.ClientConnectorError:
logger.error("connect to Websocket server error! url:", self._url, caller=self)
return
if self._connected_callback:
SingleTask.run(self._connected_callback)
SingleTask.run(self._receive)
async def _reconnect(self):
"""Re-connect to Websocket server."""
logger.warn("reconnecting to Websocket server right now!", caller=self)
await self._connect()
async def _receive(self):
"""Receive stream message from Websocket connection."""
async for msg in self.ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if self._process_callback:
try:
data = json.loads(msg.data)
except:
data = msg.data
SingleTask.run(self._process_callback, data)
elif msg.type == aiohttp.WSMsgType.BINARY:
if self._process_binary_callback:
SingleTask.run(self._process_binary_callback, msg.data)
elif msg.type == aiohttp.WSMsgType.CLOSED:
logger.warn("receive event CLOSED:", msg, caller=self)
SingleTask.run(self._reconnect)
elif msg.type == aiohttp.WSMsgType.ERROR:
logger.error("receive event ERROR:", msg, caller=self)
else:
logger.warn("unhandled msg:", msg, caller=self)
async def _check_connection(self, *args, **kwargs):
"""Check Websocket connection, if connection closed, re-connect immediately."""
if not self.ws:
logger.warn("Websocket connection not connected yet!", caller=self)
return
if self.ws.closed:
SingleTask.run(self._reconnect)
async def send(self, data):
""" Send message to Websocket server.
Args:
data: Message content, must be dict or string.
Returns:
If send successfully, return True, otherwise return False.
"""
if not self.ws:
logger.warn("Websocket connection not connected yet!", caller=self)
return False
if isinstance(data, dict):
await self.ws.send_json(data)
elif isinstance(data, str):
await self.ws.send_str(data)
else:
logger.error("send message failed:", data, caller=self)
return False
logger.debug("send message:", data, caller=self)
return True
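# Illustrative sketch (not part of the original module): wiring a Websocket
# client with a JSON message callback. The URL is a placeholder.
async def _example_on_ws_message(json_message):
    logger.info("received:", json_message)

def _example_create_websocket():
    ws = Websocket(
        "wss://example.com/stream",
        process_callback=_example_on_ws_message,
        check_conn_interval=10,
    )
    ws.initialize()  # registers the connection check and connects via SingleTask
    return ws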
class AsyncHttpRequests(object):
""" Asynchronous HTTP Request Client.
"""
# Each domain name holds one connection session, to reduce system resource usage and speed up requests.
_SESSIONS = {} # {"domain-name": session, ... }
@classmethod
async def fetch(cls, method, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" Create a HTTP request.
Args:
method: HTTP request method. (GET/POST/PUT/DELETE)
url: Request url.
params: HTTP query params.
body: HTTP request body, string or bytes format.
data: HTTP request body, dict format.
headers: HTTP request header.
timeout: HTTP request timeout(seconds), default is 30s.
kwargs:
proxy: HTTP proxy.
Return:
code: HTTP response code.
success: HTTP response data. If anything goes wrong, this field is None.
error: If anything goes wrong, this field holds the error information, otherwise it's None.
Raises:
HTTP request exceptions or response parsing exceptions are all captured and returned as
error information.
"""
session = cls._get_session(url)
if not kwargs.get("proxy"):
kwargs["proxy"] = config.proxy # If there is a HTTP PROXY assigned in config file?
try:
if method == "GET":
response = await session.get(url, params=params, headers=headers, timeout=timeout, **kwargs)
elif method == "POST":
response = await session.post(url, params=params, data=body, json=data, headers=headers,
timeout=timeout, **kwargs)
elif method == "PUT":
response = await session.put(url, params=params, data=body, json=data, headers=headers,
timeout=timeout, **kwargs)
elif method == "DELETE":
response = await session.delete(url, params=params, data=body, json=data, headers=headers,
timeout=timeout, **kwargs)
else:
error = "http method error!"
return None, None, error
except Exception as e:
logger.error("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
"data:", data, "Error:", e, caller=cls)
return None, None, e
code = response.status
if code not in (200, 201, 202, 203, 204, 205, 206):
text = await response.text()
logger.error("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
"data:", data, "code:", code, "result:", text, caller=cls)
return code, None, text
try:
result = await response.json()
except:
result = await response.text()
logger.warn("response data is not json format!", "method:", method, "url:", url, "headers:", headers,
"params:", params, "body:", body, "data:", data, "code:", code, "result:", result, caller=cls)
logger.debug("method:", method, "url:", url, "headers:", headers, "params:", params, "body:", body,
"data:", data, "code:", code, "result:", json.dumps(result), caller=cls)
return code, result, None
@classmethod
async def get(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP GET
"""
result = await cls.fetch("GET", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
async def post(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP POST
"""
result = await cls.fetch("POST", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
async def delete(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP DELETE
"""
result = await cls.fetch("DELETE", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
async def put(cls, url, params=None, body=None, data=None, headers=None, timeout=30, **kwargs):
""" HTTP PUT
"""
result = await cls.fetch("PUT", url, params, body, data, headers, timeout, **kwargs)
return result
@classmethod
def _get_session(cls, url):
""" Get the connection session for url's domain, if no session, create a new.
Args:
url: HTTP request url.
Returns:
session: HTTP request session.
"""
parsed_url = urlparse(url)
key = parsed_url.netloc or parsed_url.hostname
if key not in cls._SESSIONS:
session = aiohttp.ClientSession()
cls._SESSIONS[key] = session
return cls._SESSIONS[key]
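# Illustrative sketch (not part of the original module): issuing a GET request.
# The URL and query parameters are placeholders.
async def _example_http_get():
    code, result, error = await AsyncHttpRequests.get(
        "https://example.com/api/ping", params={"foo": "bar"}, timeout=10)
    if error:
        logger.error("request failed:", error)
    return code, result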
| 37.674221 | 123 | 0.604181 |
d822e5f44ceea068728afd67803ad06ec489acfc | 28,162 | py | Python | cleanrl/ppo_atari_visual.py | limberc/cleanrl | 61c7f4572bfbcfccb061f247e53e83119146e0db | ["MIT"] | null | null | null | cleanrl/ppo_atari_visual.py | limberc/cleanrl | 61c7f4572bfbcfccb061f247e53e83119146e0db | ["MIT"] | null | null | null | cleanrl/ppo_atari_visual.py | limberc/cleanrl | 61c7f4572bfbcfccb061f247e53e83119146e0db | ["MIT"] | null | null | null |
# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py
from collections import deque
import cv2
import gym
from gym import spaces
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def wrap_atari(env, max_episode_steps=None):
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
assert max_episode_steps is None
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
class ImageToPyTorch(gym.ObservationWrapper):
"""
Convert image shape from (height, width, channels) to (channels, height, width)
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_pytorch(env):
return ImageToPyTorch(env)
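# Illustrative sketch (not part of the original script): composing the Atari
# wrappers defined above into the observation pipeline used by the agent
# (frame-skipping, 84x84 grayscale, 4-frame stack, channels-first).
def _example_make_wrapped_env(gym_id="BreakoutNoFrameskip-v4"):
    env = gym.make(gym_id)
    env = wrap_atari(env)
    env = wrap_deepmind(env, clip_rewards=True, frame_stack=True, scale=False)
    env = wrap_pytorch(env)
    return env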
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
import argparse
from distutils.util import strtobool
import numpy as np
import gym
from gym.wrappers import Monitor
from gym.spaces import Discrete
import time
import random
import os
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnvWrapper
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PPO agent')
# Common arguments
parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(".py"),
help='the name of this experiment')
parser.add_argument('--gym-id', type=str, default="BreakoutNoFrameskip-v4",
help='the id of the gym environment')
parser.add_argument('--learning-rate', type=float, default=2.5e-4,
help='the learning rate of the optimizer')
parser.add_argument('--seed', type=int, default=1,
help='seed of the experiment')
parser.add_argument('--total-timesteps', type=int, default=10000000,
help='total timesteps of the experiments')
parser.add_argument('--torch-deterministic', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, `torch.backends.cudnn.deterministic=False`')
parser.add_argument('--cuda', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True,
help='if toggled, cuda will not be enabled by default')
parser.add_argument('--prod-mode', type=lambda x: bool(strtobool(x)), default=False, nargs='?', const=True,
help='run the script in production mode and use wandb to log outputs')
parser.add_argument('--capture-video', type=lambda x: bool(strtobool(x)), default=False, nargs='?', const=True,
help='whether to capture videos of the agent performances (check out `videos` folder)')
parser.add_argument('--wandb-project-name', type=str, default="cleanRL",
help="the wandb's project name")
parser.add_argument('--wandb-entity', type=str, default=None,
help="the entity (team) of wandb's project")
# Algorithm specific arguments
parser.add_argument('--n-minibatch', type=int, default=4,
help='the number of mini batch')
parser.add_argument('--num-envs', type=int, default=8,
help='the number of parallel game environment')
parser.add_argument('--num-steps', type=int, default=128,
help='the number of steps per game environment')
parser.add_argument('--gamma', type=float, default=0.99,
help='the discount factor gamma')
parser.add_argument('--gae-lambda', type=float, default=0.95,
help='the lambda for the general advantage estimation')
parser.add_argument('--ent-coef', type=float, default=0.01,
help="coefficient of the entropy")
parser.add_argument('--vf-coef', type=float, default=0.5,
help="coefficient of the value function")
parser.add_argument('--max-grad-norm', type=float, default=0.5,
help='the maximum norm for the gradient clipping')
parser.add_argument('--clip-coef', type=float, default=0.1,
help="the surrogate clipping coefficient")
parser.add_argument('--update-epochs', type=int, default=4,
help="the K epochs to update the policy")
parser.add_argument('--kle-stop', type=lambda x: bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will be early stopped w.r.t target-kl')
parser.add_argument('--kle-rollback', type=lambda x: bool(strtobool(x)), default=False, nargs='?', const=True,
help='If toggled, the policy updates will roll back to previous policy if KL exceeds target-kl')
parser.add_argument('--target-kl', type=float, default=0.03,
help='the target-kl variable that is referred by --kl')
parser.add_argument('--gae', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True,
help='Use GAE for advantage computation')
parser.add_argument('--norm-adv', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggles advantages normalization")
parser.add_argument('--anneal-lr', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True,
help="Toggle learning rate annealing for policy and value networks")
parser.add_argument('--clip-vloss', type=lambda x: bool(strtobool(x)), default=True, nargs='?', const=True,
help='Toggles whether or not to use a clipped loss for the value function, as per the paper.')
args = parser.parse_args()
if not args.seed:
args.seed = int(time.time())
args.batch_size = int(args.num_envs * args.num_steps)
args.minibatch_size = int(args.batch_size // args.n_minibatch)
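# With the default arguments: batch_size = 8 envs * 128 steps = 1024 samples
# per update, split into 1024 // 4 = 256-sample minibatches.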
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device):
super(VecPyTorch, self).__init__(venv)
self.device = device
def reset(self):
obs = self.venv.reset()
obs = torch.from_numpy(obs).float().to(self.device)
return obs
def step_async(self, actions):
actions = actions.cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
obs = torch.from_numpy(obs).float().to(self.device)
reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
return obs, reward, done, info
class ProbsVisualizationWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.env.reset()
self.image_shape = self.env.render(mode="rgb_array").shape
self.probs = [[0., 0., 0., 0.]]
# self.metadata['video.frames_per_second'] = 60
def set_probs(self, probs):
self.probs = probs
def render(self, mode="human"):
if mode == "rgb_array":
env_rgb_array = super().render(mode)
fig, ax = plt.subplots(figsize=(self.image_shape[1] / 100, self.image_shape[0] / 100),
constrained_layout=True, dpi=100)
df = pd.DataFrame(np.array(self.probs).T)
sns.barplot(x=df.index, y=0, data=df, ax=ax)
ax.set(xlabel='actions', ylabel='probs')
fig.canvas.draw()
X = np.array(fig.canvas.renderer.buffer_rgba())
Image.fromarray(X)
# Image.fromarray(X)
rgb_image = np.array(Image.fromarray(X).convert('RGB'))
plt.close(fig)
q_value_rgb_array = rgb_image
return np.append(env_rgb_array, q_value_rgb_array, axis=1)
else:
super().render(mode)
# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
'\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
import wandb
wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args),
name=experiment_name, monitor_gym=True, save_code=True)
writer = SummaryWriter(f"/tmp/{experiment_name}")
# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
def make_env(gym_id, seed, idx):
def thunk():
env = gym.make(gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env)
if args.capture_video:
if idx == 0:
env = ProbsVisualizationWrapper(env)
env = Monitor(env, f'videos/{experiment_name}')
env = wrap_pytorch(
wrap_deepmind(
env,
clip_rewards=True,
frame_stack=True,
scale=False,
)
)
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
return env
return thunk
envs = VecPyTorch(DummyVecEnv([make_env(args.gym_id, args.seed + i, i) for i in range(args.num_envs)]), device)
# if args.prod_mode:
# envs = VecPyTorch(
# SubprocVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)], "fork"),
# device
# )
assert isinstance(envs.action_space, Discrete), "only discrete action space is supported"
# ALGO LOGIC: initialize agent here:
class Scale(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
return x * self.scale
def layer_init(layer, std=np.sqrt(2), bias_const=0.0):
torch.nn.init.orthogonal_(layer.weight, std)
torch.nn.init.constant_(layer.bias, bias_const)
return layer
class Agent(nn.Module):
def __init__(self, envs, frames=4):
super(Agent, self).__init__()
self.network = nn.Sequential(
Scale(1 / 255),
layer_init(nn.Conv2d(frames, 32, 8, stride=4)),
nn.ReLU(),
layer_init(nn.Conv2d(32, 64, 4, stride=2)),
nn.ReLU(),
layer_init(nn.Conv2d(64, 64, 3, stride=1)),
nn.ReLU(),
nn.Flatten(),
layer_init(nn.Linear(3136, 512)),
nn.ReLU()
)
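# For 84x84 inputs, the conv stack above yields 64 feature maps of 7x7,
# hence the 3136-unit (64 * 7 * 7) flattened input to the linear layer.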
self.actor = layer_init(nn.Linear(512, envs.action_space.n), std=0.01)
self.critic = layer_init(nn.Linear(512, 1), std=1)
def forward(self, x):
return self.network(x)
def get_action(self, x, action=None):
logits = self.actor(self.forward(x))
probs = Categorical(logits=logits)
if action is None:
action = probs.sample()
return action, probs.log_prob(action), probs.entropy()
def get_value(self, x):
return self.critic(self.forward(x))
agent = Agent(envs).to(device)
optimizer = optim.Adam(agent.parameters(), lr=args.learning_rate, eps=1e-5)
if args.anneal_lr:
# https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/ppo2/defaults.py#L20
lr = lambda f: f * args.learning_rate
# ALGO Logic: Storage for epoch data
obs = torch.zeros((args.num_steps, args.num_envs) + envs.observation_space.shape).to(device)
actions = torch.zeros((args.num_steps, args.num_envs) + envs.action_space.shape).to(device)
logprobs = torch.zeros((args.num_steps, args.num_envs)).to(device)
rewards = torch.zeros((args.num_steps, args.num_envs)).to(device)
dones = torch.zeros((args.num_steps, args.num_envs)).to(device)
values = torch.zeros((args.num_steps, args.num_envs)).to(device)
# TRY NOT TO MODIFY: start the game
global_step = 0
# Note how `next_obs` and `next_done` are used; their usage is equivalent to
# https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/84a7582477fb0d5c82ad6d850fe476829dddd2e1/a2c_ppo_acktr/storage.py#L60
next_obs = envs.reset()
next_done = torch.zeros(args.num_envs).to(device)
num_updates = args.total_timesteps // args.batch_size
for update in range(1, num_updates + 1):
# Annealing the rate if instructed to do so.
if args.anneal_lr:
frac = 1.0 - (update - 1.0) / num_updates
lrnow = lr(frac)
optimizer.param_groups[0]['lr'] = lrnow
# TRY NOT TO MODIFY: prepare the execution of the game.
for step in range(0, args.num_steps):
global_step += 1 * args.num_envs
obs[step] = next_obs
dones[step] = next_done
# ALGO LOGIC: put action logic here
with torch.no_grad():
values[step] = agent.get_value(obs[step]).flatten()
action, logproba, _ = agent.get_action(obs[step])
# visualization
if args.capture_video:
probs_list = np.array(Categorical(
logits=agent.actor(agent.forward(obs[step]))).probs[0:1].tolist())
envs.env_method("set_probs", probs_list, indices=0)
actions[step] = action
logprobs[step] = logproba
# TRY NOT TO MODIFY: execute the game and log data.
next_obs, rs, ds, infos = envs.step(action)
rewards[step], next_done = rs.view(-1), torch.Tensor(ds).to(device)
for info in infos:
if 'episode' in info.keys():
print(f"global_step={global_step}, episode_reward={info['episode']['r']}")
writer.add_scalar("charts/episode_reward", info['episode']['r'], global_step)
break
# bootstrap the value if not done (the rollout reached the step limit)
with torch.no_grad():
last_value = agent.get_value(next_obs.to(device)).reshape(1, -1)
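# Generalized Advantage Estimation, computed backwards in time:
#   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
#   A_t = delta_t + gamma * lambda * (1 - done_{t+1}) * A_{t+1}
# with returns_t = A_t + V(s_t); the non-GAE branch instead bootstraps
# discounted returns directly.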
if args.gae:
advantages = torch.zeros_like(rewards).to(device)
lastgaelam = 0
for t in reversed(range(args.num_steps)):
if t == args.num_steps - 1:
nextnonterminal = 1.0 - next_done
nextvalues = last_value
else:
nextnonterminal = 1.0 - dones[t + 1]
nextvalues = values[t + 1]
delta = rewards[t] + args.gamma * nextvalues * nextnonterminal - values[t]
advantages[t] = lastgaelam = delta + args.gamma * args.gae_lambda * nextnonterminal * lastgaelam
returns = advantages + values
else:
returns = torch.zeros_like(rewards).to(device)
for t in reversed(range(args.num_steps)):
if t == args.num_steps - 1:
nextnonterminal = 1.0 - next_done
next_return = last_value
else:
nextnonterminal = 1.0 - dones[t + 1]
next_return = returns[t + 1]
returns[t] = rewards[t] + args.gamma * nextnonterminal * next_return
advantages = returns - values
# flatten the batch
b_obs = obs.reshape((-1,) + envs.observation_space.shape)
b_logprobs = logprobs.reshape(-1)
b_actions = actions.reshape((-1,) + envs.action_space.shape)
b_advantages = advantages.reshape(-1)
b_returns = returns.reshape(-1)
b_values = values.reshape(-1)
# Optimizing the policy and value network
target_agent = Agent(envs).to(device)
inds = np.arange(args.batch_size, )
for i_epoch_pi in range(args.update_epochs):
np.random.shuffle(inds)
target_agent.load_state_dict(agent.state_dict())
for start in range(0, args.batch_size, args.minibatch_size):
end = start + args.minibatch_size
minibatch_ind = inds[start:end]
mb_advantages = b_advantages[minibatch_ind]
if args.norm_adv:
mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)
_, newlogproba, entropy = agent.get_action(b_obs[minibatch_ind], b_actions.long()[minibatch_ind])
ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()
# Stats
approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()
# Policy loss
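# PPO clipped surrogate objective; the sign is flipped so that taking the
# max of the two negated terms is equivalent to minimizing
#   -min(ratio * A, clip(ratio, 1 - eps, 1 + eps) * A).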
pg_loss1 = -mb_advantages * ratio
pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - args.clip_coef, 1 + args.clip_coef)
pg_loss = torch.max(pg_loss1, pg_loss2).mean()
entropy_loss = entropy.mean()
# Value loss
new_values = agent.get_value(b_obs[minibatch_ind]).view(-1)
if args.clip_vloss:
v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)
v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind], -args.clip_coef,
args.clip_coef)
v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2
v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)
v_loss = 0.5 * v_loss_max.mean()
else:
v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()
loss = pg_loss - args.ent_coef * entropy_loss + v_loss * args.vf_coef
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(agent.parameters(), args.max_grad_norm)
optimizer.step()
if args.kle_stop:
if approx_kl > args.target_kl:
break
if args.kle_rollback:
if (b_logprobs[minibatch_ind] - agent.get_action(b_obs[minibatch_ind], b_actions.long()[minibatch_ind])[
1]).mean() > args.target_kl:
agent.load_state_dict(target_agent.state_dict())
break
# TRY NOT TO MODIFY: record rewards for plotting purposes
writer.add_scalar("charts/learning_rate", optimizer.param_groups[0]['lr'], global_step)
writer.add_scalar("losses/value_loss", v_loss.item(), global_step)
writer.add_scalar("losses/policy_loss", pg_loss.item(), global_step)
writer.add_scalar("losses/entropy", entropy.mean().item(), global_step)
writer.add_scalar("losses/approx_kl", approx_kl.item(), global_step)
if args.kle_stop or args.kle_rollback:
writer.add_scalar("debug/pg_stop_iter", i_epoch_pi, global_step)
envs.close()
writer.close()
| 38.790634 | 133 | 0.616931 |
c99a58ea8d2a3b8358703a64c0aaa8fd82aac883 | 4,139 | py | Python | instabot/bot/bot_like.py | StefanoPoma97/insta-bot1 | 94720c241a697ed3fc67d6f4bc94bd34ee20b27a | ["Apache-2.0"] | null | null | null | instabot/bot/bot_like.py | StefanoPoma97/insta-bot1 | 94720c241a697ed3fc67d6f4bc94bd34ee20b27a | ["Apache-2.0"] | null | null | null | instabot/bot/bot_like.py | StefanoPoma97/insta-bot1 | 94720c241a697ed3fc67d6f4bc94bd34ee20b27a | ["Apache-2.0"] | null | null | null |
from tqdm import tqdm
def like(self, media_id):
if not self.reached_limit('likes'):
self.delay('like')
if not self.check_media(media_id):
return False
if self.api.like(media_id):
self.logger.info("Liked media %d." % media_id)
self.total['likes'] += 1
return True
else:
self.logger.info("Out of likes for today.")
return False
def like_comment(self, comment_id):
if self.api.like_comment(comment_id):
return True
return False
def like_media_comments(self, media_id):
broken_items = []
media_comments = self.get_media_comments(media_id)
comment_ids = [item["pk"] for item in media_comments if not item["has_liked_comment"]]
if not comment_ids:
self.logger.info("None comments received: comments not found or comments have been filtered.")
return broken_items
self.logger.info("Going to like %d comments." % (len(comment_ids)))
for comment in tqdm(comment_ids):
if not self.like_comment(comment):
self.error_delay()
broken_items = comment_ids[comment_ids.index(comment):]
self.logger.info("DONE: Liked {count} comments.".format(
count=len(comment_ids) - len(broken_items)
))
return broken_items
def like_medias(self, medias):
broken_items = []
if not medias:
self.logger.info("Nothing to like.")
return broken_items
self.logger.info("Going to like %d medias." % (len(medias)))
for media in tqdm(medias):
if not self.like(media):
self.error_delay()
broken_items.append(media)
self.logger.info("DONE: Total liked %d medias." % self.total['likes'])
return broken_items
def like_timeline(self, amount=None):
self.logger.info("Liking timeline feed:")
medias = self.get_timeline_medias()[:amount]
return self.like_medias(medias)
def like_user(self, user_id, amount=None, filtration=True):
""" Likes last user_id's medias """
if filtration:
if not self.check_user(user_id):
return False
self.logger.info("Liking user_%s's feed:" % user_id)
user_id = self.convert_to_user_id(user_id)
medias = self.get_user_medias(user_id, filtration=filtration)
if not medias:
self.logger.info(
"None medias received: account is closed or medias have been filtered.")
return False
return self.like_medias(medias[:amount])
def like_users(self, user_ids, nlikes=None, filtration=True):
for user_id in user_ids:
if self.reached_limit('likes'):
self.logger.info("Out of likes for today.")
return
self.like_user(user_id, amount=nlikes, filtration=filtration)
def like_hashtag(self, hashtag, amount=None):
""" Likes last medias from hashtag """
self.logger.info("Going to like media with hashtag #%s." % hashtag)
medias = self.get_total_hashtag_medias(hashtag, amount)
return self.like_medias(medias)
def like_geotag(self, geotag, amount=None):
# TODO: like medias by geotag
pass
def like_followers(self, user_id, nlikes=None, nfollows=None):
self.logger.info("Like followers of: %s." % user_id)
if self.reached_limit('likes'):
self.logger.info("Out of likes for today.")
return
if not user_id:
self.logger.info("User not found.")
return
follower_ids = self.get_user_followers(user_id, nfollows)
if not follower_ids:
self.logger.info("%s not found / closed / has no followers." % user_id)
else:
self.like_users(follower_ids[:nfollows], nlikes)
def like_following(self, user_id, nlikes=None):
self.logger.info("Like following of: %s." % user_id)
if self.reached_limit('likes'):
self.logger.info("Out of likes for today.")
return
if not user_id:
self.logger.info("User not found.")
return
following_ids = self.get_user_following(user_id)
if not following_ids:
self.logger.info("%s not found / closed / has no following." % user_id)
else:
self.like_users(following_ids, nlikes)
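# Illustrative sketch (assumption: as in the upstream instabot project, the
# functions above are bound as methods of the Bot class):
#
#     from instabot import Bot
#     bot = Bot()
#     bot.login(username="user", password="pass")
#     bot.like_hashtag("travel", amount=10)
#     bot.like_followers("some_user", nlikes=2, nfollows=50)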
| 32.335938 | 102 | 0.658855 |
6ca7a36338f1bf7f31c1bbd1b37d8f695bfb71fd | 397 | py | Python | basics/solutions/array_operations.py | carlosal1015/ACM-Python-Tutorials-KAUST-2015 | 688acf1017dba7687254a8c880b7f19c6f939c3f | ["CC-BY-3.0"] | 5 | 2019-01-16T14:43:43.000Z | 2021-06-29T02:20:47.000Z | basics/solutions/array_operations.py | carlosal1015/ACM-Python-Tutorials-KAUST-2015 | 688acf1017dba7687254a8c880b7f19c6f939c3f | ["CC-BY-3.0"] | null | null | null | basics/solutions/array_operations.py | carlosal1015/ACM-Python-Tutorials-KAUST-2015 | 688acf1017dba7687254a8c880b7f19c6f939c3f | ["CC-BY-3.0"] | 3 | 2017-02-21T06:19:19.000Z | 2021-06-29T02:20:54.000Z |
import numpy as np
print('Part 1:')
print(10 + np.arange(90))
print('\nPart 2:')
print(np.nonzero([1, 2, 0, 0, 4, 0])[0])
print('\nPart 3:')
chess = np.zeros((8, 8))
chess[1::2, ::2] = 1
chess[::2, 1::2] = 1
print(chess)
print('\nPart 4:')
print(np.fromfunction(lambda i, j: (i + 1) * (j + 1) ** 2, (5, 3), dtype=int))
print('\nPart 5:')
normal_dist = np.random.normal(3, 1.5, (4, 4))
print(normal_dist)
print(normal_dist.sum(axis=1))
| 19.85 | 71 | 0.619647 |
76516803db1be76f7c5fa9baf54d0b106b37884c | 5,395 | py | Python | library/py/sphinxextras/cmakedomain.py | eblot/tde-base | 0be99e58cbe8ee70c896645131be6e0ad3c75744 | ["MIT"] | null | null | null | library/py/sphinxextras/cmakedomain.py | eblot/tde-base | 0be99e58cbe8ee70c896645131be6e0ad3c75744 | ["MIT"] | null | null | null | library/py/sphinxextras/cmakedomain.py | eblot/tde-base | 0be99e58cbe8ee70c896645131be6e0ad3c75744 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
sphinx_domain_cmake
~~~~~~~~~~~~~~~~~~~
A CMake domain.
:copyright: 2012 by Kay-Uwe (Kiwi) Lorenz, ModuleWorks GmbH
:license: BSD, see LICENSE for details.
"""
from .domaintools import custom_domain
import re
from docutils import nodes
from sphinx import addnodes
from sphinx.domains import Domain, ObjType
from sphinx.domains.std import GenericObject
from sphinx.locale import l_, _
from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
cmake_param_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)\s+(.*)')
macro_sig_re = re.compile(r'(\w+)\(([^)]*)\)')
_term = r'''
<\w+>(?:\.\.\.)?
|
\[
(?: [^\[\]]+
| (?=\[) \[ [^\[\]]+ \] # allow one nesting level
)+
\]
|
\{
(?:[^{}]+
| (?=\{) \{ [^{}]+ \} # allow one nesting level
)+
\}(?:\.\.\.)?
'''
# Neotion syntax
#macro_param_re = re.compile(r'''
# (?:\[(?P<key>[^\s]+)(?:\=|\s+)(?P<value>[^\s]+)\]) | (?P<flag>[^\s]+)
# ''', re.VERBOSE)
# Default syntax
macro_param_re = re.compile(r'''
%s | (?P<key>[^\s]+)\s+(?P<value>(?:%s)) | (?P<flag>[^\s]+)
''' % (_term,_term), re.VERBOSE)
class desc_cmake_argumentlist(nodes.Part, nodes.Inline, nodes.TextElement):
"""Node for a general parameter list."""
child_text_separator = ' '
def argumentlist_visit(self, node):
self.visit_desc_parameterlist(node)
def argumentlist_depart(self, node):
self.depart_desc_parameterlist(node)
def html_argumentlist_visit(self, node):
self.visit_desc_parameterlist(node)
if len(node.children) > 3:
self.body.append('<span class="long-argument-list">')
else:
self.body.append('<span class="argument-list">')
def html_argumentlist_depart(self, node):
self.body.append('</span>')
self.depart_desc_parameterlist(node)
class desc_cmake_argument(nodes.Part, nodes.Inline, nodes.TextElement):
"""Node for an argument wrapper"""
def argument_visit(self, node):
pass
def argument_depart(self, node):
pass
def html_argument_visit(self, node):
self.body.append('<span class="arg"><em>')
def html_argument_depart(self, node):
self.body.append("</em></span> ")
#class desc_cmake_argumentlist(addnodes.desc_parameterlist):
#child_text_separator = " "
# sphinx.HTMLTranslator should be derived from GenericNodeVisitor, but it
# is not
#import sphinx.writers.html as _html
#kjksetattr(_html.
def _get_param_node(m):
if m.group('key'):
node = addnodes.desc_parameter()
key = nodes.strong(m.group('key'), m.group('key'))
key['classes'].append('arg-key')
node += key
node += nodes.Text(" ", " ")
value = nodes.inline(m.group('value'), m.group('value'))
value['classes'].append('arg-value')
node += value
return node
elif m.group('flag'):
node = addnodes.desc_parameter()
flag = nodes.strong(m.group('flag'), m.group('flag'))
flag['classes'].append('arg-flag')
node += flag
return flag
else:
return addnodes.desc_parameter(m.group(0), m.group(0))
def parse_macro(env, sig, signode):
#import rpdb2 ; rpdb2.start_embedded_debugger('foo')
m = macro_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = desc_cmake_argumentlist()
for m in macro_param_re.finditer(args):
arg = m.group(0)
if arg.startswith('['):
arg = arg[1:-1].strip()
x = desc_cmake_argument()
opt = addnodes.desc_optional()
x += opt
m = macro_param_re.match(arg)
assert m is not None, "%s does not match %s" % (arg, macro_param_re.pattern)
opt += _get_param_node(m)
plist += x
# elif arg.startswith('{') choice
else:
x = desc_cmake_argument()
x += _get_param_node(m)
plist += x
signode += plist
return name
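# Illustrative sketch (not part of the original module): with this domain
# registered, reStructuredText sources can document macros such as
#
#     .. cmake:macro:: my_add_module(<name> [STATIC] SOURCES <src>...)
#
#        :param SOURCES: source files compiled into the module.
#
# parse_macro() splits the signature into the macro name and an argument
# list rendered with the nodes defined above.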
def setup(app):
from sphinx.util.docfields import GroupedField
app.add_node(
node = desc_cmake_argumentlist,
html = (html_argumentlist_visit, html_argumentlist_depart),
latex = (argumentlist_visit, argumentlist_depart),
)
app.add_node(
node = desc_cmake_argument,
html = (html_argument_visit, html_argument_depart),
latex = (argument_visit, argument_depart),
)
app.add_domain(custom_domain('CMakeDomain',
name = 'cmake',
label = "CMake",
elements = dict(
macro = dict(
# role = 'xxx' if differs from macro
# objtype = 'xxx' if differs from macro
objname = "CMake Macro",
indextemplate = "pair: %s; CMake macro",
parse = parse_macro,
fields = [
GroupedField('parameter',
label = "Parameters",
names = [ 'param' ])
]
),
var = dict(
objname = "CMake Variable",
indextemplate = "pair: %s; CMake variable"
),
)))
# vim: ts=4 : sw=4 : et
| 25.56872 | 88 | 0.572567 |
d0e5b4c55d35e4d4463a595d56e44b850631ca29 | 25,976 | py | Python | kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | ["BSD-3-Clause"] | null | null | null | kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | ["BSD-3-Clause"] | null | null | null | kafka_consumer/datadog_checks/kafka_consumer/legacy_0_10_2.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | ["BSD-3-Clause"] | null | null | null |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import division
from collections import defaultdict
from time import time
from kafka import errors as kafka_errors
from kafka.client import KafkaClient
from kafka.protocol.commit import GroupCoordinatorRequest, OffsetFetchRequest
from kafka.protocol.offset import OffsetRequest, OffsetResetStrategy, OffsetResponse
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError
from six import string_types
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from .constants import CONTEXT_UPPER_BOUND, DEFAULT_KAFKA_TIMEOUT, KAFKA_INTERNAL_TOPICS
class LegacyKafkaCheck_0_10_2(AgentCheck):
"""
Check the offsets and lag of Kafka consumers. This check also returns broker highwater offsets.
This is the legacy codepath which is used when either broker version < 0.10.2 or zk_connect_str has a value.
"""
__NAMESPACE__ = 'kafka'
def __init__(self, name, init_config, instances):
super(LegacyKafkaCheck_0_10_2, self).__init__(name, init_config, instances)
self._context_limit = int(init_config.get('max_partition_contexts', CONTEXT_UPPER_BOUND))
self._custom_tags = self.instance.get('tags', [])
self._monitor_unlisted_consumer_groups = is_affirmative(
self.instance.get('monitor_unlisted_consumer_groups', False)
)
self._monitor_all_broker_highwatermarks = is_affirmative(
self.instance.get('monitor_all_broker_highwatermarks', False)
)
self._consumer_groups = self.instance.get('consumer_groups', {})
# Note: We cannot skip validation if monitor_unlisted_consumer_groups is True because this legacy check only
# supports that functionality for Zookeeper, not Kafka.
self._validate_explicit_consumer_groups()
self._kafka_client = self._create_kafka_client()
self._zk_hosts_ports = self.instance.get('zk_connect_str')
# If we are collecting from Zookeeper, then create a long-lived zk client
if self._zk_hosts_ports is not None:
# any chroot prefix gets appended onto the host string or the last item on the host list
chroot = self.instance.get('zk_prefix')
if chroot is not None:
if isinstance(self._zk_hosts_ports, string_types):
self._zk_hosts_ports += chroot
elif isinstance(self._zk_hosts_ports, list):
self._zk_hosts_ports.append(chroot)
else:
raise ConfigurationError("zk_connect_str must be a string or list of strings")
self._zk_client = KazooClient(hosts=self._zk_hosts_ports, timeout=int(init_config.get('zk_timeout', 5)))
self._zk_client.start()
def check(self, instance):
"""The main entrypoint of the check."""
self.log.debug("Running legacy Kafka Consumer check.")
self._zk_consumer_offsets = {} # Expected format: {(consumer_group, topic, partition): offset}
self._kafka_consumer_offsets = {} # Expected format: {(consumer_group, topic, partition): offset}
self._highwater_offsets = {} # Expected format: {(topic, partition): offset}
# For calculating consumer lag, we have to fetch both the consumer offset and the broker highwater offset.
# There's a potential race condition because whichever one we check first may be outdated by the time we check
# the other. Better to check consumer offsets before checking broker offsets because worst case is that
# overstates consumer lag a little. Doing it the other way can understate consumer lag to the point of having
# negative consumer lag, which just creates confusion because it's theoretically impossible.
# Fetch consumer group offsets from Zookeeper
if self._zk_hosts_ports is not None:
try:
self._get_zk_consumer_offsets()
except Exception:
self.log.exception("There was a problem collecting consumer offsets from Zookeeper.")
# don't raise because we might get valid broker offsets
# Fetch consumer group offsets from Kafka
# Support for storing offsets in Kafka not available until Kafka 0.8.2. Also, for legacy reasons, this check
# only fetches consumer offsets from Kafka if Zookeeper is omitted or kafka_consumer_offsets is True.
if self._kafka_client.config.get('api_version') >= (0, 8, 2) and is_affirmative(
instance.get('kafka_consumer_offsets', self._zk_hosts_ports is None)
):
try:
self._get_kafka_consumer_offsets()
except Exception:
self.log.exception("There was a problem collecting consumer offsets from Kafka.")
# don't raise because we might get valid broker offsets
# Fetch the broker highwater offsets
try:
self._get_highwater_offsets()
except Exception:
self.log.exception('There was a problem collecting the highwater mark offsets')
# Unlike consumer offsets, fail immediately because we can't calculate consumer lag w/o highwater_offsets
raise
total_contexts = sum(
[len(self._zk_consumer_offsets), len(self._kafka_consumer_offsets), len(self._highwater_offsets)]
)
if total_contexts > self._context_limit:
self.warning(
"""Discovered %s metric contexts - this exceeds the maximum number of %s contexts permitted by the
check. Please narrow your target by specifying in your kafka_consumer.yaml the consumer groups, topics
and partitions you wish to monitor.""",
total_contexts,
self._context_limit,
)
# Report the metrics
self._report_highwater_offsets()
self._report_consumer_offsets_and_lag(self._kafka_consumer_offsets)
# if someone is in the middle of migrating their offset storage from zookeeper to kafka, they need to identify
# which source is reporting which offsets. So we tag zookeeper with 'source:zk'
self._report_consumer_offsets_and_lag(self._zk_consumer_offsets, source='zk')
def _create_kafka_client(self):
kafka_conn_str = self.instance.get('kafka_connect_str')
if not isinstance(kafka_conn_str, (string_types, list)):
raise ConfigurationError('kafka_connect_str should be string or list of strings')
kafka_version = self.instance.get('kafka_client_api_version')
if isinstance(kafka_version, str):
kafka_version = tuple(map(int, kafka_version.split(".")))
kafka_client = KafkaClient(
bootstrap_servers=kafka_conn_str,
client_id='dd-agent',
request_timeout_ms=self.init_config.get('kafka_timeout', DEFAULT_KAFKA_TIMEOUT) * 1000,
# if `kafka_client_api_version` is not set, then kafka-python automatically probes the cluster for broker
# version during the bootstrapping process. Note that probing randomly picks a broker to probe, so in a
# mixed-version cluster probing returns a non-deterministic result.
api_version=kafka_version,
# While we check for SSL params, if not present they will default to the kafka-python values for plaintext
# connections
security_protocol=self.instance.get('security_protocol', 'PLAINTEXT'),
sasl_mechanism=self.instance.get('sasl_mechanism'),
sasl_plain_username=self.instance.get('sasl_plain_username'),
sasl_plain_password=self.instance.get('sasl_plain_password'),
sasl_kerberos_service_name=self.instance.get('sasl_kerberos_service_name', 'kafka'),
sasl_kerberos_domain_name=self.instance.get('sasl_kerberos_domain_name'),
ssl_cafile=self.instance.get('ssl_cafile'),
ssl_check_hostname=self.instance.get('ssl_check_hostname', True),
ssl_certfile=self.instance.get('ssl_certfile'),
ssl_keyfile=self.instance.get('ssl_keyfile'),
ssl_crlfile=self.instance.get('ssl_crlfile'),
ssl_password=self.instance.get('ssl_password'),
)
# Force initial population of the local cluster metadata cache
kafka_client.poll(future=kafka_client.cluster.request_update())
if kafka_client.cluster.topics(exclude_internal_topics=False) is None:
raise RuntimeError("Local cluster metadata cache did not populate.")
return kafka_client
def _make_blocking_req(self, request, node_id=None):
if node_id is None:
node_id = self._kafka_client.least_loaded_node()
while not self._kafka_client.ready(node_id):
# poll until the connection to broker is ready, otherwise send() will fail with NodeNotReadyError
self._kafka_client.poll()
future = self._kafka_client.send(node_id, request)
self._kafka_client.poll(future=future) # block until we get response.
if future.failed():
raise future.exception # pylint: disable-msg=raising-bad-type
response = future.value
return response
def _get_highwater_offsets(self):
"""
Fetch highwater offsets for topic_partitions in the Kafka cluster.
If monitor_all_broker_highwatermarks is True, will fetch for all partitions in the cluster. Otherwise highwater
mark offsets will only be fetched for topic partitions where this check run has already fetched a consumer
offset.
Internal Kafka topics like __consumer_offsets, __transaction_state, etc are always excluded.
Any partitions that don't currently have a leader will be skipped.
Sends one OffsetRequest per broker to get offsets for all partitions where that broker is the leader:
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetAPI(AKAListOffset)
"""
# If we aren't fetching all broker highwater offsets, then construct the unique set of topic partitions for
# which this run of the check has at least once saved consumer offset. This is later used as a filter for
# excluding partitions.
if not self._monitor_all_broker_highwatermarks:
tps_with_consumer_offset = {(topic, partition) for (_, topic, partition) in self._kafka_consumer_offsets}
tps_with_consumer_offset.update({(topic, partition) for (_, topic, partition) in self._zk_consumer_offsets})
for broker in self._kafka_client.cluster.brokers():
broker_led_partitions = self._kafka_client.cluster.partitions_for_broker(broker.nodeId)
if broker_led_partitions is None:
continue
# Take the partitions for which this broker is the leader and group them by topic in order to construct the
# OffsetRequest while simultaneously filtering out partitions we want to exclude
partitions_grouped_by_topic = defaultdict(list)
for topic, partition in broker_led_partitions:
# No sense fetching highwater offsets for internal topics
if topic not in KAFKA_INTERNAL_TOPICS and (
self._monitor_all_broker_highwatermarks or (topic, partition) in tps_with_consumer_offset
):
partitions_grouped_by_topic[topic].append(partition)
# Construct the OffsetRequest
max_offsets = 1
request = OffsetRequest[0](
replica_id=-1,
topics=[
(topic, [(partition, OffsetResetStrategy.LATEST, max_offsets) for partition in partitions])
for topic, partitions in partitions_grouped_by_topic.items()
],
)
response = self._make_blocking_req(request, node_id=broker.nodeId)
self._process_highwater_offsets(response)
def _process_highwater_offsets(self, response):
"""Parse an OffsetFetchResponse and save it to the highwater_offsets dict."""
if type(response) not in OffsetResponse:
raise RuntimeError("response type should be OffsetResponse, but instead was %s." % type(response))
for topic, partitions_data in response.topics:
for partition, error_code, offsets in partitions_data:
error_type = kafka_errors.for_code(error_code)
if error_type is kafka_errors.NoError:
self._highwater_offsets[(topic, partition)] = offsets[0]
elif error_type is kafka_errors.NotLeaderForPartitionError:
self.log.warning(
"Kafka broker returned %s (error_code %s) for topic %s, partition: %s. This should only happen "
"if the broker that was the partition leader when kafka_admin_client last fetched metadata is "
"no longer the leader.",
error_type.message,
error_type.errno,
topic,
partition,
)
self._kafka_client.cluster.request_update() # force metadata update on next poll()
elif error_type is kafka_errors.UnknownTopicOrPartitionError:
self.log.warning(
"Kafka broker returned %s (error_code %s) for topic: %s, partition: %s. This should only "
"happen if the topic is currently being deleted or the check configuration lists non-existent "
"topic partitions.",
error_type.message,
error_type.errno,
topic,
partition,
)
else:
raise error_type(
"Unexpected error encountered while attempting to fetch the highwater offsets for topic: %s, "
"partition: %s." % (topic, partition)
)
def _report_highwater_offsets(self):
"""Report the broker highwater offsets."""
for (topic, partition), highwater_offset in self._highwater_offsets.items():
broker_tags = ['topic:%s' % topic, 'partition:%s' % partition]
broker_tags.extend(self._custom_tags)
self.gauge('broker_offset', highwater_offset, tags=broker_tags)
def _report_consumer_offsets_and_lag(self, consumer_offsets, **kwargs):
"""Report the consumer group offsets and consumer lag."""
for (consumer_group, topic, partition), consumer_offset in consumer_offsets.items():
consumer_group_tags = ['topic:%s' % topic, 'partition:%s' % partition, 'consumer_group:%s' % consumer_group]
if 'source' in kwargs:
consumer_group_tags.append('source:%s' % kwargs['source'])
consumer_group_tags.extend(self._custom_tags)
if partition in self._kafka_client.cluster.partitions_for_topic(topic):
# report consumer offset if the partition is valid because even if leaderless the consumer offset will
# be valid once the leader failover completes
self.gauge('consumer_offset', consumer_offset, tags=consumer_group_tags)
if (topic, partition) not in self._highwater_offsets:
self.log.warning(
"Consumer group: %s has offsets for topic: %s partition: %s, but no stored highwater offset "
"(likely the partition is in the middle of leader failover) so cannot calculate consumer lag.",
consumer_group,
topic,
partition,
)
continue
consumer_lag = self._highwater_offsets[(topic, partition)] - consumer_offset
self.gauge('consumer_lag', consumer_lag, tags=consumer_group_tags)
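# Worked example with made-up numbers: a highwater offset of 1200 and a committed
# consumer offset of 1150 yield consumer_lag = 50; a committed offset beyond the
# highwater mark makes the lag negative and triggers the event below.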
if consumer_lag < 0: # this will effectively result in data loss, so emit an event for max visibility
title = "Negative consumer lag for group: {}.".format(consumer_group)
message = (
"Consumer group: {}, topic: {}, partition: {} has negative consumer lag. This should never "
"happen and will result in the consumer skipping new messages until the lag turns "
"positive.".format(consumer_group, topic, partition)
)
key = "{}:{}:{}".format(consumer_group, topic, partition)
self._send_event(title, message, consumer_group_tags, 'consumer_lag', key, severity="error")
self.log.debug(message)
else:
self.log.warning(
"Consumer group: %s has offsets for topic: %s, partition: %s, but that topic partition doesn't "
"appear to exist in the cluster so skipping reporting these offsets.",
consumer_group,
topic,
partition,
)
self._kafka_client.cluster.request_update() # force metadata update on next poll()
def _get_zk_path_children(self, zk_path, name_for_error):
"""Fetch child nodes for a given Zookeeper path."""
children = []
try:
children = self._zk_client.get_children(zk_path)
except NoNodeError:
self.log.info('No zookeeper node at %s', zk_path)
except Exception:
self.log.exception('Could not read %s from %s', name_for_error, zk_path)
return children
def _get_zk_consumer_offsets(self):
"""
Fetch consumer group offsets from Zookeeper.
Consumer groups, topics, and partitions are read from the statically configured
self._consumer_groups; any topics or partitions not listed there are discovered
from Zookeeper instead. When monitor_unlisted_consumer_groups is enabled, every
consumer group found under /consumers/ is fetched. For examples of what the
configured dict can look like, see _validate_explicit_consumer_groups().
"""
# Construct the Zookeeper path pattern
# /consumers/[groupId]/offsets/[topic]/[partitionId]
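# e.g. the offset committed by a hypothetical group "billing" for partition 3 of
# topic "orders" would live at /consumers/billing/offsets/orders/3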
zk_path_consumer = '/consumers/'
zk_path_topic_tmpl = zk_path_consumer + '{group}/offsets/'
zk_path_partition_tmpl = zk_path_topic_tmpl + '{topic}/'
if self._monitor_unlisted_consumer_groups:
# don't overwrite self._consumer_groups because that holds the static config values which are always used
# when fetching consumer offsets from Kafka. Also, these dynamically fetched groups may change on each run.
consumer_groups = {
consumer_group: None
for consumer_group in self._get_zk_path_children(zk_path_consumer, 'consumer groups')
}
else:
consumer_groups = self._consumer_groups
for consumer_group, topics in consumer_groups.items():
if not topics:  # If topics aren't specified, fetch them from ZK
zk_path_topics = zk_path_topic_tmpl.format(group=consumer_group)
topics = {topic: None for topic in self._get_zk_path_children(zk_path_topics, 'topics')}
for topic, partitions in topics.items():
if not partitions: # If partitions aren't specified, fetch them from ZK
zk_path_partitions = zk_path_partition_tmpl.format(group=consumer_group, topic=topic)
# Zookeeper returns the partition IDs as strings because they are extracted from the node path
partitions = [int(x) for x in self._get_zk_path_children(zk_path_partitions, 'partitions')]
for partition in partitions:
zk_path = (zk_path_partition_tmpl + '{partition}/').format(
group=consumer_group, topic=topic, partition=partition
)
try:
consumer_offset = int(self._zk_client.get(zk_path)[0])
key = (consumer_group, topic, partition)
self._zk_consumer_offsets[key] = consumer_offset
except NoNodeError:
self.log.info('No zookeeper node at %s', zk_path)
continue
except Exception:
self.log.exception('Could not read consumer offset from %s', zk_path)
def _get_kafka_consumer_offsets(self):
"""
Fetch Consumer Group offsets from Kafka.
These offsets are stored in the __consumer_offsets topic rather than in Zookeeper.
"""
for consumer_group, topic_partitions in self._consumer_groups.items():
if not topic_partitions:
raise ConfigurationError(
'Invalid configuration - if you are collecting consumer offsets from Kafka, and your brokers are '
'older than 0.10.2, then you _must_ specify consumer groups and their topics. Older brokers lack '
'the necessary protocol support to determine which topics a consumer is consuming. See KIP-88 for '
'details.'
)
try: # catch exceptions on a group-by-group basis so that if one fails we still fetch the other groups
for topic, partitions in topic_partitions.items():
if not partitions:
# If partitions omitted, then we assume the group is consuming all partitions for the topic.
# Fetch consumer offsets even for unavailable partitions because those will be valid once the
# partition finishes leader failover.
topic_partitions[topic] = self._kafka_client.cluster.partitions_for_topic(topic)
coordinator_id = self._get_group_coordinator(consumer_group)
if coordinator_id is not None:
# Kafka protocol uses OffsetFetchRequests to retrieve consumer offsets:
# https://kafka.apache.org/protocol#The_Messages_OffsetFetch
# https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchRequest
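# Illustrative shape, with made-up names: OffsetFetchRequest[1]("billing", [("orders", [0, 1])])
# asks the coordinator for the offsets committed by group "billing" on partitions 0 and 1.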
request = OffsetFetchRequest[1](consumer_group, list(topic_partitions.items()))
response = self._make_blocking_req(request, node_id=coordinator_id)
for (topic, partition_offsets) in response.topics:
for partition, offset, _metadata, error_code in partition_offsets:
error_type = kafka_errors.for_code(error_code)
# If the OffsetFetchRequest explicitly specified partitions, the offset could be returned as
# -1, meaning there is no recorded offset for that partition... for example, if the
# partition doesn't exist in the cluster. So ignore it.
if offset == -1 or error_type is not kafka_errors.NoError:
self._kafka_client.cluster.request_update() # force metadata update on next poll()
continue
key = (consumer_group, topic, partition)
self._kafka_consumer_offsets[key] = offset
else:
self.log.info("unable to find group coordinator for %s", consumer_group)
except Exception:
self.log.exception('Could not read consumer offsets from Kafka for group: %s', consumer_group)
def _get_group_coordinator(self, group):
"""Determine which broker is the Group Coordinator for a specific consumer group."""
request = GroupCoordinatorRequest[0](group)
response = self._make_blocking_req(request)
error_type = kafka_errors.for_code(response.error_code)
if error_type is kafka_errors.NoError:
return response.coordinator_id
def _validate_explicit_consumer_groups(self):
"""Validate any explicitly specified consumer groups.
While the check does not require specifying consumer groups,
if they are specified this method should be used to validate them.
consumer_groups = {'consumer_group': {'topic': [0, 1]}}
"""
assert isinstance(self._consumer_groups, dict)
for consumer_group, topics in self._consumer_groups.items():
assert isinstance(consumer_group, string_types)
assert isinstance(topics, dict) or topics is None # topics are optional
if topics is not None:
for topic, partitions in topics.items():
assert isinstance(topic, string_types)
assert isinstance(partitions, (list, tuple)) or partitions is None # partitions are optional
if partitions is not None:
for partition in partitions:
assert isinstance(partition, int)
def _send_event(self, title, text, tags, event_type, aggregation_key, severity='info'):
"""Emit an event to the Datadog Event Stream."""
event_dict = {
'timestamp': int(time()),
'msg_title': title,
'event_type': event_type,
'alert_type': severity,
'msg_text': text,
'tags': tags,
'aggregation_key': aggregation_key,
}
self.event(event_dict)
| 56.225108 | 145 | 0.641207 |
1fa86c4d5d8174d3d773eedbd654e0cdf91b5194 | 2,613 | py | Python | CIM16/CDPSM/Asset/IEC61968/AssetModels/OverheadConductorInfo.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | ["MIT"] | null | null | null | CIM16/CDPSM/Asset/IEC61968/AssetModels/OverheadConductorInfo.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | ["MIT"] | null | null | null | CIM16/CDPSM/Asset/IEC61968/AssetModels/OverheadConductorInfo.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | ["MIT"] | 1 | 2021-04-02T18:04:49.000Z | 2021-04-02T18:04:49.000Z |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.CDPSM.Asset.IEC61968.AssetModels.ConductorInfo import ConductorInfo
class OverheadConductorInfo(ConductorInfo):
"""Overhead conductor data.
"""
def __init__(self, neutralInsulationThickness=0.0, phaseConductorSpacing=0.0, phaseConductorCount=0, *args, **kw_args):
"""Initialises a new 'OverheadConductorInfo' instance.
@param neutralInsulationThickness: (if applicable) Insulation thickness of the neutral conductor.
@param phaseConductorSpacing: Distance between conductor strands in a symmetrical bundle.
@param phaseConductorCount: Number of conductor strands in the symmetrical bundle (1-12).
"""
#: (if applicable) Insulation thickness of the neutral conductor.
self.neutralInsulationThickness = neutralInsulationThickness
#: Distance between conductor strands in a symmetrical bundle.
self.phaseConductorSpacing = phaseConductorSpacing
#: Number of conductor strands in the symmetrical bundle (1-12).
self.phaseConductorCount = phaseConductorCount
super(OverheadConductorInfo, self).__init__(*args, **kw_args)
_attrs = ["neutralInsulationThickness", "phaseConductorSpacing", "phaseConductorCount"]
_attr_types = {"neutralInsulationThickness": float, "phaseConductorSpacing": float, "phaseConductorCount": int}
_defaults = {"neutralInsulationThickness": 0.0, "phaseConductorSpacing": 0.0, "phaseConductorCount": 0}
_enums = {}
_refs = []
_many_refs = []
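# Minimal usage sketch, not part of the original PyCIM module; the parameter values
# below are invented purely for illustration.
if __name__ == "__main__":
    demo = OverheadConductorInfo(
        neutralInsulationThickness=0.002,  # hypothetical thickness
        phaseConductorSpacing=0.3,         # hypothetical spacing
        phaseConductorCount=2,
    )
    print(demo.phaseConductorCount)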
| 50.25 | 123 | 0.753923 |
af73e8ab81964641c973296e681546e9a9cd2d01 | 9,087 | py | Python | .history/classes/Menu_20171107203023.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | ["MIT"] | null | null | null | .history/classes/Menu_20171107203023.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | ["MIT"] | null | null | null | .history/classes/Menu_20171107203023.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | ["MIT"] | null | null | null |
# DADSA - Assignment 1
# Reece Benson
from functools import partial
from os import system as call
from collections import OrderedDict
class Menu():
# Define the variables we will be using
_app = None
_menu = None
_current = [ "main" ]
_current_menu = "main"
just_called_back = False
def __init__(self, app):
# Set our Application
self._app = app
def load(self):
# Define our Menu
self._menu = { }
# Create our Menu
self._menu['main'] = { "new_season": "New Season", "load_season": "Load Season" }
self._menu['back'] = lambda: self.go_back()
self._menu['new_season'] = { "ns_players": "Players", "ns_tournaments": "Tournaments", "ns_prizemoney": "Prize Money", "ns_difficulty": "Difficulty", "back": "Back" }
self._menu['ns_players'] = { "ns_viewplayers": "View Players", "ns_viewplayer": "View Player", "back": "Back" }
self._menu['ns_tournaments'] = { "ns_viewtournaments": "Example Tournament 1", "back": "Back" }
self._menu['ns_prizemoney'] = { "ns_setprizemoney": "Set Prize Money", "ns_viewprizemoney": "View Prize Money", "back": "Back" }
self._menu['ns_difficulty'] = { "ns_setdifficulty": "Set Difficulty", "ns_viewdifficulty": "View Difficulty", "back": "Back" }
self._menu['load_season'] = { }
# Append our Seasons to the "Load Season" Menu
for seasonId in self._app.handler.get_seasons():
season = self._app.handler.get_season(seasonId)
seasonVar = 'ls_'+str(seasonId)
self._menu['load_season'].update({ seasonVar: season.name() })
# Create our menu option for loading a season
self._menu[seasonVar] = { seasonVar+"_select": "Select Tournament", seasonVar+"_players": "View Players", seasonVar+"_details": "View Details", "back": "Back" }
# Create our menu options
self._menu[seasonVar+"_select"] = { }
self._menu[seasonVar+"_players"] = { }
self._menu[seasonVar+"_details"] = lambda: print(season.display("details"))
# Fill our menu options with extra options
# > "Select Tournament"
for tournament_name in season.tournaments():
self._menu[seasonVar+"_select"].update({ seasonVar+"_select_"+tournament_name: "Select {0}".format(tournament_name) })
# Set our gender specifiers within the tournament
self._menu[seasonVar+"_select_"+tournament_name] = { }
for gdr in season.rounds():
self._menu[seasonVar+"_select_"+tournament_name].update({ seasonVar+"_select_"+tournament_name+"_"+gdr: "View {0} Rounds".format(gdr) })
self._menu[seasonVar+"_select_"+tournament_name+"_"+gdr] = { }
for rnd in season.rounds()[gdr]:
self._menu[seasonVar+"_select_"+tournament_name+"_"+gdr].update({ seasonVar+"_select_"+tournament_name+"-"+gdr+"-"+rnd: "Round {0}".format(rnd) })
self._menu[seasonVar+"_select_"+tournament_name+"-"+gdr+"-"+rnd] = partial(print, season.name(), tournament_name, gdr, rnd)
self._menu[seasonVar+"_select_"+tournament_name+"_"+gdr].update({ "back": "Back" })
# Add tournament specific options
self._menu[seasonVar+"_select_"+tournament_name].update({ seasonVar+"_select_"+tournament_name+"_difficulty": "View Difficulty", seasonVar+"_select_"+tournament_name+"_prizemoney": "View Prize Money" })
self._menu[seasonVar+"_select_"+tournament_name+"_difficulty"] = partial(print, season.tournament(tournament_name).display("difficulty"))
self._menu[seasonVar+"_select_"+tournament_name+"_prizemoney"] = partial(print, season.tournament(tournament_name).display("prize_money"))
# Add our back option
self._menu[seasonVar+"_select_"+tournament_name].update({ "back": "Back" })
# > "View Players"
for gdr in season.players():
self._menu[seasonVar+"_players"].update({ seasonVar+"_players_"+gdr: "List {0}s".format(gdr) })
self._menu[seasonVar+"_players_"+gdr] = lambda: print(season.display("players", gdr))
# > Add the back options to each submenu
self._menu[seasonVar+"_select"].update({ "back": "Back" })
self._menu[seasonVar+"_players"].update({ "back": "Back" })
self._menu["load_season"].update({ "back": "Back" })
# Display our Menu
self.display("main")
def go_back(self):
# Set our flag to true
self.just_called_back = True
# Pop off the last item of the list
self._current.pop()
# Set our current menu to the last element of the list
self._current_menu = self._current[-1]
def strike(self, text):
result = ''
for c in text:
result = result + c + '\u0336'
return result
def display(self, index = None, error = None):
# Clear our terminal window
call("cls")
# Define our variables
cur_count = 0
menu_item = self.get_menu(index or "main")
# Error Handling
if(error != None):
print("\n", "Error!", error, "\n")
# Menu Title, set tree
print("Please select an option: ({})".format("/".join(self._current)))
menu_counter = 0
for m in menu_item:
# Get our menu name
menu_name = menu_item[m]
# Increase our Counter
menu_counter += 1
# Check that the menu option is available
if(m in self._menu):
# Is the Menu Item a Function?
m_type = None
if(callable(self._menu[m])):
m_type = ""
else:
m_type = "->"
# Print our Menu Item
print("{0}. {1} {2}".format(menu_counter, menu_name, m_type))
else:
print(self.strike("{0}. {1} [?]".format(menu_counter, menu_name)))
# Get User Input
self.get_input()
def validate_menu(self, index):
try:
menu_name = [ (v) for k,v in enumerate(self._menu) if(k == index) ][0]
return menu_name
except IndexError:
return None
def get_menu(self, menu_name):
# Check our Menu exists
if(not menu_name in self._menu):
return None
else:
return self._menu[menu_name]
def menu_exists(self, index):
# Find our indexed menu
menu_item = self.get_menu(self._current_menu)
menu_found = None
menu_counter = 0
for m in menu_item:
# Get our menu name
menu_name = menu_item[m]
# Increase our Counter
menu_counter += 1
# Check that the menu option is available
if(m in self._menu):
# Has our menu been found?
if(menu_counter == index):
# Check if it's a function or a submenu
if(callable(self._menu[m])):
# Call our function
menu_found = self._menu[m]
else:
menu_found = m
else:
menu_found = None
return menu_found
def get_input(self):
# Wrap this in a try/except to validate any errors with input
try:
# Get users input
resp = input('>>> ').strip()
# Validate some set input calls while the response is still a string
if(resp == "exit"):
raise KeyboardInterrupt
elif(resp == ""):
return self.display(self._current_menu, "Please select a valid option!")
# Only now convert the response into a numeric menu index
resp = int(resp)
# Validate input from current menu
menu_selected = self.menu_exists(resp)
if(menu_selected != None and callable(menu_selected) != True):
print(menu_selected)
self._current.append(menu_selected)
self._current_menu = menu_selected
self.display(menu_selected)
elif(callable(menu_selected)):
# Clear our screen
call("cls")
# Call our function
menu_selected()
# Hold the user so they can see the result (if back hasn't just been called)
if(self.just_called_back == False):
input("\n>>> Press <Return> to continue...")
else:
self.just_called_back = False
# Display our menu again to stop from program termination
self.display(self._current_menu)
else:
self.display(self._current_menu, "Please select a valid option!")
except KeyboardInterrupt:
self._app.exit()
except ValueError:
self.display(self._current_menu, "Please select a valid option!")
| 40.386667 | 218 | 0.563222 |
a468a36888be87334e7aaf4f9c3ff4e8cd661019 | 868 | py | Python | troposphere/resourcegroups.py | pierretr/troposphere | 1bd6a010a3132aa3436ffe6b892f352876face4b | ["BSD-2-Clause"] | 4,573 | 2015-01-02T20:31:04.000Z | 2022-03-31T17:15:32.000Z | troposphere/resourcegroups.py | pierretr/troposphere | 1bd6a010a3132aa3436ffe6b892f352876face4b | ["BSD-2-Clause"] | 1,730 | 2015-01-02T19:24:47.000Z | 2022-03-31T23:22:52.000Z | troposphere/resourcegroups.py | pierretr/troposphere | 1bd6a010a3132aa3436ffe6b892f352876face4b | ["BSD-2-Clause"] | 1,753 | 2015-01-01T01:24:12.000Z | 2022-03-27T05:36:17.000Z |
# Copyright (c) 2020, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import resourcequery_type
class TagFilter(AWSProperty):
props = {
"Key": (str, False),
"Values": ([str], False),
}
class Query(AWSProperty):
props = {
"ResourceTypeFilters": ([str], False),
"StackIdentifier": (str, False),
"TagFilters": ([TagFilter], False),
}
class ResourceQuery(AWSProperty):
props = {
"Query": (Query, False),
"Type": (resourcequery_type, False),
}
class Group(AWSObject):
resource_type = "AWS::ResourceGroups::Group"
props = {
"Description": (str, False),
"Name": (str, True),
"ResourceQuery": (ResourceQuery, False),
"Tags": (Tags, False),
}
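# Minimal usage sketch, not part of the original module; the logical name, tag values
# and query type below are assumptions chosen only for illustration.
if __name__ == "__main__":
    example_group = Group(
        "ExampleResourceGroup",
        Name="example-group",
        ResourceQuery=ResourceQuery(
            Type="TAG_FILTERS_1_0",
            Query=Query(
                ResourceTypeFilters=["AWS::AllSupported"],
                TagFilters=[TagFilter(Key="env", Values=["prod"])],
            ),
        ),
    )
    print(example_group.to_dict())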
| 21.170732 | 48 | 0.592166 |
934801b3e77c4ea931cb7dcee3d83459e05d42d4 | 2,650 | py | Python | 2016/slides/ComputationalGeometry/arc.py | MercerBinaryBears/ProgrammingTeam | 5a4819753ad14d16022778388c772da100e22517 | ["MIT"] | 1 | 2016-07-18T04:14:50.000Z | 2016-07-18T04:14:50.000Z | 2016/slides/ComputationalGeometry/arc.py | MercerBinaryBears/ProgrammingTeam | 5a4819753ad14d16022778388c772da100e22517 | ["MIT"] | 7 | 2016-08-10T22:56:45.000Z | 2016-08-11T13:12:19.000Z | 2016/slides/ComputationalGeometry/arc.py | tylerburnham42/ProgrammingTeam | 5a4819753ad14d16022778388c772da100e22517 | ["MIT"] | null | null | null |
from math import cos, sin, pi
def distance(p1, p2):
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return (dx * dx + dy * dy)**0.5
def equals(p1, p2, epsilon=0.001):
return distance(p1, p2) <= epsilon
def add(p1, p2):
return p1[0] + p2[0], p1[1] + p2[1]
def subtract(p1, p2):
return p1[0] - p2[0], p1[1] - p2[1]
def scale(point, factor):
return point[0] * factor, point[1] * factor
memo = {}
def hash_key(currentLocation, phi, theta, arcsAvailable):
return '{0:0.5f},{1:0.5f},{2:0.5f},{3:0.5f},{4:d}'.format(currentLocation[0], currentLocation[1], phi, theta, arcsAvailable)
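# e.g. hash_key((0.0, 0.0), 0.0, 2 * pi / 7, 4) evaluates to '0.00000,0.00000,0.00000,0.89760,4'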
def memoize(currentLocation, phi, theta, arcsAvailable, value):
global memo
memo[hash_key(currentLocation, phi, theta, arcsAvailable)] = value
return value
###
# phi = current direction
###
def findPoint(point, currentLocation, phi, theta, arcsAvailable):
# Check if we've already calculated this value
global memo
key = hash_key(currentLocation, phi, theta, arcsAvailable)
if key in memo:
return memo[key]
# If we're out of moves, but we landed at the start, we've got a match
if arcsAvailable == 0 and equals(point, currentLocation):
return 1
# We're stil out of moves but not close by. Not a match
elif arcsAvailable == 0:
return 0
# Do some "pruning" to stop chasing paths that are too far away: If we're further away than we have steps left
# it's impossible to finish, so we're wasting our time
if distance(point, currentLocation) > theta * arcsAvailable:
return 0
# try both a left and right turn. These are essentially a closed form of a rotation matrix. I think you're just
# going to have to do the algebra...
leftDirection = cos(phi) * sin(theta) - sin(phi) * (1 - cos(theta)), sin(phi) * sin(theta) + cos(phi) * (1 - cos(theta))
leftLocation = add(currentLocation, leftDirection)
totalFromLeft = findPoint(point, leftLocation, phi + theta, theta, arcsAvailable - 1)
rightDirection = cos(phi) * sin(theta) - sin(phi) * (cos(theta) - 1), sin(phi) * sin(theta) + cos(phi) * (cos(theta) - 1)
rightLocation = add(currentLocation, rightDirection)
totalFromRight = findPoint(point, rightLocation, phi - theta, theta, arcsAvailable - 1)
#return totalFromLeft + totalFromRight
return memoize(currentLocation, phi, theta, arcsAvailable, totalFromLeft + totalFromRight)
# read the number of cases (at most 15)
N = int(raw_input().strip())
for i in range(N):
# read in input (at most 30 steps, with 7th of circle division)
stepCount, arcDistance = map(int, raw_input().strip().split())
# calculate the "rotation angle" for a single turn
theta = 2 * pi / arcDistance
print findPoint((0, 0), (0, 0), 0, theta, stepCount)
| 35.333333 | 125 | 0.698868 |
bbb6476e66e84ce6287eea92f265a0996785164f | 1,806 | py | Python | du/gerrit/ssh/File.py | spiricn/DevUtils | 58a035a08a7c58035c25f992c1b8aa33cc997cd2 | ["MIT"] | 1 | 2021-12-21T13:18:08.000Z | 2021-12-21T13:18:08.000Z | du/gerrit/ssh/File.py | spiricn/DevUtils | 58a035a08a7c58035c25f992c1b8aa33cc997cd2 | ["MIT"] | null | null | null | du/gerrit/ssh/File.py | spiricn/DevUtils | 58a035a08a7c58035c25f992c1b8aa33cc997cd2 | ["MIT"] | null | null | null |
from enum import Enum
class File:
"""
Information about a patch on a file
Schema described here
https://gerrit-review.googlesource.com/Documentation/json.html#file
"""
class Type(Enum):
"""
The type of change
"""
# The file is being created/introduced by this patch.
ADDED = 0
# The file already exists, and has updated content
MODIFIED = 1
# The file existed, but is being removed by this patch.
DELETED = 2
# The file is renamed.
RENAMED = 3
# The file is copied from another file
COPIED = 4
# Sufficient amount of content changed to claim the file was rewritten
REWRITE = 5
def __init__(self, jsonObject):
self._file = jsonObject.get("file", None)
self._fileOld = jsonObject.get("fileOld", None)
self._type = jsonObject.get("type", None)
if self._type:
self._type = self.Type[self._type]
self._insertions = jsonObject.get("insertions", None)
self._deletions = jsonObject.get("deletions", None)
@property
def file(self):
"""
The name of the file. If the file is renamed, the new name
"""
return self._file
@property
def fileOld(self):
"""
The old name of the file, if the file is renamed
"""
return self._fileOld
@property
def type(self):
"""
The type of change
"""
return self._type
@property
def insertions(self):
"""
number of insertions of this patch
"""
return self._insertions
@property
def deletions(self):
"""
number of deletions of this patch
"""
return self._deletions
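# Minimal usage sketch, not part of the original module; the JSON payload below is a
# made-up example of the Gerrit "file" structure this class wraps.
if __name__ == "__main__":
    patched = File({"file": "src/main.c", "type": "MODIFIED", "insertions": 12, "deletions": 3})
    print(patched.file, patched.type, patched.insertions, patched.deletions)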
| 22.860759 | 78 | 0.567553 |
8013e246f1f8cd3643568089e03bfcae6e6fade2 | 594 | py | Python | reviews/migrations/0002_review_room.py | alstn2468/Django_Airbnb_Clone | eeb61e4a36320a0b269d96f47cc6755dbc4c40f8 | ["MIT"] | 5 | 2019-11-26T00:34:24.000Z | 2021-01-04T06:04:48.000Z | reviews/migrations/0002_review_room.py | alstn2468/Django_Airbnb_Clone | eeb61e4a36320a0b269d96f47cc6755dbc4c40f8 | ["MIT"] | 3 | 2021-06-09T19:05:40.000Z | 2021-09-08T01:49:01.000Z | reviews/migrations/0002_review_room.py | alstn2468/Django_Airbnb_Clone | eeb61e4a36320a0b269d96f47cc6755dbc4c40f8 | ["MIT"] | 6 | 2019-11-24T11:47:09.000Z | 2021-08-16T20:21:35.000Z |
# Generated by Django 2.2.5 on 2019-12-22 12:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("rooms", "0001_initial"),
("reviews", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="review",
name="room",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="reviews",
to="rooms.Room",
),
),
]
| 22 | 60 | 0.553872 |
f230ecac51fed70dea728667639fbb6bf63023fb | 12,398 | py | Python | zvmsdk/sdkwsgi/validation/parameter_types.py | IBMCaoBiao/python-zvm-sdk | 655e701cf3f90c57b0969c1f9971541ab38cedc4 | ["Apache-2.0"] | 1 | 2017-08-17T06:31:33.000Z | 2017-08-17T06:31:33.000Z | zvmsdk/sdkwsgi/validation/parameter_types.py | SharpRazor/python-zvm-sdk | 655e701cf3f90c57b0969c1f9971541ab38cedc4 | ["Apache-2.0"] | 1 | 2019-12-10T08:38:01.000Z | 2019-12-10T08:38:01.000Z | zvmsdk/sdkwsgi/validation/parameter_types.py | SharpRazor/python-zvm-sdk | 655e701cf3f90c57b0969c1f9971541ab38cedc4 | ["Apache-2.0"] | null | null | null |
# Copyright 2017,2020 IBM Corp.
# Copyright 2013 NEC Corporation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import unicodedata
import six
def single_param(schema):
ret = multi_params(schema)
ret['maxItems'] = 1
return ret
def multi_params(schema):
return {'type': 'array', 'items': schema}
class ValidationRegex(object):
def __init__(self, regex, reason):
self.regex = regex
self.reason = reason
def _is_printable(char):
category = unicodedata.category(char)
return (not category.startswith("C") and
(not category.startswith("Z") or category == "Zs"))
def _get_all_chars():
for i in range(0xFFFF):
yield six.unichr(i)
def _build_regex_range(ws=True, invert=False, exclude=None):
if exclude is None:
exclude = []
regex = ""
in_range = False
last = None
last_added = None
def valid_char(char):
if char in exclude:
result = False
elif ws:
result = _is_printable(char)
else:
# Zs is the unicode class for space characters, of which
# there are about 10 in this range.
result = (_is_printable(char) and
unicodedata.category(char) != "Zs")
if invert is True:
return not result
return result
# iterate through the entire character range. in_
for c in _get_all_chars():
if valid_char(c):
if not in_range:
regex += re.escape(c)
last_added = c
in_range = True
else:
if in_range and last != last_added:
regex += "-" + re.escape(last)
in_range = False
last = c
else:
if in_range:
regex += "-" + re.escape(c)
return regex
valid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'
valid_name_regex = ValidationRegex(
valid_name_regex_base % (
_build_regex_range(ws=False, invert=True),
_build_regex_range(),
_build_regex_range(ws=False, invert=True)),
"printable characters. Can not start or end with whitespace.")
name = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'name'
}
ipl_from = {
'type': 'string', 'minLength': 0, 'maxLength': 255,
}
ipl_param = {
'type': 'string', 'minLength': 0, 'maxLength': 255,
}
ipl_loadparam = {
'type': 'string', 'minLength': 0, 'maxLength': 255,
}
loaddev = {
'type': 'object',
'properties': {
'portname': {'type': 'string',
'minLength': 1,
'maxLength': 16,
'pattern': '^[0-9a-fA-F]{,16}$'},
'lun': {'type': 'string',
'minLength': 1,
'maxLength': 16,
'pattern': '^[0-9a-fA-F]{,16}$'},
},
'additionalProperties': False
}
dedicate_vdevs = {
'type': 'array',
'minItems': 0,
'items': {
'type': 'string',
'pattern': '^[0-9a-fA-F]{,4}$'
},
'uniqueItems': True
}
positive_integer = {
'type': ['integer', 'string'],
'pattern': '^[0-9]*$', 'minimum': 1
}
non_negative_integer = {
'type': ['integer', 'string'],
'pattern': '^[0-9]*$', 'minimum': 0
}
ipv4 = {
'type': 'string', 'format': 'ipv4'
}
nic_info = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'nic_id': {'type': 'string'},
'mac_addr': {'type': 'string'}
},
'additionalProperties': False
}
}
boolean = {
'type': ['boolean', 'string'],
'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
'YES', 'Yes', 'yes',
False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off',
'NO', 'No', 'no']
}
rdev_list = {
'oneOf': [
{'type': 'null'},
{'type': 'string',
'pattern': '^([0-9a-fA-F]{,4})(\s+[0-9a-fA-F]{,4}){,2}$'}
]
}
rdev = {
'type': ['string'], 'minLength': 1, 'maxLength': 4,
'pattern': '^[0-9a-fA-F]{,4}$'
}
vdev_or_None = {
'oneOf': [
{'type': 'null'},
{'type': ['string'], 'minLength': 1, 'maxLength': 4,
'pattern': '^[0-9a-fA-F]{,4}$'}
]
}
vdev = {
'type': ['string'], 'minLength': 1, 'maxLength': 4,
'pattern': '^[0-9a-fA-F]{,4}$'
}
vdev_list = {
'type': 'array',
'minItems': 1,
'items': {
'type': 'string',
'pattern': '^[0-9a-fA-F]{,4}$'
},
'uniqueItems': True
}
image_list = {
'maxItems': 1,
'items': {
'format': 'name',
'maxLength': 255,
'minLength': 1,
'type': 'string'
},
'type': 'array'
}
url = {
'type': ['string'],
'pattern': '^https?:/{2}|^file:/{3}\w.+$'
}
mac_address = {
'type': 'string',
'pattern': '^([0-9a-fA-F]{2})(:[0-9a-fA-F]{2}){5}$'
}
remotehost = {
'type': ['string'],
'pattern': '^[a-zA-Z0-9\-]+\@([0-9]{1,3}(.[0-9]{1,3}){3}$|'
'[a-zA-Z0-9\-]+(.[a-zA-Z0-9\-]){1,}$)'
}
userid = {
'type': ['string'],
'minLength': 1,
'maxLength': 8,
'pattern': '^(\w{,8})$'
}
vswitch_name = {
'type': ['string'], 'minLength': 1, 'maxLength': 8
}
controller = {
'type': ['string'],
'anyOf': [
{'pattern': '\*'},
{'minLength': 1, 'maxLength': 8}
]
}
nic_id = {
'type': ['string']
}
cidr = {
'type': ['string'],
'format': 'cidr'
}
userid_list = {
'type': ['string'],
# TODO:validate userid_list in inspect APIs
'pattern': '^(\s*\w{1,8}\s*)(,\s*\w{1,8}\s*){0,}$'
}
userid_list_array = {
'items': {
'type': ['string'],
'minLength': 1,
'pattern': '^(\s*\w{1,8}\s*)(,\s*\w{1,8}\s*){0,}$'
},
'type': 'array'
}
file_type = {
'type': 'string',
'enum': ['ext2', 'ext3', 'ext4', 'xfs', 'swap', 'none']
}
disk_pool = {
'type': 'string',
'pattern': '^\w+:\w+$'
}
disk_pool_list = {
'maxItems': 1,
'items': {
'type': 'string',
'pattern': '^\w+:\w+$',
},
'type': 'array'
}
disk_list = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'size': {'type': 'string'},
'format': file_type,
'is_boot_disk': boolean,
'vdev': vdev,
'disk_pool': {'type': 'string', 'pattern': '^\w+:\w+$'}
},
'required': ['size'],
'additionalProperties': False
}
}
live_migrate_parms = {
'type': 'object',
'properties': {
'maxtotal': {'type': 'integer'},
'maxquiesce': {'type': 'integer'},
'immediate': {'type': 'string'},
'forcearch': {'type': 'string'},
'forcedomain': {'type': 'string'},
'forcestorage': {'type': 'string'}
},
'additionalProperties': False
}
disk_conf = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'vdev': vdev,
'format': file_type,
'mntdir': {'type': 'string'},
'size': {'type': 'string'}
},
'required': ['format'],
'additionalProperties': False
}
}
# For redhat linux, it will match rhelX, rhelX.Y, redhatX, redhatX.Y,
# where X is 6 or 7, Y is 0 to 9, all case insensitive
# For suse linux, it will match slesX, slesX.Y, slesXspY, suseX,
# suseX.Y, suseXspY, where X is 11 or 12, Y is 0 to 9,
# all case insensitive
# For ubuntu linux, it will match ubuntuX, ubuntuX.Y, ubuntuX.Y.Z,
# where X is 16, Y is 01 to 10, Z is 0 to 9, such as ubuntu16.04.3,
# all case insensitive
# For Red Hat CoreOS (RHCOS), it will match rhcosX, where X is 4,
# such as rhcos4, all case insensitive
os_version = {
'oneOf': [
{'type': 'string',
'pattern':
'^((r|R)(h|H)(e|E)(l|L))(6|7|8){1}([.][0-9])?$'},
{'type': 'string',
'pattern':
'^((r|R)(e|E)(d|D)(h|H)(a|A)(t|T))(6|7){1}([.][0-9])?$'},
{'type': 'string',
'pattern':
'^((s|S)(l|L)(e|E)(s|E))(11|12){1}(([.]|((s|S)(p|P)))[0-9])?$'},
{'type': 'string',
'pattern':
'^((s|S)(u|U)(s|S)(e|E))(11|12){1}(([.]|((s|S)(p|P)))[0-9])?$'},
{'type': 'string',
'pattern':
'^((u|U)(b|B)(u|U)(n|N)(t|T)(u|U))(16){1}([.][0-9]{2})?([.][0-9])?$'},
{'type': 'string',
'pattern':
'^((r|R)(h|H)(c|C)(o|O)(s|S))(4){1}?$'}
]
}
disk_type = {
'type': 'string',
'enum': ['DASD', 'dasd', 'SCSI', 'scsi']
}
image_meta = {
'type': 'object',
'properties': {
'os_version': os_version,
# md5sum should be 32 hexadecimal characters
'md5sum': {'type': 'string', 'pattern': '^[0-9a-fA-F]{32}$'},
'disk_type': disk_type
},
'required': ['os_version'],
'additionalProperties': False
}
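# A payload accepted by this schema would look like the following (the md5sum shown
# is the well-known digest of the empty string, used only as a 32-hex-digit example):
# {'os_version': 'rhel7.2', 'md5sum': 'd41d8cd98f00b204e9800998ecf8427e', 'disk_type': 'DASD'}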
command = {
'type': 'string'
}
network_list = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'ip_addr': ipv4,
'dns_addr': {'type': 'array',
'items': ipv4},
'gateway_addr': ipv4,
'mac_addr': mac_address,
'cidr': cidr,
'nic_vdev': vdev,
'nic_id': {'type': 'string'},
'osa_device': vdev},
'dependencies': {
'ip_addr': ['cidr']
}
},
'additionalProperties': False
}
capture_type = {
'type': 'string',
'enum': ['rootonly', 'alldisks']
}
compress_level = {
'type': ['integer'],
'pattern': '^[0-9]$'
}
user_vlan_id = {
'type': 'object',
'properties': {
'userid': userid,
'vlanid': {'type': ['integer'],
'minimum': 1,
'maximum': 4094,
}
},
'required': ['userid', "vlanid"],
'additionalProperties': False
}
fcp = {
'type': 'array',
'items': {
'type': 'string',
'minLength': 4,
'maxLength': 4,
'pattern': '^[0-9a-fA-F]{4}$'
}
}
wwpn = {
'type': 'array',
'items': {
'type': 'string',
'minLength': 18,
'maxLength': 18,
'pattern': '^0x[0-9a-fA-F]{16}$'
}
}
lun = {
'type': ['string'], 'minLength': 18, 'maxLength': 18,
'pattern': '^0x[0-9a-fA-F]{16}$'
}
connection_info = {
'type': 'object',
'properties': {
'assigner_id': userid,
'zvm_fcp': fcp,
'target_wwpn': wwpn,
'target_lun': lun,
'os_version': os_version,
'multipath': boolean,
'mount_point': {'type': 'string'},
'is_root_volume': boolean,
},
'required': ['assigner_id', 'zvm_fcp', 'target_wwpn',
'target_lun', 'multipath', 'os_version',
'mount_point'],
'additionalProperties': False
}
connection_type = {
'type': 'string',
'enum': ['CONnect', 'CONNECT', 'connect',
'DISCONnect', 'DISCONNECT', 'disconnect',
'NOUPLINK', 'nouplink']
}
router_type = {
'type': 'string',
'enum': ['NONrouter', 'NONROUTER', 'nonrouter',
'PRIrouter', 'PRIROUTER', 'prirouter']
}
network_type = {
'type': 'string',
'enum': ['IP', 'ip', 'ETHernet', 'ethernet', 'ETHERNET']
}
vid_type = {
'oneOf': [
{'type': 'string', 'enum': ['UNAWARE', 'unaware', 'AWARE', 'aware']},
{'type': 'integer', 'minimum': 1, 'maximum': 4094}
]
}
port_type = {
'type': 'string',
'enum': ['ACCESS', 'access', 'TRUNK', 'trunk']
}
gvrp_type = {
'type': 'string',
'enum': ['GVRP', 'gvrp', 'NOGVRP', 'nogvrp']
}
native_vid_type = {
'oneOf': [
{'type': 'null'},
{'type': 'integer', 'minimum': 1, 'maximum': 4094}
]
}
max_cpu = {
'type': 'integer',
'minimum': 1,
'maximum': 64
}
max_mem = {
'type': 'string',
'pattern': '^[1-9][0-9]{0,3}[m|M|g|G]$'
}
hostname = {
'oneOf': [
{'type': 'null'},
{'type': 'string', 'minLength': 1, 'maxLength': 255,
'pattern': '^[a-zA-Z0-9-._]*$'}
]
}
| 21.904594 | 78 | 0.492337 |
8c10bb7f88ef8f92fc43cb390c1c21924dd070d5 | 20,241 | py | Python | torba/torba/client/basetransaction.py | gpjacobs/lbry-sdk | 3562234f9d80552196113d51736321a41dcc91d9 | ["MIT"] | null | null | null | torba/torba/client/basetransaction.py | gpjacobs/lbry-sdk | 3562234f9d80552196113d51736321a41dcc91d9 | ["MIT"] | null | null | null | torba/torba/client/basetransaction.py | gpjacobs/lbry-sdk | 3562234f9d80552196113d51736321a41dcc91d9 | ["MIT"] | null | null | null |
import logging
import typing
from typing import List, Iterable, Optional, Tuple
from binascii import hexlify
from torba.client.basescript import BaseInputScript, BaseOutputScript
from torba.client.baseaccount import BaseAccount
from torba.client.constants import COIN, NULL_HASH32
from torba.client.bcd_data_stream import BCDataStream
from torba.client.hash import sha256, TXRef, TXRefImmutable
from torba.client.util import ReadOnlyList
from torba.client.errors import InsufficientFundsError
if typing.TYPE_CHECKING:
from torba.client import baseledger, wallet as basewallet
log = logging.getLogger()
class TXRefMutable(TXRef):
__slots__ = ('tx',)
def __init__(self, tx: 'BaseTransaction') -> None:
super().__init__()
self.tx = tx
@property
def id(self):
if self._id is None:
self._id = hexlify(self.hash[::-1]).decode()
return self._id
@property
def hash(self):
if self._hash is None:
self._hash = sha256(sha256(self.tx.raw_sans_segwit))
return self._hash
@property
def height(self):
return self.tx.height
def reset(self):
self._id = None
self._hash = None
class TXORef:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef, position: int) -> None:
self.tx_ref = tx_ref
self.position = position
@property
def id(self):
return f'{self.tx_ref.id}:{self.position}'
@property
def hash(self):
return self.tx_ref.hash + BCDataStream.uint32.pack(self.position)
@property
def is_null(self):
return self.tx_ref.is_null
@property
def txo(self) -> Optional['BaseOutput']:
return None
class TXORefResolvable(TXORef):
__slots__ = ('_txo',)
def __init__(self, txo: 'BaseOutput') -> None:
assert txo.tx_ref is not None
assert txo.position is not None
super().__init__(txo.tx_ref, txo.position)
self._txo = txo
@property
def txo(self):
return self._txo
class InputOutput:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
self.tx_ref = tx_ref
self.position = position
@property
def size(self) -> int:
""" Size of this input / output in bytes. """
stream = BCDataStream()
self.serialize_to(stream)
return len(stream.get_bytes())
def get_fee(self, ledger):
return self.size * ledger.fee_per_byte
def serialize_to(self, stream, alternate_script=None):
raise NotImplementedError
class BaseInput(InputOutput):
script_class = BaseInputScript
NULL_SIGNATURE = b'\x00'*72
NULL_PUBLIC_KEY = b'\x00'*33
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
def __init__(self, txo_ref: TXORef, script: BaseInputScript, sequence: int = 0xFFFFFFFF,
tx_ref: TXRef = None, position: int = None) -> None:
super().__init__(tx_ref, position)
self.txo_ref = txo_ref
self.sequence = sequence
self.coinbase = script if txo_ref.is_null else None
self.script = script if not txo_ref.is_null else None
@property
def is_coinbase(self):
return self.coinbase is not None
@classmethod
def spend(cls, txo: 'BaseOutput') -> 'BaseInput':
""" Create an input to spend the output."""
assert txo.script.is_pay_pubkey_hash, 'Attempting to spend unsupported output.'
script = cls.script_class.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
return cls(txo.ref, script)
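# The NULL_SIGNATURE / NULL_PUBLIC_KEY placeholders keep this input at a realistic
# serialized size for fee estimation; sign() later overwrites them with the real
# signature and public key before the transaction is broadcast.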
@property
def amount(self) -> int:
""" Amount this input adds to the transaction. """
if self.txo_ref.txo is None:
raise ValueError('Cannot resolve output to get amount.')
return self.txo_ref.txo.amount
@property
def is_my_account(self) -> Optional[bool]:
""" True if the output this input spends is yours. """
if self.txo_ref.txo is None:
return False
return self.txo_ref.txo.is_my_account
@classmethod
def deserialize_from(cls, stream):
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1)
position = stream.read_uint32()
script = stream.read_string()
sequence = stream.read_uint32()
return cls(
TXORef(tx_ref, position),
cls.script_class(script) if not tx_ref.is_null else script,
sequence
)
def serialize_to(self, stream, alternate_script=None):
stream.write(self.txo_ref.tx_ref.hash)
stream.write_uint32(self.txo_ref.position)
if alternate_script is not None:
stream.write_string(alternate_script)
else:
if self.is_coinbase:
stream.write_string(self.coinbase)
else:
stream.write_string(self.script.source)
stream.write_uint32(self.sequence)
class BaseOutputEffectiveAmountEstimator:
__slots__ = 'txo', 'txi', 'fee', 'effective_amount'
def __init__(self, ledger: 'baseledger.BaseLedger', txo: 'BaseOutput') -> None:
self.txo = txo
self.txi = ledger.transaction_class.input_class.spend(txo)
self.fee: int = self.txi.get_fee(ledger)
self.effective_amount: int = txo.amount - self.fee
def __lt__(self, other):
return self.effective_amount < other.effective_amount
class BaseOutput(InputOutput):
script_class = BaseOutputScript
estimator_class = BaseOutputEffectiveAmountEstimator
__slots__ = 'amount', 'script', 'is_change', 'is_my_account'
def __init__(self, amount: int, script: BaseOutputScript,
tx_ref: TXRef = None, position: int = None,
is_change: Optional[bool] = None, is_my_account: Optional[bool] = None
) -> None:
super().__init__(tx_ref, position)
self.amount = amount
self.script = script
self.is_change = is_change
self.is_my_account = is_my_account
def update_annotations(self, annotated):
if annotated is None:
self.is_change = False
self.is_my_account = False
else:
self.is_change = annotated.is_change
self.is_my_account = annotated.is_my_account
@property
def ref(self):
return TXORefResolvable(self)
@property
def id(self):
return self.ref.id
@property
def pubkey_hash(self):
return self.script.values['pubkey_hash']
@property
def has_address(self):
return 'pubkey_hash' in self.script.values
def get_address(self, ledger):
return ledger.hash160_to_address(self.pubkey_hash)
def get_estimator(self, ledger):
return self.estimator_class(ledger, self)
@classmethod
def pay_pubkey_hash(cls, amount, pubkey_hash):
return cls(amount, cls.script_class.pay_pubkey_hash(pubkey_hash))
@classmethod
def deserialize_from(cls, stream):
return cls(
amount=stream.read_uint64(),
script=cls.script_class(stream.read_string())
)
def serialize_to(self, stream, alternate_script=None):
stream.write_uint64(self.amount)
stream.write_string(self.script.source)
class BaseTransaction:
input_class = BaseInput
output_class = BaseOutput
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
height: int = -2, position: int = -1) -> None:
self._raw = raw
self._raw_sans_segwit = None
self.is_segwit_flag = 0
self.witnesses: List[bytes] = []
self.ref = TXRefMutable(self)
self.version = version
self.locktime = locktime
self._inputs: List[BaseInput] = []
self._outputs: List[BaseOutput] = []
self.is_verified = is_verified
# Height Progression
# -2: not broadcast
# -1: in mempool but has unconfirmed inputs
# 0: in mempool and all inputs confirmed
# +num: confirmed in a specific block (height)
self.height = height
self.position = position
if raw is not None:
self._deserialize()
@property
def is_broadcast(self):
return self.height > -2
@property
def is_mempool(self):
return self.height in (-1, 0)
@property
def is_confirmed(self):
return self.height > 0
@property
def id(self):
return self.ref.id
@property
def hash(self):
return self.ref.hash
@property
def raw(self):
if self._raw is None:
self._raw = self._serialize()
return self._raw
@property
def raw_sans_segwit(self):
if self.is_segwit_flag:
if self._raw_sans_segwit is None:
self._raw_sans_segwit = self._serialize(sans_segwit=True)
return self._raw_sans_segwit
return self.raw
def _reset(self):
self._raw = None
self._raw_sans_segwit = None
self.ref.reset()
@property
def inputs(self) -> ReadOnlyList[BaseInput]:
return ReadOnlyList(self._inputs)
@property
def outputs(self) -> ReadOnlyList[BaseOutput]:
return ReadOnlyList(self._outputs)
def _add(self, existing_ios: List, new_ios: Iterable[InputOutput], reset=False) -> 'BaseTransaction':
for txio in new_ios:
txio.tx_ref = self.ref
txio.position = len(existing_ios)
existing_ios.append(txio)
if reset:
self._reset()
return self
def add_inputs(self, inputs: Iterable[BaseInput]) -> 'BaseTransaction':
return self._add(self._inputs, inputs, True)
def add_outputs(self, outputs: Iterable[BaseOutput]) -> 'BaseTransaction':
return self._add(self._outputs, outputs, True)
@property
def size(self) -> int:
""" Size in bytes of the entire transaction. """
return len(self.raw)
@property
def base_size(self) -> int:
""" Size of transaction without inputs or outputs in bytes. """
return (
self.size
- sum(txi.size for txi in self._inputs)
- sum(txo.size for txo in self._outputs)
)
@property
def input_sum(self):
return sum(i.amount for i in self.inputs if i.txo_ref.txo is not None)
@property
def output_sum(self):
return sum(o.amount for o in self.outputs)
@property
def net_account_balance(self) -> int:
balance = 0
for txi in self.inputs:
if txi.txo_ref.txo is None:
continue
if txi.is_my_account is None:
raise ValueError(
"Cannot access net_account_balance if inputs/outputs do not "
"have is_my_account set properly."
)
if txi.is_my_account:
balance -= txi.amount
for txo in self.outputs:
if txo.is_my_account is None:
raise ValueError(
"Cannot access net_account_balance if inputs/outputs do not "
"have is_my_account set properly."
)
if txo.is_my_account:
balance += txo.amount
return balance
@property
def fee(self) -> int:
return self.input_sum - self.output_sum
def get_base_fee(self, ledger) -> int:
""" Fee for base tx excluding inputs and outputs. """
return self.base_size * ledger.fee_per_byte
def get_effective_input_sum(self, ledger) -> int:
""" Sum of input values *minus* the cost involved to spend them. """
return sum(txi.amount - txi.get_fee(ledger) for txi in self._inputs)
def get_total_output_sum(self, ledger) -> int:
""" Sum of output values *plus* the cost involved to spend them. """
return sum(txo.amount + txo.get_fee(ledger) for txo in self._outputs)
def _serialize(self, with_inputs: bool = True, sans_segwit: bool = False) -> bytes:
stream = BCDataStream()
stream.write_uint32(self.version)
if with_inputs:
stream.write_compact_size(len(self._inputs))
for txin in self._inputs:
txin.serialize_to(stream)
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
return stream.get_bytes()
def _serialize_for_signature(self, signing_input: int) -> bytes:
stream = BCDataStream()
stream.write_uint32(self.version)
stream.write_compact_size(len(self._inputs))
for i, txin in enumerate(self._inputs):
if signing_input == i:
assert txin.txo_ref.txo is not None
txin.serialize_to(stream, txin.txo_ref.txo.script.source)
else:
txin.serialize_to(stream, b'')
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
return stream.get_bytes()
def _deserialize(self):
if self._raw is not None:
stream = BCDataStream(self._raw)
self.version = stream.read_uint32()
input_count = stream.read_compact_size()
if input_count == 0:
self.is_segwit_flag = stream.read_uint8()
input_count = stream.read_compact_size()
self._add(self._inputs, [
self.input_class.deserialize_from(stream) for _ in range(input_count)
])
output_count = stream.read_compact_size()
self._add(self._outputs, [
self.output_class.deserialize_from(stream) for _ in range(output_count)
])
if self.is_segwit_flag:
# drain witness portion of transaction
# too many witnesses for no crime
self.witnesses = []
for _ in range(input_count):
for _ in range(stream.read_compact_size()):
self.witnesses.append(stream.read(stream.read_compact_size()))
self.locktime = stream.read_uint32()
@classmethod
def ensure_all_have_same_ledger_and_wallet(
cls, funding_accounts: Iterable[BaseAccount],
change_account: BaseAccount = None) -> Tuple['baseledger.BaseLedger', 'basewallet.Wallet']:
ledger = wallet = None
for account in funding_accounts:
if ledger is None:
ledger = account.ledger
wallet = account.wallet
if ledger != account.ledger:
raise ValueError(
'All funding accounts used to create a transaction must be on the same ledger.'
)
if wallet != account.wallet:
raise ValueError(
'All funding accounts used to create a transaction must be from the same wallet.'
)
if change_account is not None:
if change_account.ledger != ledger:
raise ValueError('Change account must use same ledger as funding accounts.')
if change_account.wallet != wallet:
raise ValueError('Change account must use same wallet as funding accounts.')
if ledger is None:
raise ValueError('No ledger found.')
if wallet is None:
raise ValueError('No wallet found.')
return ledger, wallet
@classmethod
async def create(cls, inputs: Iterable[BaseInput], outputs: Iterable[BaseOutput],
funding_accounts: Iterable[BaseAccount], change_account: BaseAccount,
sign: bool = True):
""" Find optimal set of inputs when only outputs are provided; add change
outputs if only inputs are provided or if inputs are greater than outputs. """
tx = cls() \
.add_inputs(inputs) \
.add_outputs(outputs)
ledger, _ = cls.ensure_all_have_same_ledger_and_wallet(funding_accounts, change_account)
# value of the outputs plus associated fees
cost = (
tx.get_base_fee(ledger) +
tx.get_total_output_sum(ledger)
)
# value of the inputs less the cost to spend those inputs
payment = tx.get_effective_input_sum(ledger)
try:
for _ in range(5):
if payment < cost:
deficit = cost - payment
spendables = await ledger.get_spendable_utxos(deficit, funding_accounts)
if not spendables:
raise InsufficientFundsError('Not enough funds to cover this transaction.')
payment += sum(s.effective_amount for s in spendables)
tx.add_inputs(s.txi for s in spendables)
cost_of_change = (
tx.get_base_fee(ledger) +
cls.output_class.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(ledger)
)
if payment > cost:
change = payment - cost
if change > cost_of_change:
change_address = await change_account.change.get_or_create_usable_address()
change_hash160 = change_account.ledger.address_to_hash160(change_address)
change_amount = change - cost_of_change
change_output = cls.output_class.pay_pubkey_hash(change_amount, change_hash160)
change_output.is_change = True
tx.add_outputs([cls.output_class.pay_pubkey_hash(change_amount, change_hash160)])
if tx._outputs:
break
# this condition and the outer range(5) loop cover an edge case
# whereby a single input is just enough to cover the fee and
# has some change left over, but the change left over is less
# than the cost_of_change: thus the input is completely
# consumed and no output is added, which is an invalid tx.
# to be able to spend this input we must increase the cost
# of the TX and run through the balance algorithm a second time
# adding an extra input and change output, making tx valid.
# we do this 5 times in case the other UTXOs added are also
# less than the fee, after 5 attempts we give up and go home
cost += cost_of_change + 1
if sign:
await tx.sign(funding_accounts)
except Exception as e:
log.exception('Failed to create transaction:')
await ledger.release_tx(tx)
raise e
return tx
@staticmethod
def signature_hash_type(hash_type):
return hash_type
async def sign(self, funding_accounts: Iterable[BaseAccount]):
ledger, wallet = self.ensure_all_have_same_ledger_and_wallet(funding_accounts)
for i, txi in enumerate(self._inputs):
assert txi.script is not None
assert txi.txo_ref.txo is not None
txo_script = txi.txo_ref.txo.script
if txo_script.is_pay_pubkey_hash:
address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
private_key = await ledger.get_private_key_for_address(wallet, address)
assert private_key is not None, 'Cannot find private key for signing output.'
tx = self._serialize_for_signature(i)
txi.script.values['signature'] = \
private_key.sign(tx) + bytes((self.signature_hash_type(1),))
txi.script.values['pubkey'] = private_key.public_key.pubkey_bytes
txi.script.generate()
else:
raise NotImplementedError("Don't know how to spend this output.")
self._reset()
| 34.898276 | 105 | 0.613507 |
b30b8b3827cbfc1424e492fe819ce8edc3c79e8d | 2,454 | py | Python | analyses/Outdated/nmhc_YearlyComparisons/ethaneMethane.py | ARLlab/Summit | 1aa0d85cedebe85fab969df004409cc8f28595b3 | ["Apache-2.0"] | null | null | null | analyses/Outdated/nmhc_YearlyComparisons/ethaneMethane.py | ARLlab/Summit | 1aa0d85cedebe85fab969df004409cc8f28595b3 | ["Apache-2.0"] | 4 | 2020-04-30T01:43:09.000Z | 2022-02-12T07:05:51.000Z | analyses/Outdated/nmhc_YearlyComparisons/ethaneMethane.py | ARLlab/Summit | 1aa0d85cedebe85fab969df004409cc8f28595b3 | ["Apache-2.0"] | 4 | 2019-05-13T20:23:03.000Z | 2020-01-24T23:51:45.000Z |
"""
Created on Tuesday, March 19th, 2019.
This script plots the following ratio in a similar fashion to plotTest.py
1) Ethane / Methane
This code was written in Spyder via Anaconda Distribution [Python 3.7]
"""
## Import Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.pyplot import figure
figure(num=None, figsize=(8, 6), dpi=160, facecolor='w', edgecolor='k')
## Import Data Sets
from fileInput import fileLoad
nmhcData = fileLoad(r"C:\Users\ARL\Desktop\Python Code\Data\NMHC.XLSX")
methaneData = fileLoad(r"C:\Users\ARL\Desktop\Python Code\Data\Methane.XLSX")
numYears = np.linspace(2012,2018,num=((2018 - 2012)+1)) # number of years total
nmhcDate = nmhcData.loc[:,'DecYear'] # Variable describing the decimal Year
ch4Date = methaneData.loc[:,'DecYear']
hrs3 = 3 * 60 * 60 # three hours in seconds
from isleapyear import isleapyear
for i in numYears:
## Define date variables for given year
ethaneDate = nmhcData.loc[(nmhcDate >= i) & (nmhcDate < (i+1)),'DecYear'].values # Past 2012
ethaneDate = (ethaneDate - i) * (365 + isleapyear(i)) * 24 * 60 * 60 # Convert to seconds
methaneDate= methaneData.loc[(ch4Date >= i) & (ch4Date < (i+1)),'DecYear'].values
methaneDate = (methaneDate - i) * (365 + isleapyear(i))* 24 * 60* 60
ethane = nmhcData.loc[(nmhcDate >= i) & (nmhcDate < (i+1)),'ethane'].values # Gets ethane column, past 2012
methane = methaneData.loc[(ch4Date >= i) & (ch4Date < (i+1)),'MR'].values
ethaneMethane = np.zeros(np.size(ethane)) # Preallocate ethaneMethane matrix
## Iterate over each value in ethane
for j,value in np.ndenumerate(ethane):
high = ethaneDate[j] + hrs3 # Current Ethane timestep in seconds + 3 hours
low = ethaneDate[j] - hrs3 # current ethane timestep in seconds - 3 hours
# Get the average of all methane values between high and low
methaneAverage = np.mean(methane[(methaneDate[:] <= high) & (methaneDate[:] >= low)])
ethaneMethane[j] = value / methaneAverage # Puts ratios in matrix for plotting
## Plotting
plt.plot((ethaneDate/60/60/24),ethaneMethane,'.',alpha=0.5,label='%i'%i)
plt.xlabel('Day of Year',fontdict=None,labelpad=None) # Plot Xlabel
plt.ylabel('Mixing Ratio',fontdict=None,labelpad=None) # Plot Ylabel
plt.title('Summit Ethane / Methane from 2012-2018',fontdict=None,pad=None)
plt.legend(bbox_to_anchor=(1.04,1),loc="upper left")
| 43.821429 | 111 | 0.695192 |
7615226de6ac97e451ea6009c9dcaf05007b6f03 | 14,214 | py | Python | dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py | guoshupei/dolphinscheduler | 48d526f275f941d2068985b22747fd017a4d48e0 | [
"Apache-2.0"
]
| 2 | 2021-11-16T13:01:44.000Z | 2022-03-11T03:10:08.000Z | dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py | guoshupei/dolphinscheduler | 48d526f275f941d2068985b22747fd017a4d48e0 | [
"Apache-2.0"
]
| null | null | null | dolphinscheduler-python/pydolphinscheduler/src/pydolphinscheduler/core/process_definition.py | guoshupei/dolphinscheduler | 48d526f275f941d2068985b22747fd017a4d48e0 | [
"Apache-2.0"
]
| 3 | 2022-03-14T07:16:22.000Z | 2022-03-21T03:04:55.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module process definition, core class for workflow define."""
import json
from datetime import datetime
from typing import Any, Dict, List, Optional, Set
from pydolphinscheduler.constants import ProcessDefinitionReleaseState, TaskType
from pydolphinscheduler.core import configuration
from pydolphinscheduler.core.base import Base
from pydolphinscheduler.exceptions import PyDSParamException, PyDSTaskNoFoundException
from pydolphinscheduler.java_gateway import launch_gateway
from pydolphinscheduler.side import Project, Tenant, User
from pydolphinscheduler.utils.date import MAX_DATETIME, conv_from_str, conv_to_schedule
class ProcessDefinitionContext:
"""Class process definition context, use when task get process definition from context expression."""
_context_managed_process_definition: Optional["ProcessDefinition"] = None
@classmethod
def set(cls, pd: "ProcessDefinition") -> None:
"""Set attribute self._context_managed_process_definition."""
cls._context_managed_process_definition = pd
@classmethod
def get(cls) -> Optional["ProcessDefinition"]:
"""Get attribute self._context_managed_process_definition."""
return cls._context_managed_process_definition
@classmethod
def delete(cls) -> None:
"""Delete attribute self._context_managed_process_definition."""
cls._context_managed_process_definition = None
class ProcessDefinition(Base):
"""process definition object, will define process definition attribute, task, relation.
TODO: maybe we should rename this class, currently use DS object name.
:param user: The user for current process definition. Will create a new one if it do not exists. If your
parameter ``project`` already exists but project's create do not belongs to ``user``, will grant
``project`` to ``user`` automatically.
:param project: The project for current process definition. You could see the workflow in this project
thought Web UI after it :func:`submit` or :func:`run`. It will create a new project belongs to
``user`` if it does not exists. And when ``project`` exists but project's create do not belongs
to ``user``, will grant `project` to ``user`` automatically.
"""
# key attribute for identify ProcessDefinition object
_KEY_ATTR = {
"name",
"project",
"tenant",
"release_state",
"param",
}
_DEFINE_ATTR = {
"name",
"description",
"_project",
"_tenant",
"worker_group",
"timeout",
"release_state",
"param",
"tasks",
"task_definition_json",
"task_relation_json",
}
def __init__(
self,
name: str,
description: Optional[str] = None,
schedule: Optional[str] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
timezone: Optional[str] = configuration.WORKFLOW_TIME_ZONE,
user: Optional[str] = configuration.WORKFLOW_USER,
project: Optional[str] = configuration.WORKFLOW_PROJECT,
tenant: Optional[str] = configuration.WORKFLOW_TENANT,
worker_group: Optional[str] = configuration.WORKFLOW_WORKER_GROUP,
timeout: Optional[int] = 0,
release_state: Optional[str] = ProcessDefinitionReleaseState.ONLINE,
param: Optional[Dict] = None,
):
super().__init__(name, description)
self.schedule = schedule
self._start_time = start_time
self._end_time = end_time
self.timezone = timezone
self._user = user
self._project = project
self._tenant = tenant
self.worker_group = worker_group
self.timeout = timeout
self.release_state = release_state
self.param = param
self.tasks: dict = {}
# TODO how to fix circle import
self._task_relations: set["TaskRelation"] = set() # noqa: F821
self._process_definition_code = None
def __enter__(self) -> "ProcessDefinition":
ProcessDefinitionContext.set(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
ProcessDefinitionContext.delete()
@property
def tenant(self) -> Tenant:
"""Get attribute tenant."""
return Tenant(self._tenant)
@tenant.setter
def tenant(self, tenant: Tenant) -> None:
"""Set attribute tenant."""
self._tenant = tenant.name
@property
def project(self) -> Project:
"""Get attribute project."""
return Project(self._project)
@project.setter
def project(self, project: Project) -> None:
"""Set attribute project."""
self._project = project.name
@property
def user(self) -> User:
"""Get user object.
        For now we just get it from the python side and not from the java gateway side, so it may not be correct.
"""
return User(name=self._user, tenant=self._tenant)
@staticmethod
def _parse_datetime(val: Any) -> Any:
if val is None or isinstance(val, datetime):
return val
elif isinstance(val, str):
return conv_from_str(val)
else:
raise PyDSParamException("Do not support value type %s for now", type(val))
@property
def start_time(self) -> Any:
"""Get attribute start_time."""
return self._parse_datetime(self._start_time)
@start_time.setter
def start_time(self, val) -> None:
"""Set attribute start_time."""
self._start_time = val
@property
def end_time(self) -> Any:
"""Get attribute end_time."""
return self._parse_datetime(self._end_time)
@end_time.setter
def end_time(self, val) -> None:
"""Set attribute end_time."""
self._end_time = val
@property
def param_json(self) -> Optional[List[Dict]]:
"""Return param json base on self.param."""
# Handle empty dict and None value
if not self.param:
return []
return [
{
"prop": k,
"direct": "IN",
"type": "VARCHAR",
"value": v,
}
for k, v in self.param.items()
]
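        # For illustration: ``param={"foo": "bar"}`` is rendered as
        # ``[{"prop": "foo", "direct": "IN", "type": "VARCHAR", "value": "bar"}]``.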
@property
def task_definition_json(self) -> List[Dict]:
"""Return all tasks definition in list of dict."""
if not self.tasks:
return [self.tasks]
else:
return [task.get_define() for task in self.tasks.values()]
@property
def task_relation_json(self) -> List[Dict]:
"""Return all relation between tasks pair in list of dict."""
if not self.tasks:
return [self.tasks]
else:
self._handle_root_relation()
return [tr.get_define() for tr in self._task_relations]
@property
def schedule_json(self) -> Optional[Dict]:
"""Get schedule parameter json object. This is requests from java gateway interface."""
if not self.schedule:
return None
else:
start_time = conv_to_schedule(
self.start_time if self.start_time else datetime.now()
)
end_time = conv_to_schedule(
self.end_time if self.end_time else MAX_DATETIME
)
return {
"startTime": start_time,
"endTime": end_time,
"crontab": self.schedule,
"timezoneId": self.timezone,
}
    # TODO init DAG's tasks at the same location with default {x: 0, y: 0}
@property
def task_location(self) -> List[Dict]:
"""Return all tasks location for all process definition.
For now, we only set all location with same x and y valued equal to 0. Because we do not
find a good way to set task locations. This is requests from java gateway interface.
"""
if not self.tasks:
return [self.tasks]
else:
return [{"taskCode": task_code, "x": 0, "y": 0} for task_code in self.tasks]
@property
def task_list(self) -> List["Task"]: # noqa: F821
"""Return list of tasks objects."""
return list(self.tasks.values())
def _handle_root_relation(self):
"""Handle root task property :class:`pydolphinscheduler.core.task.TaskRelation`.
        A root task in the DAG does not have an upstream node, but we have to add a default
        upstream task with task_code equal to `0`. This is required by the java gateway interface.
"""
from pydolphinscheduler.core.task import TaskRelation
post_relation_code = set()
for relation in self._task_relations:
post_relation_code.add(relation.post_task_code)
for task in self.task_list:
if task.code not in post_relation_code:
root_relation = TaskRelation(pre_task_code=0, post_task_code=task.code)
self._task_relations.add(root_relation)
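        # Example with hypothetical task codes: given only the relation
        # 123 -> 456, code 456 is the sole post_task_code, so 123 is a root
        # and the synthetic relation 0 -> 123 is added for the java gateway.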
def add_task(self, task: "Task") -> None: # noqa: F821
"""Add a single task to process definition."""
self.tasks[task.code] = task
task._process_definition = self
def add_tasks(self, tasks: List["Task"]) -> None: # noqa: F821
"""Add task sequence to process definition, it a wrapper of :func:`add_task`."""
for task in tasks:
self.add_task(task)
def get_task(self, code: str) -> "Task": # noqa: F821
"""Get task object from process definition by given code."""
if code not in self.tasks:
raise PyDSTaskNoFoundException(
"Task with code %s can not found in process definition %",
(code, self.name),
)
return self.tasks[code]
    # TODO which type should be returned in this case
def get_tasks_by_name(self, name: str) -> Set["Task"]: # noqa: F821
"""Get tasks object by given name, if will return all tasks with this name."""
find = set()
for task in self.tasks.values():
if task.name == name:
find.add(task)
return find
def get_one_task_by_name(self, name: str) -> "Task": # noqa: F821
"""Get exact one task from process definition by given name.
Function always return one task even though this process definition have more than one task with
this name.
"""
tasks = self.get_tasks_by_name(name)
if not tasks:
raise PyDSTaskNoFoundException(f"Can not find task with name {name}.")
return tasks.pop()
def run(self):
"""Submit and Start ProcessDefinition instance.
        Shortcut for :func:`submit` followed by :func:`start`. Only manually started workflows are
        supported for now; scheduled runs are coming soon.
:return:
"""
self.submit()
self.start()
def _ensure_side_model_exists(self):
"""Ensure process definition side model exists.
        For now, the side objects include :class:`pydolphinscheduler.side.project.Project`,
        :class:`pydolphinscheduler.side.tenant.Tenant` and :class:`pydolphinscheduler.side.user.User`.
        If these models do not exist, they will be created with the default values in
:class:`pydolphinscheduler.constants.ProcessDefinitionDefault`.
"""
# TODO used metaclass for more pythonic
self.user.create_if_not_exists()
# Project model need User object exists
self.project.create_if_not_exists(self._user)
def _pre_submit_check(self):
"""Check specific condition satisfy before.
This method should be called before process definition submit to java gateway
For now, we have below checker:
* `self.param` should be set if task `switch` in this workflow.
"""
if (
any([task.task_type == TaskType.SWITCH for task in self.tasks.values()])
and self.param is None
):
raise PyDSParamException(
"Parameter param must be provider if task Switch in process definition."
)
def submit(self) -> int:
"""Submit ProcessDefinition instance to java gateway."""
self._ensure_side_model_exists()
self._pre_submit_check()
gateway = launch_gateway()
self._process_definition_code = gateway.entry_point.createOrUpdateProcessDefinition(
self._user,
self._project,
self.name,
str(self.description) if self.description else "",
json.dumps(self.param_json),
json.dumps(self.schedule_json) if self.schedule_json else None,
json.dumps(self.task_location),
self.timeout,
self.worker_group,
self._tenant,
# TODO add serialization function
json.dumps(self.task_relation_json),
json.dumps(self.task_definition_json),
None,
)
return self._process_definition_code
def start(self) -> None:
"""Create and start ProcessDefinition instance.
        which posts to `start-process-instance` on the java gateway.
"""
gateway = launch_gateway()
gateway.entry_point.execProcessInstance(
self._user,
self._project,
self.name,
"",
self.worker_group,
24 * 3600,
)
| 36.539846 | 108 | 0.632756 |
8660096cf23d7e280e43ba5ea068b556a0973643 | 1,042 | py | Python | app.py | mnh78614/Project | ed18860d00ea5ec576e8c908320535d7a3df9a92 | [
"MIT"
]
| null | null | null | app.py | mnh78614/Project | ed18860d00ea5ec576e8c908320535d7a3df9a92 | [
"MIT"
]
| null | null | null | app.py | mnh78614/Project | ed18860d00ea5ec576e8c908320535d7a3df9a92 | [
"MIT"
]
| null | null | null | #Create a calculator that asks the user for two numbers
m = "multiplication"
d = "division"
a = "addition"
s = "subtraction"
#function for multiplication
def multiplication():
num1 = input("Okay, enter a number: ")
num2 = input("Okay enter another number: ")
print(float(num1) * float(num2))
#function for division
def division():
num1 = input("Okay, enter a number: ")
num2 = input("Okay enter another number: ")
print(float(num1) / float(num2))
#function for addition
def addition ():
num1 = input("Okay, enter a number: ")
num2 = input("Okay enter another number: ")
print(float(num1) + float(num2))
#fuction for subtraction
def subtraction ():
num1 = input("Okay, enter a number: ")
num2 = input("Okay enter another number: ")
print(float(num1) - float(num2))
mathType = input("What kind of math do you want to do? [m], [d], [a], or [s]?")
if mathType == "m":
multiplication()
elif mathType == "d":
division()
elif mathType == "a":
addition()
else:
subtraction()
| 22.652174 | 79 | 0.643954 |
5c24163809d3a089d60146898329a58ce2084250 | 758 | py | Python | bookworm/speech.py | xingkong0113/bookworm | 7214067f48e7a951198806a1f9170e3fd8fc0cce | [
"MIT"
]
| 36 | 2020-11-15T03:21:39.000Z | 2022-03-05T01:11:26.000Z | bookworm/speech.py | xingkong0113/bookworm | 7214067f48e7a951198806a1f9170e3fd8fc0cce | [
"MIT"
]
| 90 | 2020-10-06T14:46:07.000Z | 2022-03-31T03:03:34.000Z | bookworm/speech.py | xingkong0113/bookworm | 7214067f48e7a951198806a1f9170e3fd8fc0cce | [
"MIT"
]
| 20 | 2020-09-30T17:40:44.000Z | 2022-03-17T19:59:53.000Z | # coding: utf-8
"""Screen reader and braille output."""
from accessible_output2.outputs.auto import Auto
from bookworm import config
from bookworm.logger import logger
log = logger.getChild(__name__)
_auto_output = None
def announce(message, urgent=False):
"""Speak and braille a message related to UI."""
global _auto_output
if not config.conf["general"]["announce_ui_messages"]:
return
if _auto_output is None:
try:
_auto_output = Auto()
except AttributeError:
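            # Assumption: Auto() typically fails here when the generated
            # win32com cache is corrupted; the lines below delete that cache
            # and retry the announcement.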
import shutil, win32com
shutil.rmtree(win32com.__gen_path__, ignore_errors=True)
return announce(message, urgent)
_auto_output.speak(message, interrupt=urgent)
_auto_output.braille(message)
| 24.451613 | 68 | 0.692612 |
474c6de9c1c531dcade629c4208d125f51290cba | 1,361 | py | Python | license_protected_downloads/tests/test_pep8.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
]
| null | null | null | license_protected_downloads/tests/test_pep8.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
]
| null | null | null | license_protected_downloads/tests/test_pep8.py | NexellCorp/infrastructure_server_fileserver | b2d0cd30b7658735f914c29e401a670d9bb42f92 | [
"Net-SNMP",
"Xnet",
"Info-ZIP",
"OML"
]
| null | null | null | # Copyright (C) 2012 Linaro Ltd.
#
# Author: Loic Minier <[email protected]>
#
# This file is part of Linaro Image Tools.
#
# Linaro Image Tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Linaro Image Tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
from testtools import TestCase
class TestPep8(TestCase):
def test_pep8(self):
# Errors we have to ignore for now: use pep8 error codes like 'E202'.
ignore = []
# Ignore return code.
proc = subprocess.Popen(['pep8',
'--repeat',
'--ignore=%s' % ','.join(ignore),
'--exclude=static',
'.'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
self.assertEquals('', stdout)
self.assertEquals('', stderr)
| 34.897436 | 77 | 0.667891 |
d17410c31930bec59be37a76440c7071a09e0749 | 500 | py | Python | test/mapreduce/avro_mapred.py | kikkomep/pydoop | 4f855ef775b925b8c9f2adf1c0ef13337323ee24 | [
"Apache-2.0"
]
| null | null | null | test/mapreduce/avro_mapred.py | kikkomep/pydoop | 4f855ef775b925b8c9f2adf1c0ef13337323ee24 | [
"Apache-2.0"
]
| null | null | null | test/mapreduce/avro_mapred.py | kikkomep/pydoop | 4f855ef775b925b8c9f2adf1c0ef13337323ee24 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pp
from pydoop.avrolib import AvroContext
class Mapper(api.Mapper):
def map(self, context):
context.emit('', context.value['population'])
class Reducer(api.Reducer):
def reduce(self, context):
context.emit('',sum(context.values))
FACTORY = pp.Factory(Mapper, Reducer)
CONTEXT = AvroContext
def __main__():
pp.run_task(FACTORY, private_encoding=True, context_class=CONTEXT)
| 20.833333 | 70 | 0.724 |
c492ea6afbcf73f7483d1b3e5225dad397d4ff0d | 5,769 | py | Python | zoe_master/preprocessing.py | AtosCodex/atos-zoe | 36919a787b7675a571674c2b3c61d3b8f98522cc | [
"Apache-2.0"
]
| null | null | null | zoe_master/preprocessing.py | AtosCodex/atos-zoe | 36919a787b7675a571674c2b3c61d3b8f98522cc | [
"Apache-2.0"
]
| null | null | null | zoe_master/preprocessing.py | AtosCodex/atos-zoe | 36919a787b7675a571674c2b3c61d3b8f98522cc | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2016, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer in front of the scheduler to perform request pre-processing."""
import logging
import os
import shutil
from zoe_lib.state import Execution, SQLManager
from zoe_lib.config import get_conf
from zoe_master.scheduler import ZoeBaseScheduler
from zoe_master.backends.interface import terminate_execution, node_list, list_available_images
log = logging.getLogger(__name__)
def _digest_application_description(state: SQLManager, execution: Execution):
"""Read an application description and expand it into services that can be deployed."""
if get_conf().backend == 'DockerEngine':
nodes = node_list()
images = []
for node in nodes:
images += list_available_images(node)
images = [name for image in images for name in image['names']]
if len(images) == 0:
log.warning('The image list reported by the back-end is empty')
for service_descr in execution.description['services']:
if service_descr['image'] not in images:
execution.set_error()
execution.set_error_message('image {} is not available'.format(service_descr['image']))
return False
for service_descr in execution.description['services']:
essential_count = service_descr['essential_count']
total_count = service_descr['total_count']
if essential_count > total_count:
execution.set_error()
execution.set_error_message('total_count is less than essential_count for service {}'.format(service_descr['name']))
return False
elastic_count = total_count - essential_count
counter = 0
for service_n_ in range(essential_count):
name = "{}{}".format(service_descr['name'], counter)
sid = state.services.insert(execution.id, name, service_descr['name'], service_descr, True)
# Ports
for port_descr in service_descr['ports']:
port_internal = str(port_descr['port_number']) + '/' + port_descr['protocol']
state.ports.insert(sid, port_internal, port_descr)
counter += 1
for service_n_ in range(elastic_count):
name = "{}{}".format(service_descr['name'], counter)
sid = state.services.insert(execution.id, name, service_descr['name'], service_descr, False)
# Ports
for port_descr in service_descr['ports']:
port_internal = str(port_descr['port_number']) + '/' + port_descr['protocol']
state.ports.insert(sid, port_internal, port_descr)
counter += 1
assert counter == total_count
if get_conf().scheduler_policy == 'DYNSIZE':
execution.set_size(execution.total_reservations.cores.min * execution.total_reservations.memory.min)
return True
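# Illustration with a hypothetical service description: total_count=3 and
# essential_count=2 produce services "web0" and "web1" marked essential and
# "web2" marked elastic, each with its ports copied from the description.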
def execution_submit(state: SQLManager, scheduler: ZoeBaseScheduler, execution: Execution):
"""Submit a new execution to the scheduler."""
if execution.status != execution.SUBMIT_STATUS:
log.warning('Trying to start an execution in state {}'.format(execution.status))
return
if _digest_application_description(state, execution):
execution.set_queued()
scheduler.incoming(execution)
def execution_terminate(scheduler: ZoeBaseScheduler, execution: Execution, reason: str):
"""Remove an execution from the scheduler."""
if execution.is_running or execution.status == execution.QUEUED_STATUS:
execution.set_cleaning_up()
execution.set_error_message(reason)
scheduler.terminate(execution)
elif execution.status == execution.CLEANING_UP_STATUS:
scheduler.terminate(execution)
elif execution.status == execution.SUBMIT_STATUS:
execution.set_terminated(reason)
elif execution.status == execution.STARTING_STATUS:
return # It is unsafe to terminate executions in these statuses
elif execution.status == execution.ERROR_STATUS:
terminate_execution(execution, reason)
elif execution.status == execution.TERMINATED_STATUS:
return
def restart_resubmit_scheduler(state: SQLManager, scheduler: ZoeBaseScheduler):
"""Restart work after a restart of the process."""
submitted_execs = state.executions.select(status=Execution.SUBMIT_STATUS)
for e in submitted_execs:
execution_submit(state, scheduler, e)
sched_execs = state.executions.select(status=Execution.QUEUED_STATUS)
for e in sched_execs:
scheduler.incoming(e)
clean_up_execs = state.executions.select(status=Execution.CLEANING_UP_STATUS)
for e in clean_up_execs:
scheduler.terminate(e)
starting_execs = state.executions.select(status=Execution.STARTING_STATUS)
for e in starting_execs:
scheduler.terminate(e)
scheduler.incoming(e)
def execution_delete(execution: Execution):
"""Remove an execution, must only be called if the execution is NOT running."""
assert not execution.is_active
path = os.path.join(get_conf().service_logs_base_path, get_conf().deployment_name, str(execution.id))
if path is None:
return
shutil.rmtree(path, ignore_errors=True)
| 40.914894 | 128 | 0.699775 |
96a94cd453926150ee7af04cd62e419d33a55392 | 3,154 | py | Python | BuildingApp/settings.py | sayakray111/App_Folder | 7cf966efb4bbff6dfb9b364198220fde062f0a7f | [
"MIT"
]
| null | null | null | BuildingApp/settings.py | sayakray111/App_Folder | 7cf966efb4bbff6dfb9b364198220fde062f0a7f | [
"MIT"
]
| null | null | null | BuildingApp/settings.py | sayakray111/App_Folder | 7cf966efb4bbff6dfb9b364198220fde062f0a7f | [
"MIT"
]
| null | null | null | """
Django settings for BuildingApp project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#%3trn(y#5izr=j@bret()krc)x%$kxo-r8xnmd*i(=*m83zc-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'DesignApp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'BuildingApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'BuildingApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.852459 | 91 | 0.695942 |
dde1571c05813f229b1011378c800326ecaa0d11 | 2,072 | py | Python | Temp.py | Milo980412/HealthReport | 904b056f884bb52a467da19b3338a9a22b6b1a8f | [
"Apache-2.0"
]
| null | null | null | Temp.py | Milo980412/HealthReport | 904b056f884bb52a467da19b3338a9a22b6b1a8f | [
"Apache-2.0"
]
| null | null | null | Temp.py | Milo980412/HealthReport | 904b056f884bb52a467da19b3338a9a22b6b1a8f | [
"Apache-2.0"
]
| null | null | null | import random
from selenium import webdriver
import time
import datetime
#import pandas as pd
#username = ["学号1","学号2",""]
username = ["学号"]
#password = ["密码1","密码2",""]
password = ["密码"]
#可以用pandas导入excel文件
#data = pd.read_excel('account.xlsx', dtype={'username': str})
def fill(username, password):
    # Log in
    # Use executable_path to specify the path of the Chrome or Edge driver
driver = webdriver.Chrome(executable_path="chromedriver.exe")
#driver = webdriver.Chrome(executable_path="C:\Program Files (x86)\Microsoft\Edge\Application\msedgedriver.exe")
#driver = webdriver.Chrome(executable_path="/usr/local/bin/chromedriver")
#driver = webdriver.Edge("C:\Program Files (x86)\Microsoft\Edge\Application\msedgedriver.exe")
    driver.maximize_window() # maximize the window
    driver.get("https://web-vpn.sues.edu.cn/") # open the site
usr = driver.find_element_by_id('username')
usr.send_keys(username)
pwd = driver.find_element_by_id('password')
pwd.send_keys(password)
driver.find_element_by_id("passbutton").click()
time.sleep(2)
print(datetime.datetime.now())
    print(str(username), "logged in successfully!")
    # Fill in the daily health report
driver.find_element_by_class_name("block-group__item__wrap").click()
    driver.switch_to.window(driver.window_handles[1]) # switch to the newly opened page
time.sleep(3)
    temperature = random.uniform(36.1, 36.9) # generate a random body temperature
temperature = format(temperature, '.1f')
print(temperature)
time.sleep(2)
TwInput = driver.find_element_by_xpath(
"/html/body/div[1]/div/div/div/div[1]/div[2]/div/form/div[18]/div[1]/div/div[2]/div/div/input")
    TwInput.clear() # clear the current temperature field
time.sleep(1)
    TwInput.send_keys(temperature) # enter the new temperature
driver.find_element_by_id("post").click()
time.sleep(1)
driver.find_element_by_class_name("layui-layer-btn0").click()
time.sleep(1)
print("填报成功!")
driver.quit()
#def multiple():
#for x in range(len(username)):
#fill(username[x], password[x])
#time.sleep(1)
if __name__ == '__main__':
    #for a single account
fill(username[0], password[0])
    #for multiple accounts, use multiple() to loop fill over every account
#multiple()
| 31.876923 | 116 | 0.684363 |
1277178230a3f9e4fa4b0946fb9828f0373f9806 | 29,727 | py | Python | custom_components/fpl_api/fpl_mod.py | Hojland/hass-fpl | 0492e47fe7c0bab17494f6b78c934fd83fba3abd | [
"Apache-2.0"
]
| null | null | null | custom_components/fpl_api/fpl_mod.py | Hojland/hass-fpl | 0492e47fe7c0bab17494f6b78c934fd83fba3abd | [
"Apache-2.0"
]
| 2 | 2021-10-11T21:15:11.000Z | 2021-10-11T21:52:21.000Z | custom_components/fpl_api/fpl_mod.py | Hojland/hass-fpl | 0492e47fe7c0bab17494f6b78c934fd83fba3abd | [
"Apache-2.0"
]
| null | null | null | """
The FPL module.
Fantasy Premier League API:
* /bootstrap-static
* /bootstrap-dynamic
* /elements
* /element-summary/{player_id}
* /entry/{user_id}
* /entry/{user_id}/cup
* /entry/{user_id}/event/{event_id}/picks
* /entry/{user_id}/history
* /entry/{user_id}/transfers
* /events
* /event/{event_id}/live
* /fixtures/?event={event_id}
* /game-settings
* /leagues-classic-standings/{league_id}
* /leagues-classic-standings/{league_id}
* /leagues-entries-and-h2h-matches/league/{league_id}
* /leagues-h2h-standings/{league_id}
* /my-team/{user_id}
* /teams
* /transfers
"""
import asyncio
import itertools
import os
import json
from urllib.request import urlopen
from fpl.constants import API_URLS
from fpl.models.classic_league import ClassicLeague
from fpl.models.fixture import Fixture
from fpl.models.gameweek import Gameweek
from fpl.models.h2h_league import H2HLeague
from fpl.models.player import Player, PlayerSummary
from fpl.models.team import Team
from fpl.models.user import User
from fpl.utils import (
average,
fetch,
get_current_user,
logged_in,
position_converter,
scale,
team_converter,
)
class FPL:
"""The FPL class."""
def __init__(self, session):
self.session = session
def init(self):
static = self.open_static_urls()
for k, v in static.items():
try:
v = {w["id"]: w for w in v}
except (KeyError, TypeError):
pass
setattr(self, k, v)
try:
setattr(
self,
"current_gameweek",
next(event for event in static["events"] if event["is_current"])["id"],
)
except StopIteration:
setattr(self, "current_gameweek", 0)
async def async_init(self, hass):
static = await hass.async_add_executor_job(self.open_static_urls)
for k, v in static.items():
try:
v = {w["id"]: w for w in v}
except (KeyError, TypeError):
pass
setattr(self, k, v)
try:
setattr(
self,
"current_gameweek",
next(event for event in static["events"] if event["is_current"])["id"],
)
except StopIteration:
setattr(self, "current_gameweek", 0)
def open_static_urls(self):
return json.loads(urlopen(API_URLS["static"]).read().decode("utf-8"))
async def get_user(self, user_id=None, return_json=False):
"""Returns the user with the given ``user_id``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/entry/91928/
:param user_id: A user's ID.
:type user_id: string or int
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`User` object. Defaults to ``False``.
:type return_json: bool
:rtype: :class:`User` or `dict`
"""
if user_id:
assert int(user_id) > 0, "User ID must be a positive number."
else:
# If no user ID provided get it from current session
try:
user = await get_current_user(self.session)
user_id = user["player"]["entry"]
except TypeError:
raise Exception(
"You must log in before using `get_user` if "
"you do not provide a user ID."
)
url = API_URLS["user"].format(user_id)
user = await fetch(self.session, url)
if return_json:
return user
return User(user, session=self.session)
async def get_teams(self, team_ids=None, return_json=False):
"""Returns either a list of *all* teams, or a list of teams with IDs in
the optional ``team_ids`` list.
Information is taken from:
https://fantasy.premierleague.com/api/bootstrap-static/
:param list team_ids: (optional) List containing the IDs of teams.
If not set a list of *all* teams will be returned.
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dict``s, if ``False`` returns a list of :class:`Team` objects.
Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
teams = getattr(self, "teams")
if team_ids:
team_ids = set(team_ids)
teams = [team for team in teams.values() if team["id"] in team_ids]
else:
teams = [team for team in teams.values()]
if return_json:
return teams
return [Team(team_information, self.session) for team_information in teams]
async def get_team(self, team_id, return_json=False):
"""Returns the team with the given ``team_id``.
Information is taken from:
https://fantasy.premierleague.com/api/bootstrap-static/
:param team_id: A team's ID.
:type team_id: string or int
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`Team` object. Defaults to ``False``.
:type return_json: bool
:rtype: :class:`Team` or ``dict``
For reference here is the mapping from team ID to team name:
.. code-block:: none
1 - Arsenal
2 - Aston Villa
3 - Brighton
4 - Burnley
5 - Chelsea
6 - Crystal Palace
7 - Everton
8 - Fulham
9 - Leicester
10 - Leeds
11 - Liverpool
12 - Man City
13 - Man Utd
14 - Newcastle
15 - Sheffield Utd
16 - Southampton
17 - Spurs
18 - West Brom
19 - West Ham
20 - Wolves
"""
assert 0 < int(team_id) < 21, "Team ID must be a number between 1 and 20."
teams = getattr(self, "teams")
team = next(team for team in teams.values() if team["id"] == int(team_id))
if return_json:
return team
return Team(team, self.session)
async def get_player_summary(self, player_id, return_json=False):
"""Returns a summary of the player with the given ``player_id``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/element-summary/1/
:param int player_id: A player's ID.
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`PlayerSummary` object. Defaults to
``False``.
:type return_json: bool
:rtype: :class:`PlayerSummary` or ``dict``
"""
assert int(player_id) > 0, "Player's ID must be a positive number"
url = API_URLS["player"].format(player_id)
player_summary = await fetch(self.session, url)
if return_json:
return player_summary
return PlayerSummary(player_summary)
async def get_player_summaries(self, player_ids, return_json=False):
"""Returns a list of summaries of players whose ID are
in the ``player_ids`` list.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/element-summary/1/
:param list player_ids: A list of player IDs.
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dict``s, if ``False`` returns a list of :class:`PlayerSummary`
objects. Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
if not player_ids:
return []
tasks = [
asyncio.ensure_future(
fetch(self.session, API_URLS["player"].format(player_id))
)
for player_id in player_ids
]
player_summaries = await asyncio.gather(*tasks)
if return_json:
return player_summaries
return [PlayerSummary(player_summary) for player_summary in player_summaries]
async def get_player(
self, player_id, players=None, include_summary=False, return_json=False
):
"""Returns the player with the given ``player_id``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/bootstrap-static/
https://fantasy.premierleague.com/api/element-summary/1/ (optional)
:param player_id: A player's ID.
:type player_id: string or int
:param list players: (optional) A list of players.
:param bool include_summary: (optional) Includes a player's summary
if ``True``.
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`Player` object. Defaults to
``False``.
:rtype: :class:`Player` or ``dict``
:raises ValueError: Player with ``player_id`` not found
"""
if not players:
players = getattr(self, "elements")
try:
player = next(
player for player in players.values() if player["id"] == player_id
)
except StopIteration:
raise ValueError(f"Player with ID {player_id} not found")
if include_summary:
player_summary = await self.get_player_summary(
player["id"], return_json=True
)
player.update(player_summary)
if return_json:
return player
return Player(player, self.session)
async def get_players(
self, player_ids=None, include_summary=False, return_json=False
):
"""Returns either a list of *all* players, or a list of players whose
IDs are in the given ``player_ids`` list.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/bootstrap-static/
https://fantasy.premierleague.com/api/element-summary/1/ (optional)
:param list player_ids: (optional) A list of player IDs
:param boolean include_summary: (optional) Includes a player's summary
if ``True``.
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dict``s, if ``False`` returns a list of :class:`Player`
objects. Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
players = getattr(self, "elements")
if not include_summary:
if player_ids:
players = [
player for player in players.values() if player["id"] in player_ids
]
else:
players = players.values()
if not return_json:
players = [Player(player, self.session) for player in players]
return players
if not player_ids:
player_ids = [player["id"] for player in players.values()]
tasks = [
asyncio.ensure_future(
self.get_player(player_id, players, include_summary, return_json)
)
for player_id in player_ids
]
players = await asyncio.gather(*tasks)
return players
async def get_fixture(self, fixture_id, return_json=False):
"""Returns the fixture with the given ``fixture_id``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/fixtures/
https://fantasy.premierleague.com/api/fixtures/?event=1
:param int fixture_id: The fixture's ID.
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`Fixture` object. Defaults to
``False``.
:type return_json: bool
:rtype: :class:`Fixture` or ``dict``
:raises ValueError: if fixture with ``fixture_id`` not found
"""
fixtures = await fetch(self.session, API_URLS["fixtures"])
try:
fixture = next(
fixture for fixture in fixtures if fixture["id"] == fixture_id
)
except StopIteration:
raise ValueError(f"Fixture with ID {fixture_id} not found")
fixture_gameweek = fixture["event"]
gameweek_fixtures = await fetch(
self.session, API_URLS["gameweek_fixtures"].format(fixture_gameweek)
)
try:
fixture = next(
fixture for fixture in gameweek_fixtures if fixture["id"] == fixture_id
)
except StopIteration:
raise ValueError(
f"Fixture with ID {fixture_id} not found in gameweek fixtures"
)
if return_json:
return fixture
return Fixture(fixture)
async def get_fixtures_by_id(self, fixture_ids, return_json=False):
"""Returns a list of all fixtures with IDs included in the
`fixture_ids` list.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/fixtures/
https://fantasy.premierleague.com/api/fixtures/?event=1
:param list fixture_ids: A list of fixture IDs.
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dict``s, if ``False`` returns a list of :class:`Fixture`
objects. Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
if not fixture_ids:
return []
fixtures = await fetch(self.session, API_URLS["fixtures"])
fixture_gameweeks = set(
fixture["event"] for fixture in fixtures if fixture["id"] in fixture_ids
)
tasks = [
asyncio.ensure_future(
fetch(self.session, API_URLS["gameweek_fixtures"].format(gameweek))
)
for gameweek in fixture_gameweeks
]
gameweek_fixtures = await asyncio.gather(*tasks)
merged_fixtures = list(itertools.chain(*gameweek_fixtures))
fixtures = [
fixture for fixture in merged_fixtures if fixture["id"] in fixture_ids
]
if return_json:
return fixtures
return [Fixture(fixture) for fixture in fixtures]
async def get_fixtures_by_gameweek(self, gameweek, return_json=False):
"""Returns a list of all fixtures of the given ``gameweek``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/fixtures/
https://fantasy.premierleague.com/api/fixtures/?event=1
:param gameweek: A gameweek.
:type gameweek: string or int
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dict``s, if ``False`` returns a list of :class:`Player`
objects. Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
fixtures = await fetch(
self.session, API_URLS["gameweek_fixtures"].format(gameweek)
)
if return_json:
return fixtures
return [Fixture(fixture) for fixture in fixtures]
async def get_fixtures(self, return_json=False):
"""Returns a list of *all* fixtures.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/fixtures/
https://fantasy.premierleague.com/api/fixtures/?event=1
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dicts``, if ``False`` returns a list of :class:`Fixture`
objects. Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
gameweeks = range(1, 39)
tasks = [
asyncio.ensure_future(
fetch(self.session, API_URLS["gameweek_fixtures"].format(gameweek))
)
for gameweek in gameweeks
]
gameweek_fixtures = await asyncio.gather(*tasks)
fixtures = list(itertools.chain(*gameweek_fixtures))
if return_json:
return fixtures
return [Fixture(fixture) for fixture in fixtures]
async def get_gameweek(self, gameweek_id, include_live=False, return_json=False):
"""Returns the gameweek with the ID ``gameweek_id``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/bootstrap-static/
https://fantasy.premierleague.com/api/event/1/live/
:param int gameweek_id: A gameweek's ID.
:param bool include_summary: (optional) Includes a gameweek's live data
if ``True``.
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`Gameweek` object. Defaults to
``False``.
:type return_json: bool
:rtype: :class:`Gameweek` or ``dict``
"""
static_gameweeks = getattr(self, "events")
try:
static_gameweek = next(
gameweek
for gameweek in static_gameweeks.values()
if gameweek["id"] == gameweek_id
)
except StopIteration:
raise ValueError(f"Gameweek with ID {gameweek_id} not found")
if include_live:
live_gameweek = await fetch(
self.session, API_URLS["gameweek_live"].format(gameweek_id)
)
# Convert element list to dict
live_gameweek["elements"] = {
element["id"]: element for element in live_gameweek["elements"]
}
# Include live bonus points
if not static_gameweek["finished"]:
fixtures = await self.get_fixtures_by_gameweek(gameweek_id)
fixtures = filter(lambda f: not f.finished, fixtures)
bonus_for_gameweek = []
for fixture in fixtures:
bonus = fixture.get_bonus(provisional=True)
bonus_for_gameweek.extend(bonus["a"] + bonus["h"])
bonus_for_gameweek = {
bonus["element"]: bonus["value"] for bonus in bonus_for_gameweek
}
for player_id, bonus_points in bonus_for_gameweek.items():
if live_gameweek["elements"][player_id]["stats"]["bonus"] == 0:
live_gameweek["elements"][player_id]["stats"][
"bonus"
] += bonus_points
live_gameweek["elements"][player_id]["stats"][
"total_points"
] += bonus_points
static_gameweek.update(live_gameweek)
if return_json:
return static_gameweek
return Gameweek(static_gameweek)
async def get_gameweeks(
self, gameweek_ids=None, include_live=False, return_json=False
):
"""Returns either a list of *all* gamweeks, or a list of gameweeks
whose IDs are in the ``gameweek_ids`` list.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/bootstrap-static/
https://fantasy.premierleague.com/api/event/1/live/
:param list gameweek_ids: (optional) A list of gameweek IDs.
:param return_json: (optional) Boolean. If ``True`` returns a list of
``dict``s, if ``False`` returns a list of :class:`Gameweek`
objects. Defaults to ``False``.
:type return_json: bool
:rtype: list
"""
if not gameweek_ids:
gameweek_ids = range(1, 39)
tasks = [
asyncio.ensure_future(
self.get_gameweek(gameweek_id, include_live, return_json)
)
for gameweek_id in gameweek_ids
]
gameweeks = await asyncio.gather(*tasks)
return gameweeks
async def get_classic_league(self, league_id, return_json=False):
"""Returns the classic league with the given ``league_id``. Requires
the user to have logged in using ``fpl.login()``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/leagues-classic/967/standings/
:param string league_id: A classic league's ID.
:type league_id: string or int
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`ClassicLeague` object. Defaults to
``False``.
:type return_json: bool
:rtype: :class:`ClassicLeague` or ``dict``
"""
if not logged_in(self.session):
raise Exception("User must be logged in.")
url = API_URLS["league_classic"].format(league_id)
league = await fetch(self.session, url)
if return_json:
return league
return ClassicLeague(league, session=self.session)
async def get_h2h_league(self, league_id, return_json=False):
"""Returns a `H2HLeague` object with the given `league_id`. Requires
the user to have logged in using ``fpl.login()``.
Information is taken from e.g.:
https://fantasy.premierleague.com/api/leagues-h2h-matches/league/946125/
:param league_id: A H2H league's ID.
:type league_id: string or int
:param return_json: (optional) Boolean. If ``True`` returns a ``dict``,
if ``False`` returns a :class:`H2HLeague` object. Defaults to
``False``.
:type return_json: bool
:rtype: :class:`H2HLeague` or ``dict``
"""
if not logged_in(self.session):
raise Exception("User must be logged in.")
url = API_URLS["league_h2h"].format(league_id)
league = await fetch(self.session, url)
if return_json:
return league
return H2HLeague(league, session=self.session)
async def login(self, email=None, password=None):
"""Returns a requests session with FPL login authentication.
:param string email: Email address for the user's Fantasy Premier
League account.
:param string password: Password for the user's Fantasy Premier League
account.
"""
if not email and not password:
email = os.getenv("FPL_EMAIL", None)
password = os.getenv("FPL_PASSWORD", None)
if not email or not password:
raise ValueError("Email and password must be set")
payload = {
"login": email,
"password": password,
"app": "plfpl-web",
"redirect_uri": "https://fantasy.premierleague.com/a/login",
}
login_url = "https://users.premierleague.com/accounts/login/"
async with self.session.post(login_url, data=payload) as response:
state = response.url.query["state"]
if state == "fail":
reason = response.url.query["reason"]
raise ValueError(f"Login not successful, reason: {reason}")
async def get_points_against(self):
"""Returns a dictionary containing the points scored against all teams
in the Premier League, split by position and location.
An example:
.. code-block:: javascript
{
"Man City": {
"all": {
"H": [3, ..., 1],
"A": [2, ..., 2]
},
"goalkeeper": {
"H": [3, ..., 3],
"A": [2, ..., 3]
},
"defender": {
"H": [1, ..., 2],
"A": [4, ..., 1]
},
"midfielder": {
"H": [2, ..., 1],
"A": [2, ..., 2]
},
"forward": {
"H": [1, ..., 2],
"A": [6, ..., 1]
}
},
...
}
:rtype: dict
"""
players = await self.get_players(include_summary=True, return_json=True)
points_against = {}
for player in players:
position = position_converter(player["element_type"]).lower()
for fixture in player["history"]:
if fixture["minutes"] == 0:
continue
points = fixture["total_points"]
opponent = team_converter(fixture["opponent_team"])
location = "H" if fixture["was_home"] else "A"
points_against.setdefault(
opponent,
{
"all": {"H": [], "A": []},
"goalkeeper": {"H": [], "A": []},
"defender": {"H": [], "A": []},
"midfielder": {"H": [], "A": []},
"forward": {"H": [], "A": []},
},
)
points_against[opponent]["all"][location].append(points)
points_against[opponent][position][location].append(points)
return points_against
async def FDR(self):
"""Creates a new Fixture Difficulty Ranking (FDR) based on the number
of points each team gives up to players in the Fantasy Premier League.
These numbers are also between 1.0 and 5.0 to give a similar ranking
system to the official FDR.
An example:
.. code-block:: javascript
{
"Man City": {
"all": {
"H": 4.4524439427082,
"A": 5
},
"goalkeeper": {
"H": 3.6208195949129,
"A": 5
},
"defender": {
"H": 3.747999604078,
"A": 5
},
"midfielder": {
"H": 4.6103045986504,
"A": 5
},
"forward": {
"H": 5,
"A": 3.9363219561895
}
},
...,
"Arsenal": {
"all": {
"H": 3.4414041151234,
"A": 4.2904529162594
},
"goalkeeper": {
"H": 4.1106924163919,
"A": 4.3867595818815
},
"defender": {
"H": 3.6720291204673,
"A": 4.3380917450181
},
"midfielder": {
"H": 3.3537357534825,
"A": 4.0706443384718
},
"forward": {
"H": 2.5143403441683,
"A": 4.205298013245
}
}
}
:rtype: dict
"""
def average_points_against(points_against):
"""Returns a dict with the average points scored against all teams,
per position and location.
:param dict points_against: A dict containing the points scored
against each team in the Premier League.
:rtype: dict
"""
for team, positions in points_against.items():
for position in positions.values():
position["H"] = average(position["H"])
position["A"] = average(position["A"])
points_against[team] = positions
return points_against
def get_extrema(points_against):
"""Returns the extrema for each position and location.
:param dict points_against: A dict containing the points scored
against each team in the Premier League.
:rtype: dict
"""
averages = {}
for _, positions in points_against.items():
for position, average in positions.items():
averages.setdefault(position, {"H": [], "A": []})
averages[position]["H"].append(average["H"])
averages[position]["A"].append(average["A"])
for position, locations in averages.items():
min_h = min(locations["H"])
min_a = min(locations["A"])
max_h = max(locations["H"])
max_a = max(locations["A"])
averages[position]["H"] = [min_h, max_h]
averages[position]["A"] = [min_a, max_a]
return averages
def calculate_fdr(average_points, extrema):
"""Returns a dict containing the FDR for each team, which is
calculated by scaling the average points conceded per position
between 1.0 and 5.0 using the given extrema.
:param dict points_against: A dict containing the points scored
against each team in the Premier League.
:param dict extrema: A dict containing the extrema for each
position and location.
:rtype: dict
"""
for team, positions in average_points.items():
for position, locations in positions.items():
min_h, max_h = extrema[position]["H"]
min_a, max_a = extrema[position]["A"]
fdr_h = scale(locations["H"], 5.0, 1.0, min_h, max_h)
fdr_a = scale(locations["A"], 5.0, 1.0, min_a, max_a)
average_points[team][position]["H"] = fdr_h
average_points[team][position]["A"] = fdr_a
return average_points
points_against = await self.get_points_against()
average_points = average_points_against(points_against)
extrema = get_extrema(average_points)
fdr = calculate_fdr(average_points, extrema)
return fdr
| 34.687281 | 87 | 0.550072 |
63a46f3517865f77bf9a258a93ef84259da5567e | 4,662 | py | Python | examples/text_correction/ernie-csc/sighan_evaluate.py | JeremyZhao1998/PaddleNLP | 5a34684a7f0c8a186043fed386be4b62cb85fb15 | [
"Apache-2.0"
]
| 7,091 | 2021-02-05T13:56:25.000Z | 2022-03-31T11:42:50.000Z | examples/text_correction/ernie-csc/sighan_evaluate.py | JeremyZhao1998/PaddleNLP | 5a34684a7f0c8a186043fed386be4b62cb85fb15 | [
"Apache-2.0"
]
| 844 | 2021-02-10T01:09:29.000Z | 2022-03-31T12:12:58.000Z | examples/text_correction/ernie-csc/sighan_evaluate.py | JeremyZhao1998/PaddleNLP | 5a34684a7f0c8a186043fed386be4b62cb85fb15 | [
"Apache-2.0"
]
| 1,035 | 2021-02-05T14:26:48.000Z | 2022-03-31T11:42:57.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--pred_file", "-p", required=True, type=str, help="")
parser.add_argument("--truth_file", "-t", required=True, type=str, help="")
args = parser.parse_args()
def main(args):
detect_tp, correct_tp, pos, neg, fp = 0, 0, 0, 0, 0
pred_dict = dict()
truth_dict = dict()
fpred = open(args.pred_file, 'r', encoding='utf-8')
ftruth = open(args.truth_file, 'r', encoding='utf-8')
for idx, (pred, truth) in enumerate(zip(fpred, ftruth)):
pred_tokens = pred.strip().split(" ")
truth_tokens = truth.strip().split(" ")
pred_id = pred_tokens[0]
truth_id = truth_tokens[0]
pred_tokens = pred_tokens[1:]
truth_tokens = truth_tokens[1:]
detect_truth_positions = [
int(truth_token.strip(","))
for i, truth_token in enumerate(truth_tokens) if i % 2 == 0
]
correct_truth_tokens = [
truth_token.strip(",") for i, truth_token in enumerate(truth_tokens)
if i % 2 == 1
]
detect_pred_positions = [
int(pred_token.strip(","))
for i, pred_token in enumerate(pred_tokens) if i % 2 == 0
]
correct_pred_tokens = [
pred_token.strip(",") for i, pred_token in enumerate(pred_tokens)
if i % 2 == 1
]
pred_dict[pred_id] = (detect_pred_positions, correct_pred_tokens)
truth_dict[truth_id] = (detect_truth_positions, correct_truth_tokens)
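        # For illustration (made-up IDs): a truth line like "pid1, 2, X" marks
        # an error at position 2 whose correction is token X, while "pid2, 0"
        # means the sentence has no error -- position 0 is the no-error
        # sentinel checked below.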
assert sorted(pred_dict.keys()) == sorted(truth_dict.keys(
)), "Prediction file should have all prediction result in truth file"
    for pid, prediction in pred_dict.items():
        truth = truth_dict[pid]
        if prediction[0][0] != 0:
            pos += 1
            if sorted(zip(*prediction)) == sorted(zip(*truth)):
                correct_tp += 1
            if truth[0][0] == 0:
                fp += 1
        if truth[0][0] != 0:
            if sorted(prediction[0]) == sorted(truth[0]):
                detect_tp += 1
            neg += 1
eps = 1e-9
# Detection level
detect_pos = detect_tp + fp
if detect_pos > 0 and neg > 0:
detect_precision = detect_tp * 1.0 / detect_pos
detect_recall = detect_tp * 1.0 / neg
detect_f1 = 2. * detect_precision * detect_recall / (
detect_precision + detect_recall + eps)
else:
detect_precision = 0
detect_recall = 0
detect_f1 = 0
# Correction level
correct_pos = correct_tp + fp
if correct_pos > 0 and neg > 0:
correct_precision = correct_tp * 1.0 / correct_pos
correct_recall = correct_tp * 1.0 / neg
correct_f1 = 2. * correct_precision * correct_recall / (
correct_precision + correct_recall + eps)
else:
correct_precision = 0
correct_recall = 0
correct_f1 = 0
print("==========================================================")
print("Overall Performance")
print("==========================================================")
print("\nDetection Level")
print("\tPrecision = {:.4f} ({}/{})".format(detect_precision, detect_tp,
detect_pos))
print("\tRecall = {:.4f} ({}/{})".format(detect_recall, detect_tp, neg))
print("\tF1-Score = {:.4f} ((2*{:.4f}*{:.4f})/({:.4f}+{:.4f}))".format(
detect_f1, detect_precision, detect_recall, detect_precision,
detect_recall))
print("\nCorrection Level")
print("\tPrecision = {:.4f} ({}/{})".format(correct_precision, correct_tp,
correct_pos))
print("\tRecall = {:.4f} ({}/{})".format(correct_recall, correct_tp, neg))
print("\tF1-Score = {:.4f} ((2*{:.4f}*{:.4f})/({:.4f}+{:.4f}))".format(
correct_f1, correct_precision, correct_recall, correct_precision,
correct_recall))
print("==========================================================\n")
if __name__ == "__main__":
main(args)
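# Example invocation (file paths are hypothetical):
#   python sighan_evaluate.py --pred_file predict.txt --truth_file truth.txt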
| 37 | 80 | 0.573574 |
64ec0801b5d8adc663cb05b52f674b1948a72d78 | 354,336 | py | Python | pandas/core/frame.py | AnglinaBhambra/pandas | c03164e8daa812193baa99378fded16695f3ef51 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1 | 2021-04-18T09:51:04.000Z | 2021-04-18T09:51:04.000Z | pandas/core/frame.py | AnglinaBhambra/pandas | c03164e8daa812193baa99378fded16695f3ef51 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1 | 2021-05-11T00:05:40.000Z | 2021-05-11T00:05:40.000Z | pandas/core/frame.py | AnglinaBhambra/pandas | c03164e8daa812193baa99378fded16695f3ef51 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | 1 | 2018-08-02T06:29:14.000Z | 2018-08-02T06:29:14.000Z |
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
import functools
from io import StringIO
import itertools
import mmap
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Callable,
Hashable,
Iterable,
Iterator,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
lib,
properties,
)
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
AnyArrayLike,
ArrayLike,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
Dtype,
FilePathOrBuffer,
FloatFormatType,
FormattersType,
FrameOrSeriesUnion,
Frequency,
IndexKeyFunc,
IndexLabel,
Level,
NpDtype,
PythonFuncType,
Renamer,
Scalar,
StorageOptions,
Suffixes,
ValueKeyFunc,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_2d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_native,
maybe_convert_platform,
maybe_downcast_to_dtype,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
common as com,
generic,
nanops,
ops,
)
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import (
reconstruct_func,
relabel_result,
)
from pandas.core.array_algos.take import take_2d_multi
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import (
extract_array,
sanitize_array,
sanitize_masked_array,
)
from pandas.core.generic import (
NDFrame,
_shared_docs,
)
from pandas.core.indexers import check_key_length
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import (
MultiIndex,
maybe_droplevels,
)
from pandas.core.indexing import (
check_bool_indexer,
convert_to_index_sliceable,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
)
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
dict_to_mgr,
mgr_to_mgr,
ndarray_to_mgr,
nested_data_to_arrays,
rec_array_to_mgr,
reorder_arrays,
to_arrays,
treat_as_nested,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.sorting import (
get_group_index,
lexsort_indexer,
nargsort,
)
from pandas.io.common import get_handle
from pandas.io.formats import (
console,
format as fmt,
)
from pandas.io.formats.info import (
BaseInfo,
DataFrameInfo,
)
import pandas.plotting
if TYPE_CHECKING:
from typing import Literal
from pandas._typing import (
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
)
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.resample import Resampler
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"inplace": """
inplace : bool, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
_numeric_only_doc = """numeric_only : bool or None, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame when data does not have them,
defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels,
will perform column selection instead.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool or None, default None
Copy data from inputs.
For dict data, the default of None behaves like ``copy=True``. For DataFrame
or 2d ndarray input, the default of None behaves like ``copy=False``.
.. versionchanged:: 1.3.0
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from a numpy ndarray that has labeled columns:
>>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
>>> df3 = pd.DataFrame(data, columns=['c', 'a'])
...
>>> df3
c a
0 3 1
1 6 4
2 9 7
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
_accessors: set[str] = {"sparse"}
_hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([])
_mgr: BlockManager | ArrayManager
@property
def _constructor(self) -> type[DataFrame]:
return DataFrame
_constructor_sliced: type[Series] = Series
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Axes | None = None,
columns: Axes | None = None,
dtype: Dtype | None = None,
copy: bool | None = None,
):
if copy is None:
if isinstance(data, dict) or data is None:
# retain pre-GH#38939 default behavior
copy = True
else:
copy = False
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, (BlockManager, ArrayManager)):
# first check if a Manager is passed without any other arguments
# -> use fastpath (without checking Manager type)
if index is None and columns is None and dtype is None and not copy:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
manager = get_option("mode.data_manager")
if isinstance(data, (BlockManager, ArrayManager)):
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
# GH#38939 de facto copy defaults to False only in non-dict cases
mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = rec_array_to_mgr(
data,
index,
columns,
dtype,
copy,
typ=manager,
)
warnings.warn(
"Support for MaskedRecords is deprecated and will be "
"removed in a future version. Pass "
"{name: data[name] for name in data.dtype.names} instead.",
FutureWarning,
stacklevel=2,
)
# a masked array
else:
data = sanitize_masked_array(data)
mgr = ndarray_to_mgr(
data,
index,
columns,
dtype=dtype,
copy=copy,
typ=manager,
)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
# i.e. numpy structured array
mgr = rec_array_to_mgr(
data,
index,
columns,
dtype,
copy,
typ=manager,
)
elif getattr(data, "name", None) is not None:
# i.e. Series/Index with non-None name
mgr = dict_to_mgr(
# error: Item "ndarray" of "Union[ndarray, Series, Index]" has no
# attribute "name"
{data.name: data}, # type: ignore[union-attr]
index,
columns,
dtype=dtype,
typ=manager,
)
else:
mgr = ndarray_to_mgr(
data,
index,
columns,
dtype=dtype,
copy=copy,
typ=manager,
)
# For data is list-like, or Iterable (will consume into list)
elif is_list_like(data):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if treat_as_nested(data):
if columns is not None:
# error: Argument 1 to "ensure_index" has incompatible type
# "Collection[Any]"; expected "Union[Union[Union[ExtensionArray,
# ndarray], Index, Series], Sequence[Any]]"
columns = ensure_index(columns) # type: ignore[arg-type]
arrays, columns, index = nested_data_to_arrays(
# error: Argument 3 to "nested_data_to_arrays" has incompatible
# type "Optional[Collection[Any]]"; expected "Optional[Index]"
data,
columns,
index, # type: ignore[arg-type]
dtype,
)
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
typ=manager,
)
else:
mgr = ndarray_to_mgr(
data,
index,
columns,
dtype=dtype,
copy=copy,
typ=manager,
)
else:
mgr = dict_to_mgr(
{},
index,
columns,
dtype=dtype,
typ=manager,
)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if isinstance(dtype, ExtensionDtype):
# TODO(EA2D): special case not needed with 2D EAs
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(
values, columns, index, columns, dtype=None, typ=manager
)
else:
arr2d = construct_2d_arraylike_from_scalar(
data,
len(index),
len(columns),
dtype,
copy,
)
mgr = ndarray_to_mgr(
arr2d,
index,
columns,
dtype=arr2d.dtype,
copy=False,
typ=manager,
)
# ensure correct Manager type according to settings
mgr = mgr_to_mgr(mgr, typ=manager)
NDFrame.__init__(self, mgr)
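    # Rough dispatch map for the constructor above (illustrative summary only,
    # based on the branches in __init__; constructor calls are hypothetical):
    #   pd.DataFrame({"a": [1, 2]})               -> dict_to_mgr
    #   pd.DataFrame(np.ones((2, 2)))             -> ndarray_to_mgr
    #   pd.DataFrame([{"a": 1}, {"a": 2}])        -> nested_data_to_arrays + arrays_to_mgr
    #   pd.DataFrame(0, index=idx, columns=cols)  -> construct_2d_arraylike_from_scalar + ndarray_to_mgr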
# ----------------------------------------------------------------------
@property
def axes(self) -> list[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape : Tuple of array dimensions.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if isinstance(self._mgr, ArrayManager):
return len({arr.dtype for arr in self._mgr.arrays}) == 1
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if isinstance(self._mgr, ArrayManager):
return False
blocks = self._mgr.blocks
if len(blocks) != 1:
return False
return not self._mgr.any_extension_types
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
        In case of a non-interactive session, no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if max_rows is not None: # unlimited rows
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(line) for line in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> str | None:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
)
return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: FilePathOrBuffer[str] | None = None,
columns: Sequence[str] | None = None,
col_space: int | None = None,
header: bool | Sequence[str] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: fmt.FormattersType | None = None,
float_format: fmt.FloatFormatType | None = None,
sparsify: bool | None = None,
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
min_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: int | None = None,
max_colwidth: int | None = None,
encoding: str | None = None,
) -> str | None:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[tuple[Hashable, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[tuple[Hashable, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(
self, index: bool = True, name: str | None = "Pandas"
) -> Iterable[tuple[Any, ...]]:
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
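    # Note: ``rename=True`` above is what maps duplicate or otherwise invalid
    # column names to positional field names. Illustrative sketch (hypothetical frame):
    #   pd.DataFrame([[1, 2]], columns=["a b", "a b"]).itertuples()
    # yields Pandas(Index=0, _1=1, _2=2).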
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
@overload
def dot(self, other: Series) -> Series:
...
@overload
def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame:
...
def dot(self, other: AnyArrayLike | FrameOrSeriesUnion) -> FrameOrSeriesUnion:
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
        values of another Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other as a DataFrame.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
        The dot method also works if other is a np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
@overload
def __matmul__(self, other: Series) -> Series:
...
@overload
def __matmul__(
self, other: AnyArrayLike | FrameOrSeriesUnion
) -> FrameOrSeriesUnion:
...
def __matmul__(
self, other: AnyArrayLike | FrameOrSeriesUnion
) -> FrameOrSeriesUnion:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(
cls,
data,
orient: str = "columns",
dtype: Dtype | None = None,
columns=None,
) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(
self,
dtype: NpDtype | None = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient: str = "dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_native, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, v) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table: str,
project_id: str | None = None,
chunksize: int | None = None,
reauth: bool = False,
if_exists: str = "fail",
auth_local_webserver: bool = False,
table_schema: list[dict[str, str]] | None = None,
location: str | None = None,
progress_bar: bool = True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame
            columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
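    # Illustrative usage sketch (requires the optional pandas-gbq dependency;
    # the dataset, table, and project names below are hypothetical):
    #   df.to_gbq("my_dataset.my_table", project_id="my-project", if_exists="append")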
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float: bool = False,
nrows: int | None = None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arr_columns = Index(arr_columns_list)
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns)
if coerce_float:
for i, arr in enumerate(arrays):
if arr.dtype == object:
# error: Argument 1 to "maybe_convert_objects" has
# incompatible type "Union[ExtensionArray, ndarray]";
# expected "ndarray"
arrays[i] = lib.maybe_convert_objects(
arr, # type: ignore[arg-type]
try_float=True,
)
arr_columns = ensure_index(arr_columns)
if columns is None:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
manager = get_option("mode.data_manager")
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns, typ=manager)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
# error: List item 0 has incompatible type "ArrayLike"; expected
# "ndarray"
ix_vals = [self.index.values] # type: ignore[list-item]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
count = 0
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in the DataFrame's array of indices or
# columns, whichever is applicable).
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[type, dtype, str]"; expected "dtype"
formats.append(dtype_mapping) # type: ignore[arg-type]
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
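# Illustrative sketch of the dtype-mapping resolution above: dictionary keys may
# be labels or positional indices, and labels are checked first. The small frame
# ``df`` and the chosen dtypes below are assumptions for the example only.
#
#   df = pd.DataFrame({"A": [1, 2], "B": [0.5, 0.75]}, index=["a", "b"])
#   rec = df.to_records(column_dtypes={"A": "int32", 1: "<f4"},
#                       index_dtypes="<S1")
#   # field dtypes follow the mapping: index -> S1, A -> int32, B -> float32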
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Dtype | None = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays, already in the form
in which they will be stored in a block (numpy ndarray or
ExtensionArray), have the same length as and are aligned with the
index, and that `columns` and `index` are already Index objects.
Returns
-------
DataFrame
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
manager = get_option("mode.data_manager")
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
typ=manager,
)
return cls(mgr)
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
time_stamp: datetime.datetime | None = None,
data_label: str | None = None,
variable_labels: dict[Hashable, str] | None = None,
version: int | None = 114,
convert_strl: Sequence[Hashable] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {{114, 117, 118, 119, None}}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
Version 119 should usually only be used when the number of
variables exceeds the capacity of dta format 118. Exporting
smaller datasets in format 119 may have unintended consequences,
and, as of November 2020, Stata SE cannot read version 119 files.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to the Stata StrL format. Only
available if version is 117 or higher. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies
compression mode. Compression mode must be one of {{'infer', 'gzip',
'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and
`fname` is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression). If dict and compression mode is one of {{'zip',
'gzip', 'bz2'}}, or inferred as one of the above, the other entries
are passed as additional compression options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore[call-arg]
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
**kwargs,
)
writer.write_file()
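# Illustrative sketch of the version dispatch above: StrL columns require
# format 117 or later, and dates are mapped through ``convert_dates``. The
# frame, column names and the file name 'cities.dta' are assumptions for the
# example only.
#
#   df = pd.DataFrame({"city": ["Oslo", "Lima"],
#                      "when": pd.to_datetime(["2020-01-01", "2020-02-01"])})
#   df.to_stata("cities.dta", version=117, convert_dates={"when": "td"},
#               convert_strl=["city"], write_index=False)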
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
storage_options=_shared_docs["storage_options"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+
""",
)
def to_markdown(
self,
buf: IO[str] | str | None = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> str | None:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=2,
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
assert not isinstance(handles.handle, (str, mmap.mmap))
handles.handle.writelines(result)
return None
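# Illustrative sketch: keyword arguments other than ``buf``/``mode``/``index``
# are forwarded to ``tabulate.tabulate`` as shown in the body above. The frame
# below is an assumption for the example only.
#
#   df = pd.DataFrame({"animal": ["elk", "pig"], "legs": [4, 4]})
#   text = df.to_markdown(index=False, tablefmt="github")
#   # returns a GitHub-flavoured Markdown table as a string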
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: FilePathOrBuffer | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
partition_cols: list[str] | None = None,
storage_options: StorageOptions = None,
**kwargs,
) -> bytes | None:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str or file-like object, default None
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handle
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects. If path is None,
a bytes object is returned.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use an io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
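# Illustrative sketch of the partitioned-output path described above: when
# ``partition_cols`` is given, ``path`` must be a string and one file is
# written per partition value. The frame, the directory name 'dataset_dir' and
# the availability of pyarrow are assumptions for the example only.
#
#   df = pd.DataFrame({"year": [2020, 2020, 2021], "value": [1.0, 2.0, 3.0]})
#   df.to_parquet("dataset_dir", engine="pyarrow", partition_cols=["year"],
#                 compression="snappy")
#   # round-trip: pd.read_parquet("dataset_dir")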
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf: FilePathOrBuffer[str] | None = None,
columns: Sequence[str] | None = None,
col_space: ColspaceArgType | None = None,
header: bool | Sequence[str] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: FormattersType | None = None,
float_format: FloatFormatType | None = None,
sparsify: bool | None = None,
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool | str = False,
decimal: str = ".",
bold_rows: bool = True,
classes: str | list | tuple | None = None,
escape: bool = True,
notebook: bool = False,
border: int | None = None,
table_id: str | None = None,
render_links: bool = False,
encoding: str | None = None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter would be in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
@doc(storage_options=generic._shared_docs["storage_options"])
def to_xml(
self,
path_or_buffer: FilePathOrBuffer | None = None,
index: bool = True,
root_name: str | None = "data",
row_name: str | None = "row",
na_rep: str | None = None,
attr_cols: str | list[str] | None = None,
elem_cols: str | list[str] | None = None,
namespaces: dict[str | None, str] | None = None,
prefix: str | None = None,
encoding: str = "utf-8",
xml_declaration: bool | None = True,
pretty_print: bool | None = True,
parser: str | None = "lxml",
stylesheet: FilePathOrBuffer | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> str | None:
"""
Render a DataFrame to an XML document.
.. versionadded:: 1.3.0
Parameters
----------
path_or_buffer : str, path object or file-like object, optional
File to write output to. If None, the output is returned as a
string.
index : bool, default True
Whether to include index in XML document.
root_name : str, default 'data'
The name of root element in XML document.
row_name : str, default 'row'
The name of row element in XML document.
na_rep : str, optional
Missing data representation.
attr_cols : list-like, optional
List of columns to write as attributes in row element.
Hierarchical columns will be flattened with underscore
delimiting the different levels.
elem_cols : list-like, optional
List of columns to write as children in row element. By default,
all columns output as children of row element. Hierarchical
columns will be flattened with underscore delimiting the
different levels.
namespaces : dict, optional
All namespaces to be defined in root element. Keys of dict
should be prefix names and values of dict corresponding URIs.
Default namespaces should be given empty string key. For
example, ::
namespaces = {{"": "https://example.com"}}
prefix : str, optional
Namespace prefix to be used for every element and/or attribute
in document. This should be one of the keys in ``namespaces``
dict.
encoding : str, default 'utf-8'
Encoding of the resulting document.
xml_declaration : bool, default True
Whether to include the XML declaration at start of document.
pretty_print : bool, default True
Whether output should be pretty printed with indentation and
line breaks.
parser : {{'lxml','etree'}}, default 'lxml'
Parser module to use for building of tree. Only 'lxml' and
'etree' are supported. With 'lxml', the ability to use XSLT
stylesheet is supported.
stylesheet : str, path object or file-like object, optional
A URL, file-like object, or a raw string containing an XSLT
script used to transform the raw XML output. Script should use
layout of elements and attributes from original output. This
argument requires ``lxml`` to be installed. Only XSLT 1.0
scripts, and not later versions, are currently supported.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly compression of the output data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buffer is a string ending in
'.gz', '.bz2', '.zip', or '.xz', respectively, and no compression
otherwise. If using 'zip', the resulting ZIP archive contains a
single data file. Set to None for no compression.
{storage_options}
Returns
-------
None or str
If ``io`` is None, returns the resulting XML format as a
string. Otherwise returns None.
See Also
--------
to_json : Convert the pandas object to a JSON string.
to_html : Convert DataFrame to a html.
Examples
--------
>>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
... 'degrees': [360, 360, 180],
... 'sides': [4, np.nan, 3]}})
>>> df.to_xml() # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>
>>> df.to_xml(attr_cols=[
... 'index', 'shape', 'degrees', 'sides'
... ]) # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row index="0" shape="square" degrees="360" sides="4.0"/>
<row index="1" shape="circle" degrees="360"/>
<row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>
>>> df.to_xml(namespaces={{"doc": "https://example.com"}},
... prefix="doc") # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="https://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>
"""
from pandas.io.formats.xml import (
EtreeXMLFormatter,
LxmlXMLFormatter,
)
lxml = import_optional_dependency("lxml.etree", errors="ignore")
TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter]
if parser == "lxml":
if lxml is not None:
TreeBuilder = LxmlXMLFormatter
else:
raise ImportError(
"lxml not found, please install or use the etree parser."
)
elif parser == "etree":
TreeBuilder = EtreeXMLFormatter
else:
raise ValueError("Values for parser can only be lxml or etree.")
xml_formatter = TreeBuilder(
self,
path_or_buffer=path_or_buffer,
index=index,
root_name=root_name,
row_name=row_name,
na_rep=na_rep,
attr_cols=attr_cols,
elem_cols=elem_cols,
namespaces=namespaces,
prefix=prefix,
encoding=encoding,
xml_declaration=xml_declaration,
pretty_print=pretty_print,
stylesheet=stylesheet,
compression=compression,
storage_options=storage_options,
)
return xml_formatter.write_output()
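# Illustrative sketch of the parser dispatch above: 'lxml' enables XSLT via
# ``stylesheet``, while 'etree' relies only on the standard library. The frame
# and the chosen element names below are assumptions for the example only.
#
#   df = pd.DataFrame({"shape": ["square", "circle"], "sides": [4, None]})
#   xml = df.to_xml(root_name="shapes", row_name="shape_row",
#                   attr_cols=["index", "shape", "sides"], parser="etree",
#                   xml_declaration=False)
#   # path_or_buffer is None, so the XML document is returned as a string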
# ----------------------------------------------------------------------
@Substitution(
klass="DataFrame",
type_sub=" and columns",
max_cols_sub=dedent(
"""\
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used."""
),
show_counts_sub=dedent(
"""\
show_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the DataFrame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
null_counts : bool, optional
.. deprecated:: 1.2.0
Use show_counts instead."""
),
examples_sub=dedent(
"""\
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 165.9 MB"""
),
see_also_sub=dedent(
"""\
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
version_added_sub="",
)
@doc(BaseInfo.render)
def info(
self,
verbose: bool | None = None,
buf: IO[str] | None = None,
max_cols: int | None = None,
memory_usage: bool | str | None = None,
show_counts: bool | None = None,
null_counts: bool | None = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=2,
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
).append(result)
return result
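# Illustrative sketch of the ``deep`` option above: object columns report only
# pointer storage unless ``deep=True`` interrogates the Python objects. The
# frame below is an assumption for the example only.
#
#   df = pd.DataFrame({"word": ["spam"] * 1000})
#   shallow = df.memory_usage()        # per-column pointer storage plus Index
#   deep = df.memory_usage(deep=True)  # also counts the underlying str objects
#   as_category = df["word"].astype("category").memory_usage(deep=True)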
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_arr = self.values.T
if copy:
new_arr = new_arr.copy()
result = self._constructor(new_arr, index=self.columns, columns=self.index)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
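# Illustrative sketch of the dtype handling in ``transpose`` above: homogeneous
# frames keep their dtype, mixed frames come back as ``object``. The frames
# below are assumptions for the example only.
#
#   pd.DataFrame({"a": [1, 2], "b": [3, 4]}).T.dtypes      # all int64
#   pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).T.dtypes  # all object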
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If slice passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
indexer = lib.maybe_indices_to_slice(
indexer.astype(np.intp, copy=False), len(self)
)
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
# GH#26490 using data[key] can cause RecursionError
return data._get_item_cache(key)
return data
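# Illustrative sketch of the dispatch order in ``__getitem__`` above; the frame
# below is an assumption for the example only.
#
#   df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
#   df["a"]           # hashable key found in unique columns -> Series
#   df[1:3]           # slice -> row slice
#   df[df["a"] > 1]   # boolean Series -> _getitem_bool_array
#   df[["a", "b"]]    # list-like key -> column take, returns a DataFrame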
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False) -> Scalar:
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(value, DataFrame):
self._set_item_frame_value(key, value)
elif is_list_like(value) and 1 < len(
self.columns.get_indexer_for([key])
) == len(value):
# Column to set is duplicated
self._setitem_array([key], value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# bool indexer is indexing along rows
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
if isinstance(value, DataFrame):
# GH#39931 reindex since iloc does not align
value = value.reindex(self.index.take(indexer))
self.iloc[indexer] = value
else:
if isinstance(value, DataFrame):
check_key_length(self.columns, key, value)
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
elif not is_list_like(value):
for col in key:
self[col] = value
elif isinstance(value, np.ndarray) and value.ndim == 2:
self._iset_not_inplace(key, value)
elif np.ndim(value) > 1:
# list of lists
value = DataFrame(value).values
return self._setitem_array(key, value)
else:
self._iset_not_inplace(key, value)
def _iset_not_inplace(self, key, value):
# GH#39510 when setting with df[key] = obj with a list-like key and
# list-like value, we iterate over those listlikes and set columns
# one at a time. This is different from dispatching to
# `self.loc[:, key]= value` because loc.__setitem__ may overwrite
# data inplace, whereas this will insert new arrays.
def igetitem(obj, i: int):
# Note: we catch DataFrame obj before getting here, but
# hypothetically would return obj.iloc[:, i]
if isinstance(obj, np.ndarray):
return obj[..., i]
else:
return obj[i]
if self.columns.is_unique:
if np.shape(value)[-1] != len(key):
raise ValueError("Columns must be same length as key")
for i, col in enumerate(key):
self[col] = igetitem(value, i)
else:
ilocs = self.columns.get_indexer_non_unique(key)[0]
if (ilocs < 0).any():
# key entries not in self.columns
raise NotImplementedError
if np.shape(value)[-1] != len(ilocs):
raise ValueError("Columns must be same length as key")
assert np.ndim(value) <= 2
orig_columns = self.columns
# Using self.iloc[:, i] = ... may set values inplace, which
# by convention we do not do in __setitem__
try:
self.columns = Index(range(len(self.columns)))
for i, iloc in enumerate(ilocs):
self[iloc] = igetitem(value, i)
finally:
self.columns = orig_columns
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _set_item_frame_value(self, key, value: DataFrame) -> None:
self._ensure_valid_index(value)
# align columns
if key in self.columns:
loc = self.columns.get_loc(key)
cols = self.columns[loc]
len_cols = 1 if is_scalar(cols) else len(cols)
if len_cols != len(value.columns):
raise ValueError("Columns must be same length as key")
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and isinstance(
loc, (slice, Series, np.ndarray, Index)
):
cols = maybe_droplevels(cols, key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
arraylike = _reindex_for_setitem(value, self.index)
self._set_item_mgr(key, arraylike)
def _iset_item_mgr(self, loc: int | slice | np.ndarray, value) -> None:
# when called from _set_item_mgr loc can be anything returned from get_loc
self._mgr.iset(loc, value)
self._clear_item_cache()
def _set_item_mgr(self, key, value: ArrayLike) -> None:
try:
loc = self._info_axis.get_loc(key)
except KeyError:
# This item wasn't present, just insert at end
self._mgr.insert(len(self._info_axis), key, value)
else:
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _iset_item(self, loc: int, value) -> None:
arraylike = self._sanitize_column(value)
self._iset_item_mgr(loc, arraylike)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value) -> None:
"""
Add series to DataFrame in specified column.
If series is a numpy array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
value = self._sanitize_column(value)
if (
key in self.columns
and value.ndim == 1
and not is_extension_array_dtype(value)
):
# broadcast across multiple columns if necessary
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1)).T
self._set_item_mgr(key, value)
def _set_value(
self, index: IndexLabel, col, value: Scalar, takeable: bool = False
) -> None:
"""
Put single value at passed column and index.
Parameters
----------
index : Label
row label
col : Label
column label
value : scalar
takeable : bool, default False
Sets whether or not index/col are interpreted as indexers
"""
try:
if takeable:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value) -> None:
"""
Ensure that, if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
if not isinstance(value, DataFrame):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Lookup Caching
def _clear_item_cache(self) -> None:
self._item_cache.clear()
def _get_item_cache(self, item: Hashable) -> Series:
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
# All places that call _get_item_cache have unique columns,
# pending resolution of GH#33047
loc = self.columns.get_loc(item)
values = self._mgr.iget(loc)
res = self._box_col_values(values, loc).__finalize__(self)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _reset_cacher(self) -> None:
# no-op for DataFrame
pass
def _maybe_cache_changed(self, item, value: Series) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
loc = self._info_axis.get_loc(item)
arraylike = value._values
self._mgr.iset(loc, arraylike)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr: str, inplace: bool = False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuation (besides underscores) or starting with digits must be
surrounded by backticks. (For example, a column named "Area (cm^2)" would
be referenced as ```Area (cm^2)```). Column names which are Python keywords
(like "list", "for", "import", etc) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
This excludes whitespace other than the space character,
but also the hash sign (as it is used for comments) and the backtick
itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
return None
else:
return result
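# Illustrative sketch of the '@' and backtick rules documented above; the frame
# and the local variable ``threshold`` are assumptions for the example only.
#
#   threshold = 3
#   df = pd.DataFrame({"A": [1, 4, 5], "B C": [9, 2, 7]})
#   df.query("A > @threshold")   # refer to an environment variable via '@'
#   df.query("`B C` > A")        # backticks for a column name with a space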
def eval(self, expr: str, inplace: bool = False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
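# Illustrative sketch of multi-line assignment with ``eval`` as documented
# above; the frame below is an assumption for the example only.
#
#   df = pd.DataFrame({"A": [1, 2, 3], "B": [6, 5, 4]})
#   df.eval("C = A + B\nD = A - B", inplace=True)  # adds columns C and D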
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
def check_int_infer_dtype(dtypes):
converted_dtypes = []
for dtype in dtypes:
# Numpy maps int to different types (int32, int64) on Windows and Linux
# see https://github.com/numpy/numpy/issues/9464
if (isinstance(dtype, str) and dtype == "int") or (dtype is int):
converted_dtypes.append(np.int32)
# error: Argument 1 to "append" of "list" has incompatible type
# "Type[signedinteger[Any]]"; expected "Type[signedinteger[Any]]"
converted_dtypes.append(np.int64) # type: ignore[arg-type]
else:
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[dtype[Any], ExtensionDtype]"; expected
# "Type[signedinteger[Any]]"
converted_dtypes.append(
infer_dtype_from_object(dtype) # type: ignore[arg-type]
)
return frozenset(converted_dtypes)
include = check_int_infer_dtype(include)
exclude = check_int_infer_dtype(exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: frozenset[Dtype], unique_dtypes: np.ndarray
) -> list[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
if (
issubclass(
# error: Argument 1 to "tuple" has incompatible type
# "FrozenSet[Union[ExtensionDtype, Union[str, Any], Type[str],
# Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]"; expected "Iterable[Union[type, Tuple[Any,
# ...]]]"
unique_dtype.type,
tuple(dtypes_set), # type: ignore[arg-type]
)
or (
np.number in dtypes_set
and getattr(unique_dtype, "_is_numeric", False)
)
)
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
# error: "ndarray" has no attribute "values"
return self.iloc[:, keep_these.values] # type: ignore[attr-defined]
def insert(self, loc, column, value, allow_duplicates: bool = False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
        value : scalar, Series, or array-like
            Content of the inserted column.
        allow_duplicates : bool, optional, default False
            Allow duplicate column labels to be created.
See Also
--------
Index.insert : Insert new item by index.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df
col1 col2
0 1 3
1 2 4
>>> df.insert(1, "newcol", [99, 99])
>>> df
col1 newcol col2
0 1 99 3
1 2 99 4
>>> df.insert(0, "col1", [100, 100], allow_duplicates=True)
>>> df
col1 col1 newcol col2
0 100 1 99 3
1 100 2 99 4
        Notice that pandas uses index alignment when `value` is a `Series`:
>>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2]))
>>> df
col0 col1 col1 newcol col2
0 NaN 100 1 99 3
1 5.0 100 2 99 4
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
if not allow_duplicates and column in self.columns:
# Should this be a different kind of error??
raise ValueError(f"cannot insert {column}, already exists")
if not isinstance(loc, int):
raise TypeError("loc must be int")
value = self._sanitize_column(value)
self._mgr.insert(loc, column, value)
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
            assigned to the new columns. The callable must not
            change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, value) -> ArrayLike:
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
value : scalar, Series, or array-like
Returns
-------
numpy.ndarray or ExtensionArray
"""
self._ensure_valid_index(value)
# We should never get here with DataFrame value
if isinstance(value, Series):
value = _reindex_for_setitem(value, self.index)
elif isinstance(value, ExtensionArray):
# Explicitly copy here
value = value.copy()
com.require_length_match(value, self.index)
elif is_sequence(value):
com.require_length_match(value, self.index)
# turn me into an ndarray
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif isinstance(value, Index):
value = value.copy(deep=True)._values
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = sanitize_array(value, None)
else:
value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None)
return value
@property
def _series(self):
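        # Mapping of column label -> Series built directly from the corresponding
        # column of the underlying block manager (no data copy).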
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(
self, row_labels: Sequence[IndexLabel], col_labels: Sequence[IndexLabel]
) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use DataFrame.melt and DataFrame.loc instead.
For further details see
:ref:`Looking up values by index/column labels <indexing.lookup>`.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
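        Notes
        -----
        A rough, illustrative equivalent (assuming a DataFrame ``df`` with
        unique index and columns; not the optimized path used internally)::
            [df.loc[r, c] for r, c in zip(row_labels, col_labels)]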
"""
        msg = (
            "The 'lookup' method is deprecated and will be "
            "removed in a future version. "
            "You can use DataFrame.melt and DataFrame.loc "
            "as a substitute."
        )
warnings.warn(msg, FutureWarning, stacklevel=2)
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
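        # Reindex one axis at a time: columns first, then the index, each
        # dispatched to the corresponding helper below.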
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy: bool,
level: Level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy: bool,
level: Level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
# error: Argument 2 to "take_2d_multi" has incompatible type "Tuple[Any,
# Any]"; expected "ndarray"
new_values = take_2d_multi(self.values, indexer, fill_value=fill_value)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join: str = "outer",
axis: Axis | None = None,
level: Level | None = None,
copy: bool = True,
fill_value=None,
method: str | None = None,
limit=None,
fill_axis: Axis = 0,
broadcast_axis: Axis | None = None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@overload
def set_axis(
self, labels, axis: Axis = ..., inplace: Literal[False] = ...
) -> DataFrame:
...
@overload
def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:
...
@overload
def set_axis(self, labels, *, inplace: Literal[True]) -> None:
...
@overload
def set_axis(
self, labels, axis: Axis = ..., inplace: bool = ...
) -> DataFrame | None:
...
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
def drop(
self,
labels=None,
axis: Axis = 0,
index=None,
columns=None,
level: Level | None = None,
inplace: bool = False,
errors: str = "raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Renamer | None = None,
*,
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
copy: bool = True,
inplace: bool = False,
level: Level | None = None,
errors: str = "ignore",
) -> DataFrame | None:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
            Whether to modify the DataFrame in place rather than returning a new
            one. If True, the value of copy is ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method: str | None = None,
axis: Axis | None = None,
inplace: bool = False,
limit=None,
downcast=None,
) -> DataFrame | None:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Hashable) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace: bool = False,
limit=None,
regex: bool = False,
method: str = "pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self,
periods=1,
freq: Frequency | None = None,
axis: Axis = 0,
fill_value=lib.no_default,
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if (
axis == 1
and periods != 0
and ncols > 0
and (fill_value is lib.no_default or len(self._mgr.arrays) > 1)
):
# Exclude single-array-with-fill_value case so we issue a FutureWarning
# if an integer is passed with datetimelike dtype GH#31971
from pandas import concat
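            # Overall strategy for horizontal shifts: keep the columns that remain
            # after the shift ("tail"), build ``min(|periods|, ncols)`` fill columns
            # ("other"), concatenate them on the appropriate side, and restore the
            # original column labels on the result.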
# tail: the data that is still in our shifted DataFrame
if periods > 0:
tail = self.iloc[:, :-periods]
else:
tail = self.iloc[:, -periods:]
# pin a simple Index to avoid costly casting
tail.columns = range(len(tail.columns))
if fill_value is not lib.no_default:
# GH#35488
# TODO(EA2D): with 2D EAs we could construct other directly
ser = Series(fill_value, index=self.index)
else:
# We infer fill_value to match the closest column
if periods > 0:
ser = self.iloc[:, 0].shift(len(self))
else:
ser = self.iloc[:, -1].shift(len(self))
width = min(abs(periods), ncols)
other = concat([ser] * width, axis=1)
if periods > 0:
result = concat([other, tail], axis=1)
else:
result = concat([tail, other], axis=1)
result = cast(DataFrame, result)
result.columns = self.columns.copy()
return result
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def set_index(
self,
keys,
drop: bool = True,
append: bool = False,
inplace: bool = False,
verify_integrity: bool = False,
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
If True, modifies the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: list[Hashable] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: list[Hashable] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: list[Hashable] = []
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[Index, Series]"; expected "Index"
arrays.append(col) # type:ignore[arg-type]
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[List[Any], ndarray]"; expected "Index"
arrays.append(col) # type: ignore[arg-type]
names.append(None)
elif isinstance(col, abc.Iterator):
# error: Argument 1 to "append" of "list" has incompatible type
# "List[Any]"; expected "Index"
arrays.append(list(col)) # type: ignore[arg-type]
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = ...,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> DataFrame:
...
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None,
drop: bool,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> None:
...
@overload
def reset_index(
self,
*,
drop: bool,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> None:
...
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None,
*,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> None:
...
@overload
def reset_index(
self,
*,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> None:
...
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = ...,
drop: bool = ...,
inplace: bool = ...,
col_level: Hashable = ...,
col_fill: Hashable = ...,
) -> DataFrame | None:
...
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Hashable = "",
) -> DataFrame | None:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[tuple[Any, Any | None]]
if isinstance(self.index, MultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
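            # Walk the levels in reverse so that repeated ``insert(0, ...)`` calls
            # leave the outermost (level 0) former index column leftmost.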
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if level is not None and i not in level:
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = lev._values
if level_values.dtype == np.object_:
level_values = lib.maybe_convert_objects(level_values)
if lab is not None:
# if we have the codes, extract the values with a mask
level_values = algorithms.take(
level_values, lab, allow_fill=True, fill_value=lev._na_value
)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
return ~self.isna()
def dropna(
self,
axis: Axis = 0,
how: str = "any",
thresh=None,
subset=None,
inplace: bool = False,
):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.
            .. versionchanged:: 1.0.0
               Passing a tuple or list of axes is no longer supported;
               only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
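        # ``count`` holds, for each label being filtered, the number of non-NA
        # entries along the other axis; build a boolean mask of labels to keep.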
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Hashable | Sequence[Hashable] | None = None,
keep: str | bool = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> DataFrame | None:
"""
Return DataFrame with duplicate rows removed.
        Considering certain columns is optional. Indexes, including time indexes,
        are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
result = self[-duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(
self,
subset: Hashable | Sequence[Hashable] | None = None,
keep: str | bool = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series for each duplicated rows.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
        is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
        is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
        By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas._libs.hashtable import duplicated_int64
if self.empty:
return self._constructor_sliced(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(vals, size_hint=len(self))
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
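        # Factorize each selected column into integer codes, combine the
        # per-column codes into a single group id per row, then mark the ids
        # that repeat according to ``keep``.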
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis: Axis = 0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
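            # Multi-column sort: gather one key array per column in ``by`` and
            # compute a single lexicographic sort indexer over all of them.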
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
# error: List comprehension has incompatible type List[Series];
# expected List[ndarray]
keys = [
Series(k, name=name) # type: ignore[misc]
for (k, name) in zip(keys, by)
]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
elif len(by):
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
# error: Incompatible types in assignment (expression has type
# "Series", variable has type "ndarray")
k = Series(k, name=by) # type: ignore[assignment]
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
else:
return self.copy()
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.set_axis(
self._get_block_manager_axis(axis), ibase.default_index(len(indexer))
)
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
def sort_index(
self,
axis: Axis = 0,
level: Level | None = None,
ascending: bool | int | Sequence[bool | int] = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list-like of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` and `stable` are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order, to sort in descending order,
use ``ascending=False``
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Sequence[Hashable] | None = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
def nlargest(self, n, columns, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
            - ``all`` : do not drop any duplicates, even if it means
              selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame(
... {"Grade": ["A", "B", "A", "C"]},
... index=[
... ["Final exam", "Final exam", "Coursework", "Coursework"],
... ["History", "Geography", "History", "Geography"],
... ["January", "February", "March", "April"],
... ],
... )
>>> df
Grade
Final exam History January A
Geography February B
Coursework History March A
Geography April C
In the following example, we will swap the levels of the indices.
        Here, we will swap the levels of the row index, but levels can be swapped
        column-wise in a similar manner. Note that swapping on the row index
        (``axis=0``) is the default behaviour.
        By not supplying any arguments for i and j, we swap the last and
        second-to-last levels.
>>> df.swaplevel()
Grade
Final exam January History A
February Geography B
Coursework March History A
April Geography C
By supplying one argument, we can choose which index to swap the last
index with. We can for example swap the first index with the last one as
follows.
>>> df.swaplevel(0)
Grade
January History Final exam A
February Geography Final exam B
March History Coursework A
April Geography Coursework C
We can also define explicitly which indices we want to swap by supplying values
for both i and j. Here, we for example swap the first and second indices.
>>> df.swaplevel(0, 1)
Grade
History Final exam January A
Geography Final exam February B
History Coursework March A
Geography Coursework April C
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
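# Illustrative sketch for ``reorder_levels`` (hypothetical frame, not a doctest):
# the index levels are permuted by name without sorting the underlying data.
#
#   >>> idx = pd.MultiIndex.from_tuples(
#   ...     [("one", "a"), ("one", "b")], names=["outer", "inner"]
#   ... )
#   >>> frame = pd.DataFrame({"x": [1, 2]}, index=idx)
#   >>> frame.reorder_levels(["inner", "outer"]).index.names
#   FrozenList(['inner', 'outer'])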
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
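# Illustrative sketch of the flex-arithmetic path (hypothetical frames, not a
# doctest): operands are aligned on both axes before the op is dispatched, so
# labels present on only one side come back as NaN.
#
#   >>> pd.DataFrame({"a": [1, 2]}) + pd.DataFrame({"a": [10, 20], "b": [1, 1]})
#       a   b
#   0  11 NaN
#   1  22 NaN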
def _dispatch_frame_op(self, right, func: Callable, axis: int | None = None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
right : scalar, Series, or DataFrame
func : arithmetic or comparison operator
axis : {None, 0, 1}
Returns
-------
DataFrame
"""
# Get the appropriate array-op to apply to each column/block's values.
array_op = ops.get_array_op(func)
right = lib.item_from_zerodim(right)
if not is_list_like(right):
# i.e. scalar, faster than checking np.ndim(right) == 0
with np.errstate(all="ignore"):
bm = self._mgr.apply(array_op, right=right)
return type(self)(bm)
elif isinstance(right, DataFrame):
assert self.index.equals(right.index)
assert self.columns.equals(right.columns)
# TODO: The previous assertion `assert right._indexed_same(self)`
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
# TODO operate_blockwise expects a manager of the same type
with np.errstate(all="ignore"):
bm = self._mgr.operate_blockwise(
# error: Argument 1 to "operate_blockwise" of "ArrayManager" has
# incompatible type "Union[ArrayManager, BlockManager]"; expected
# "ArrayManager"
# error: Argument 1 to "operate_blockwise" of "BlockManager" has
# incompatible type "Union[ArrayManager, BlockManager]"; expected
# "BlockManager"
right._mgr, # type: ignore[arg-type]
array_op,
)
return type(self)(bm)
elif isinstance(right, Series) and axis == 1:
# axis=1 means we want to operate row-by-row
assert right.index.equals(self.columns)
right = right._values
# maybe_align_as_frame ensures we do not have an ndarray here
assert not isinstance(right, np.ndarray)
with np.errstate(all="ignore"):
arrays = [
array_op(_left, _right)
for _left, _right in zip(self._iter_column_arrays(), right)
]
elif isinstance(right, Series):
assert right.index.equals(self.index) # Handle other cases later
right = right._values
with np.errstate(all="ignore"):
arrays = [array_op(left, right) for left in self._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
return type(self)._from_arrays(
arrays, self.columns, self.index, verify_integrity=False
)
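# Illustrative sketch of the ``Series``/``axis=1`` branch above (hypothetical
# data, not a doctest): the Series is aligned against the frame's columns, so
# each column is combined with its matching scalar.
#
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> df + pd.Series({"a": 10, "b": 100})
#        a    b
#   0   11  103
#   1   12  104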
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
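# Illustrative sketch of the ``fill_value`` path (hypothetical frames, not a
# doctest), reached through the public flex methods such as ``DataFrame.add``:
# a value missing on only one side is filled before the op is applied.
#
#   >>> df1 = pd.DataFrame({"a": [1.0, np.nan]})
#   >>> df2 = pd.DataFrame({"a": [10.0, 20.0]})
#   >>> df1.add(df2, fill_value=0)
#         a
#   0  11.0
#   1  20.0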
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
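# Quick sanity sketch for ``divmod`` (hypothetical frame, not a doctest):
# the returned pair satisfies ``div * other + mod == self`` elementwise.
#
#   >>> div, mod = divmod(pd.DataFrame({"a": [7, 8]}), 3)
#   >>> div["a"].tolist(), mod["a"].tolist()
#   ([2, 2], [1, 2])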
# ----------------------------------------------------------------------
# Combination-Related
@doc(
_shared_docs["compare"],
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames
Examples
--------
>>> df = pd.DataFrame(
... {{
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... }},
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
""",
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite: bool = True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote series, which is all NaN, as other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
The result of combining the provided DataFrame with the other object.
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y):
mask = extract_array(isna(x))
x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
combined = self.combine(other, combiner, overwrite=False)
dtypes = {
col: find_common_type([self.dtypes[col], other.dtypes[col]])
for col in self.columns.intersection(other.columns)
if not is_dtype_equal(combined.dtypes[col], self.dtypes[col])
}
if dtypes:
combined = combined.astype(dtypes)
return combined
def update(
self,
other,
join: str = "left",
overwrite: bool = True,
filter_func=None,
errors: str = "ignore",
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
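# Illustrative sketch of ``filter_func`` (hypothetical frames, not a doctest):
# only positions where ``filter_func`` returns True *and* ``other`` is non-NA
# are replaced.
#
#   >>> df = pd.DataFrame({"A": [1, -2, 3]})
#   >>> df.update(pd.DataFrame({"A": [10, 20, 30]}), filter_func=lambda x: x < 0)
#   >>> df["A"].tolist()
#   [1, 20, 3]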
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose whether to include NA in the group keys by setting the
`dropna` parameter; the default setting is `True`:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis: Axis = 0,
level: Level | None = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool | lib.NoDefault = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
# error: Argument "squeeze" to "DataFrameGroupBy" has incompatible type
# "Union[bool, NoDefault]"; expected "bool"
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze, # type: ignore[arg-type]
observed=observed,
dropna=dropna,
)
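# Illustrative sketch of ``observed`` with a categorical grouper (hypothetical
# data, not a doctest): ``observed=False`` (the default) keeps unobserved
# categories as empty groups, while ``observed=True`` drops them.
#
#   >>> key = pd.Categorical(["a", "a"], categories=["a", "b"])
#   >>> df = pd.DataFrame({"key": key, "val": [1, 2]})
#   >>> df.groupby("key", observed=True)["val"].sum().index.tolist()
#   ['a']
#   >>> df.groupby("key", observed=False)["val"].sum().index.tolist()
#   ['a', 'b']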
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation; multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / column margins (subtotals and grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level: Level = -1, dropna: bool = True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import (
stack,
stack_multiple,
)
if isinstance(level, (tuple, list)):
result = stack_multiple(self, level, dropna=dropna)
else:
result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
def explode(self, column: str | tuple, ignore_index: bool = False) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a Series from list-like values to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
if ignore_index:
result.index = ibase.default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
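# Illustrative sketch of ``ignore_index`` (hypothetical frame, not a doctest):
# with ``ignore_index=True`` the exploded rows are relabeled 0..n-1 instead of
# repeating the original index labels.
#
#   >>> df = pd.DataFrame({"A": [[1, 2], [3]], "B": ["x", "y"]})
#   >>> df.explode("A", ignore_index=True).index.tolist()
#   [0, 1, 2]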
def unstack(self, level: Level = -1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
result = unstack(self, level, fill_value)
return result.__finalize__(self, method="unstack")
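# Illustrative sketch of ``fill_value`` (hypothetical data, not a doctest):
# index/column combinations that are missing after the pivot are filled instead
# of becoming NaN.
#
#   >>> idx = pd.MultiIndex.from_tuples([("one", "a"), ("two", "b")])
#   >>> pd.Series([1, 2], index=idx).unstack(fill_value=0)
#        a  b
#   one  1  0
#   two  0  2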
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level: Level | None = None,
ignore_index: bool = True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
klass="Dataframe",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not isinstance(periods, int):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
axis = self._get_axis_number(axis)
if axis == 1 and periods != 0:
return self - self.shift(periods, axis=axis)
new_data = self._mgr.diff(n=periods, axis=axis)
return self._constructor(new_data).__finalize__(self, "diff")
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: IndexLabel,
ndim: int,
subset: FrameOrSeriesUnion | None = None,
) -> FrameOrSeriesUnion:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
from pandas.core.apply import frame_apply
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs)
result = op.agg()
if relabeling:
# This is to keep the order of columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
from pandas.core.apply import frame_apply
op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs)
result = op.transform()
assert isinstance(result, DataFrame)
return result
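# Illustrative sketch (hypothetical frame, not a doctest): unlike ``agg``,
# ``transform`` must produce output with the same length along the applied axis.
#
#   >>> df = pd.DataFrame({"a": [1.0, 4.0]})
#   >>> df.transform(np.sqrt)["a"].tolist()
#   [1.0, 2.0]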
def apply(
self,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type=None,
args=(),
**kwargs,
):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwargs
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwargs=kwargs,
)
return op.apply()
def applymap(
self, func: PythonFuncType, na_action: str | None = None, **kwargs
) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values without passing them to func.
.. versionadded:: 1.2
**kwargs
Additional keyword arguments to pass as keywords arguments to
`func`.
.. versionadded:: 1.3
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 <NA> 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
func = functools.partial(func, **kwargs)
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer).__finalize__(self, "applymap")
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self,
other,
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return (
concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
).__finalize__(self, method="append")
def join(
self,
other: FrameOrSeriesUnion,
on: IndexLabel | None = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
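# Illustrative sketch of joining a list of frames (hypothetical frames, not a
# doctest): every frame in the list is joined on its index (``on``, ``lsuffix``
# and ``rsuffix`` are not supported for that case, as noted above).
#
#   >>> df = pd.DataFrame({"A": [1, 2]})
#   >>> others = [pd.DataFrame({"B": [3, 4]}), pd.DataFrame({"C": [5, 6]})]
#   >>> df.join(others).columns.tolist()
#   ['A', 'B', 'C']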
def _join_compat(
self,
other: FrameOrSeriesUnion,
on: IndexLabel | None = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right: FrameOrSeriesUnion,
how: str = "inner",
on: IndexLabel | None = None,
left_on: IndexLabel | None = None,
right_on: IndexLabel | None = None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes: Suffixes = ("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate: str | None = None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
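# Illustrative sketch of ``indicator=True`` (hypothetical frames, not a doctest):
# a ``_merge`` column records whether each row came from the left frame only,
# the right frame only, or both.
#
#   >>> left = pd.DataFrame({"k": [1, 2]})
#   >>> right = pd.DataFrame({"k": [2, 3]})
#   >>> left.merge(right, on="k", how="outer", indicator=True)["_merge"].tolist()
#   ['left_only', 'both', 'right_only']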
def round(
self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs
) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(
self,
method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
min_periods: int = 1,
) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall":
correl = libalgos.nancorr_kendall(mat, minp=min_periods)
elif callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
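            # Evaluate the callable once per unordered column pair and mirror
            # the value, since the result is documented to be symmetric with
            # 1.0 on the diagonal.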
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods: int | None = None, ddof: int | None = 1) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
        because the estimated covariance matrix is not guaranteed to be positive
        semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
        <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__
        for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
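        # Fast path: with no missing values, np.cov can be used directly;
        # otherwise fall back to the pairwise routine that handles min_periods.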
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
def corrwith(self, other, axis: Axis = 0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
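            # (adding the other operand times 0 propagates each side's NaNs to
            # the other, so both operands share the same missing-value pattern)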
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(
self, axis: Axis = 0, level: Level | None = None, numeric_only: bool = False
):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64")
def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._rename(name=level_name)
level_codes = ensure_platform_int(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
*,
axis: Axis = 0,
skipna: bool = True,
numeric_only: bool | None = None,
filter_type=None,
**kwds,
):
min_count = kwds.get("min_count", 0)
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]
dtype_is_dt = np.array(
[is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values: np.ndarray):
# We only use this in the case that operates on self.values
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values, axis=1):
if isinstance(values, ExtensionArray):
if values.ndim == 2:
# i.e. DatetimeArray, TimedeltaArray
return values._reduce(name, axis=1, skipna=skipna, **kwds)
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=axis, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
if (numeric_only is not None or axis == 0) and min_count == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
            # For numeric_only=None, only the unambiguous case (axis==0 with
            # no object dtypes) can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
if numeric_only is True:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, _ = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
# Even if we are object dtype, follow numpy and return
# float64, see test_apply_funcs_over_empty
out = out.astype(np.float64)
return out
assert numeric_only is None
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
if hasattr(result, "dtype"):
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
pass
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(
self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True
) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
        By default, missing values are not considered, and the modes of wings
are both 0 and 2. Because the resulting DataFrame has two rows,
the second row of ``species`` and ``legs`` contains ``NaN``.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
        Setting ``dropna=False``, ``NaN`` values are considered, and they can
        be the mode (as for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
data = data.apply(f, axis=axis)
# Ensure index is type stable (should always use int index)
if data.empty:
data.index = ibase.default_index(0)
return data
def quantile(
self,
q=0.5,
axis: Axis = 0,
numeric_only: bool = True,
interpolation: str = "linear",
):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
if not is_list_like(q):
# BlockManager.quantile expects listlike, so we wrap and unwrap here
res = self.quantile(
[q], axis=axis, numeric_only=numeric_only, interpolation=interpolation
)
return res.iloc[0]
q = Index(q, dtype=np.float64)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
if axis == 1:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
res = data._mgr.quantile(qs=q, axis=1, interpolation=interpolation)
result = self._constructor(res)
return result
@doc(NDFrame.asfreq, **_shared_doc_kwargs)
def asfreq(
self,
freq: Frequency,
method=None,
how: str | None = None,
normalize: bool = False,
fill_value=None,
) -> DataFrame:
return super().asfreq(
freq=freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
@doc(NDFrame.resample, **_shared_doc_kwargs)
def resample(
self,
rule,
axis=0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
loffset=None,
base: int | None = None,
on=None,
level=None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
) -> Resampler:
return super().resample(
rule=rule,
axis=axis,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
base=base,
on=on,
level=level,
origin=origin,
offset=offset,
)
def to_timestamp(
self,
freq: Frequency | None = None,
how: str = "start",
axis: Axis = 0,
copy: bool = True,
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, PeriodIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(
self, freq: Frequency | None = None, axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
        When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
        in ``other``.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
def _reindex_for_setitem(value: FrameOrSeriesUnion, index: Index) -> ArrayLike:
# reindex if necessary
if value.index.equals(index) or not len(index):
return value._values.copy()
# GH#4107
try:
reindexed_value = value.reindex(index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return reindexed_value
| 34.218831 | 170 | 0.528078 |
1c9e8f0c5d24c9eeba6ec6e42e594ceb7ddc8bc5 | 1,223 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/maximum-number-of-people-that-can-be-caught-in-tag.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
]
| 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/maximum-number-of-people-that-can-be-caught-in-tag.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
]
| 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/maximum-number-of-people-that-can-be-caught-in-tag.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
]
| 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n)
# Space: O(1)
# greedy with two pointers solution
class Solution(object):
def catchMaximumAmountofPeople(self, team, dist):
"""
:type team: List[int]
:type dist: int
:rtype: int
"""
result = i = j = 0
while i < len(team) and j < len(team):
if i+dist < j or team[i] != 1:
i += 1
elif j+dist < i or team[j] != 0:
j += 1
else:
result += 1
i += 1
j += 1
return result
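# Illustrative example (not part of the original solution): with
# team = [0, 1, 0, 1, 0] and dist = 3, each "it" player (1) can catch a
# distinct non-"it" player (0), so both solutions in this file return 2.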
# Time: O(n)
# Space: O(1)
# greedy with sliding window solution
class Solution2(object):
def catchMaximumAmountofPeople(self, team, dist):
"""
:type team: List[int]
:type dist: int
:rtype: int
"""
result = j = 0
for i in xrange(len(team)):
if not team[i]:
continue
while j < i-dist:
j += 1
while j <= min(i+dist, len(team)-1):
if team[j] == 0:
break
j += 1
if j <= min(i+dist, len(team)-1):
result += 1
j += 1
return result
| 24.959184 | 53 | 0.415372 |
4697dbb039112dc172c920771991f5c696ec9c1d | 514 | py | Python | bootalert.py | andyseubert/doorbell | ab65d44aa36fcad407275e33d8aa3140d17d3c35 | [
"Unlicense"
]
| null | null | null | bootalert.py | andyseubert/doorbell | ab65d44aa36fcad407275e33d8aa3140d17d3c35 | [
"Unlicense"
]
| null | null | null | bootalert.py | andyseubert/doorbell | ab65d44aa36fcad407275e33d8aa3140d17d3c35 | [
"Unlicense"
]
| null | null | null | #!/usr/bin/python
import time
from time import sleep
import datetime
import os
import sys
import subprocess
from subprocess import Popen
import pynma
p = pynma.PyNMA( "12842c4d5f6061eb9543674248c3518edda9dd83343ebe19" )
application="alertpi boot"
event="DoorBell OnBoot"
description="doorbell just turned on"
priority=2
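# Send the boot notification via Notify My Android, then launch the SMS helper
# script in the background without waiting for it to finish.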
p.push(application, event, description)
subprocess.Popen([sys.executable, "/opt/doorbell/sendsms.py BootedUpJustNow" ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
| 25.7 | 150 | 0.81323 |
61ade65f2a68fce9fc8b8594973ce9dbb7d9c83c | 3,120 | py | Python | IMU/VTK-6.2.0/Filters/Core/Testing/Python/SliceOrder.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
]
| 4 | 2019-05-30T01:52:12.000Z | 2021-09-29T21:12:13.000Z | IMU/VTK-6.2.0/Filters/Core/Testing/Python/SliceOrder.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
]
| null | null | null | IMU/VTK-6.2.0/Filters/Core/Testing/Python/SliceOrder.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
]
| 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
class SliceOrder(object):
'''
These transformations permute medical image data to maintain proper
    orientation regardless of the acquisition order.
After applying these transforms with vtkTransformFilter,
a view up of 0,-1,0 will result in the body part
facing the viewer.
NOTE: some transformations have a -1 scale factor
for one of the components.
To ensure proper polygon orientation and normal direction,
you must apply the vtkPolyDataNormals filter.
Naming:
si - superior to inferior (top to bottom)
iss - inferior to superior (bottom to top)
ap - anterior to posterior (front to back)
pa - posterior to anterior (back to front)
lr - left to right
rl - right to left
'''
si = vtk.vtkTransform()
si.SetMatrix([1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1])
    # "is" is a reserved word in Python, so use iss
iss = vtk.vtkTransform()
iss.SetMatrix([1, 0, 0, 0, 0, 0, -1, 0, 0, -1, 0, 0, 0, 0, 0, 1])
ap = vtk.vtkTransform()
ap.Scale(1, -1, 1)
pa = vtk.vtkTransform()
pa.Scale(1, -1, -1)
lr = vtk.vtkTransform()
lr.SetMatrix([0, 0, -1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])
rl = vtk.vtkTransform()
rl.SetMatrix([0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])
#
# the previous transforms assume radiological views of the slices
# (viewed from the feet).
    # Other modalities such as physical sectioning may view from the head.
    # These transforms modify the original with a 180 degree rotation about y.
#
hf = vtk.vtkTransform()
hf.SetMatrix([-1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1])
hfsi = vtk.vtkTransform()
hfsi.Concatenate(hf.GetMatrix())
hfsi.Concatenate(si.GetMatrix())
hfis = vtk.vtkTransform()
hfis.Concatenate(hf.GetMatrix())
hfis.Concatenate(iss.GetMatrix())
hfap = vtk.vtkTransform()
hfap.Concatenate(hf.GetMatrix())
hfap.Concatenate(ap.GetMatrix())
hfpa = vtk.vtkTransform()
hfpa.Concatenate(hf.GetMatrix())
hfpa.Concatenate(pa.GetMatrix())
hflr = vtk.vtkTransform()
hflr.Concatenate(hf.GetMatrix())
hflr.Concatenate(lr.GetMatrix())
hfrl = vtk.vtkTransform()
hfrl.Concatenate(hf.GetMatrix())
hfrl.Concatenate(rl.GetMatrix())
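# Illustrative usage sketch (assumes an existing VTK pipeline; ``reader`` is a
# hypothetical data source):
#
#   transform_filter = vtk.vtkTransformFilter()
#   transform_filter.SetInputConnection(reader.GetOutputPort())
#   transform_filter.SetTransform(SliceOrder.si)
#   normals = vtk.vtkPolyDataNormals()
#   normals.SetInputConnection(transform_filter.GetOutputPort())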
| 32.5 | 76 | 0.584615 |
e523d2a99df244efa27b36194a427edd212f75e5 | 2,536 | py | Python | roadmap/spring-boot.py | yuvarajsanjeevi/jhipster-lite | 90b9d010266b4d3317204c89e9e20a5cbe8c2a0f | [
"Apache-2.0"
]
| null | null | null | roadmap/spring-boot.py | yuvarajsanjeevi/jhipster-lite | 90b9d010266b4d3317204c89e9e20a5cbe8c2a0f | [
"Apache-2.0"
]
| 49 | 2021-12-17T22:12:42.000Z | 2022-03-31T19:47:40.000Z | roadmap/spring-boot.py | MSaguer/jhipster-lite | 77a68e77f8792dcc23b10c28aa50af6f902c4c68 | [
"Apache-2.0"
]
| null | null | null | from urllib.request import urlretrieve
from diagrams import Cluster, Diagram
from diagrams.aws.compute import EC2
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB
from diagrams.onprem.database import PostgreSQL, Mysql, Mariadb, Mongodb, Neo4J, Cassandra, Couchbase, Mssql
from diagrams.custom import Custom
from diagrams.onprem.vcs import Git
from diagrams.programming.language import Java
from diagrams.programming.framework import Spring
jhipster_url = "https://raw.githubusercontent.com/jhipster/jhipster-artwork/main/logos/JHipster%20bowtie.png"
jhipster_icon = "jhipster.png"
urlretrieve(jhipster_url, jhipster_icon)
maven_url = "https://raw.githubusercontent.com/jhipster/jhipster.github.io/main/images/logo/icons/maven.png"
maven_icon = "maven.png"
urlretrieve(maven_url, maven_icon)
gradle_url = "https://avatars.githubusercontent.com/u/124156"
gradle_icon = "gradle.png"
urlretrieve(gradle_url, gradle_icon)
spring_security_url = "https://pbs.twimg.com/profile_images/1235983944463585281/AWCKLiJh_400x400.png"
spring_security_icon = "spring_security.png"
urlretrieve(spring_security_url, spring_security_icon)
liquibase_url = "https://avatars.githubusercontent.com/u/438548"
liquibase_icon = "liquibase.png"
urlretrieve(liquibase_url, liquibase_icon)
with Diagram("Spring Boot Project", show=False, direction="TB"):
# init = Custom("init", jhipster_icon)
init = Git("init")
with Cluster("Build Tool"):
# gradle = Custom("Gradle", gradle_icon)
maven = Custom("", maven_icon)
with Cluster("Spring Boot"):
java = Java("")
spring_boot = Spring("Spring Boot")
with Cluster("Server"):
spring_boot_mvc = Spring("Spring MVC")
spring_security_jwt = Custom("Security JWT", spring_security_icon)
with Cluster("Database"):
# neo4j = Neo4J("Neo4J")
# couchbase = Couchbase("Couchbase")
# cassandra = Cassandra("Cassandra")
# mongodb = Mongodb("MongoDB")
# mssql = Mssql("MSSQL")
# mariadb = Mariadb("MariaDB")
# mysql = Mysql("MySQL")
postgresql = PostgreSQL("PostgreSQL")
liquibase = Custom("Liquibase", liquibase_icon)
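    # Wire the nodes: repo init -> build tool -> Java/Spring Boot, then the
    # MVC/security layer and the PostgreSQL database with Liquibase migrations.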
init >> [ maven ] >> java
java >> spring_boot >> spring_boot_mvc
spring_boot_mvc >> spring_security_jwt
spring_boot >> [ postgresql ]
[ postgresql ] >> liquibase
# spring_boot >> [ mysql, postgresql, mariadb, mssql, mongodb, neo4j, cassandra, couchbase ]
# [ mysql, postgresql, mariadb, mssql ] >> liquibase
| 37.294118 | 109 | 0.724369 |
275307b85c097fa74665cdc829a64b483aa3887d | 7,832 | py | Python | msibi/pair.py | jennyfothergill/msibi | 0e309eff836dc13016d87889fe8d8f6960a13599 | [
"MIT"
]
| 7 | 2021-03-24T17:01:09.000Z | 2022-01-03T21:53:26.000Z | msibi/pair.py | jennyfothergill/msibi | 0e309eff836dc13016d87889fe8d8f6960a13599 | [
"MIT"
]
| 22 | 2021-03-24T18:09:57.000Z | 2022-02-24T18:36:49.000Z | msibi/pair.py | jennyfothergill/msibi | 0e309eff836dc13016d87889fe8d8f6960a13599 | [
"MIT"
]
| 2 | 2021-03-24T16:19:56.000Z | 2021-03-24T17:04:48.000Z | import os
import matplotlib.pyplot as plt
import numpy as np
from cmeutils.structure import gsd_rdf
from msibi.potentials import alpha_array, head_correction, tail_correction
from msibi.utils.error_calculation import calc_similarity
from msibi.utils.exceptions import UnsupportedEngine
from msibi.utils.general import find_nearest
from msibi.utils.smoothing import savitzky_golay
class Pair(object):
"""A pair interaction to be optimized.
Parameters
----------
type1 : str, required
The name of one particle type on the particle pair.
Must match the names found in the State's .gsd trajectory file.
See gsd.hoomd.ParticleData.types
type2 : str, required
The name of one particle type on the particle pair.
Must match the names found in the State's .gsd trajectory file.
See gsd.hoomd.ParticleData.types
potential :
Attributes
----------
name : str
Pair name.
potential : func
Values of the potential at every pot_r.
"""
def __init__(self, type1, type2, potential, head_correction_form="linear"):
self.type1 = str(type1)
self.type2 = str(type2)
self.name = f"{self.type1}-{self.type2}"
self.potential_file = ""
self._states = dict()
if isinstance(potential, str):
self.potential = np.loadtxt(potential)[:, 1]
# TODO: this could be dangerous
else:
self.potential = potential
self.previous_potential = None
self.head_correction_form = head_correction_form
def _add_state(self, state, smooth=True):
"""Add a state to be used in optimizing this pair.
Parameters
----------
state : msibi.state.State
A state object created previously.
"""
target_rdf = self.get_state_rdf(state, query=False)
if state._opt.smooth_rdfs:
target_rdf[:, 1] = savitzky_golay(
target_rdf[:, 1], 9, 2, deriv=0, rate=1
)
negative_idx = np.where(target_rdf < 0)
target_rdf[negative_idx] = 0
self._states[state] = {
"target_rdf": target_rdf,
"current_rdf": None,
"alpha": state.alpha,
"alpha_form": "linear",
"pair_indices": None,
"f_fit": [],
"path": state.dir
}
def get_state_rdf(self, state, query):
"""Calculate the RDF of a Pair at a State."""
if query:
traj = state.query_traj
else:
traj = state.traj_file
rdf, norm = gsd_rdf(
traj,
self.type1,
self.type2,
start=-state._opt.max_frames,
r_max=state._opt.rdf_cutoff,
bins=state._opt.n_rdf_points,
exclude_bonded=state._opt.rdf_exclude_bonded
)
return np.stack((rdf.bin_centers, rdf.rdf*norm)).T
def compute_current_rdf(self, state, smooth, verbose=False, query=True):
rdf = self.get_state_rdf(state, query=query)
self._states[state]["current_rdf"] = rdf
if state._opt.smooth_rdfs:
current_rdf = self._states[state]["current_rdf"]
current_rdf[:, 1] = savitzky_golay(
current_rdf[:, 1], 9, 2, deriv=0, rate=1
)
negative_idx = np.where(current_rdf < 0)
current_rdf[negative_idx] = 0
if verbose: # pragma: no cover
plt.title(f"RDF smoothing for {state.name}")
plt.plot(rdf[:,0], rdf[:, 1], label="unsmoothed")
plt.plot(rdf[:,0], current_rdf[:,1], label="smoothed")
plt.legend()
plt.show()
# Compute fitness function comparing the two RDFs.
f_fit = calc_similarity(
rdf[:, 1], self._states[state]["target_rdf"][:, 1]
)
self._states[state]["f_fit"].append(f_fit)
def save_current_rdf(self, state, iteration, dr):
"""Save the current rdf
Parameters
----------
state : State
A state object
iteration : int
Current iteration step, used in the filename
dr : float
The RDF bin size
"""
rdf = self._states[state]["current_rdf"]
rdf[:, 0] -= dr / 2
        filename = f"pair_{self.name}-state_{state.name}-step{iteration}.txt"
        np.savetxt(os.path.join(state.dir, filename), rdf)
def update_potential(self, pot_r, r_switch=None, verbose=False):
"""Update the potential using all states. """
self.previous_potential = np.copy(self.potential)
for state in self._states:
kT = state.kT
alpha0 = self._states[state]["alpha"]
form = self._states[state]["alpha_form"]
alpha = alpha_array(alpha0, pot_r, form=form)
current_rdf = self._states[state]["current_rdf"]
target_rdf = self._states[state]["target_rdf"]
# For cases where rdf_cutoff != pot_cutoff, only update the
# potential using RDF values < pot_cutoff.
unused_rdf_vals = current_rdf.shape[0] - self.potential.shape[0]
if unused_rdf_vals != 0:
current_rdf = current_rdf[:-unused_rdf_vals,:]
target_rdf = target_rdf[:-unused_rdf_vals,:]
if verbose: # pragma: no cover
plt.plot(current_rdf[:,0], current_rdf[:,1], label="current rdf")
plt.plot(target_rdf[:,0], target_rdf[:,1], label="target rdf")
plt.legend()
plt.show()
# The actual IBI step.
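            # alpha scales the correction strength per state, and dividing by
            # the number of states averages the update across all states.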
self.potential += (
kT * alpha * np.log(current_rdf[:,1] / target_rdf[:,1]) / len(self._states)
)
if verbose: # pragma: no cover
plt.plot(
pot_r, self.previous_potential, label="previous potential"
)
plt.plot(pot_r, self.potential, label="potential")
plt.ylim(
(min(self.potential[np.isfinite(self.potential)])-1,10)
)
plt.legend()
plt.show()
# Apply corrections to ensure continuous, well-behaved potentials.
pot = self.potential
self.potential = tail_correction(pot_r, self.potential, r_switch)
tail = self.potential
self.potential = head_correction(
pot_r,
self.potential,
self.previous_potential,
self.head_correction_form
)
head = self.potential
if verbose: # pragma: no cover
plt.plot(pot_r, head, label="head correction")
plt.plot(pot_r, pot, label="uncorrected potential")
idx_r, _ = find_nearest(pot_r, r_switch)
plt.plot(pot_r[idx_r:], tail[idx_r:], label="tail correction")
plt.ylim((min(pot[np.isfinite(pot)])-1, 10))
plt.legend()
plt.show()
def save_table_potential(self, r, dr, iteration=0, engine="hoomd"):
"""Save the table potential to a file usable by the MD engine. """
V = self.potential
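        # The tabulated force is the negative numerical derivative of V(r).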
F = -1.0 * np.gradient(V, dr)
data = np.vstack([r, V, F])
basename = os.path.basename(self.potential_file)
basename = "step{0:d}.{1}".format(iteration, basename)
dirname = os.path.dirname(self.potential_file)
iteration_filename = os.path.join(dirname, basename)
# This file is overwritten at each iteration and actually used for
# performing the query simulations.
np.savetxt(self.potential_file, data.T)
# This file is written for viewing of how the potential evolves.
np.savetxt(iteration_filename, data.T)
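# Illustrative usage sketch (hypothetical r-grid and file layout; in practice a
# Pair is driven by the MSIBI optimizer rather than used standalone):
#
#   import numpy as np
#   pot_r = np.linspace(1e-3, 2.5, 151)
#   dr = pot_r[1] - pot_r[0]
#   pair = Pair("A", "B", potential=np.zeros_like(pot_r))
#   pair.potential_file = "potentials/pair_A-B.txt"  # directory must exist
#   pair.save_table_potential(pot_r, dr, iteration=0)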
| 35.762557 | 95 | 0.572651 |
f856687d34a8bf822cc6303cebec55837d1cb34d | 2,018 | py | Python | othello/gis/util.py | ulaval-rs/othello | d62c55986a778443f43a639a980a3be36767f661 | [
"BSD-3-Clause"
]
| null | null | null | othello/gis/util.py | ulaval-rs/othello | d62c55986a778443f43a639a980a3be36767f661 | [
"BSD-3-Clause"
]
| 8 | 2021-07-07T14:06:40.000Z | 2021-07-27T00:03:23.000Z | othello/gis/util.py | ulaval-rs/othello | d62c55986a778443f43a639a980a3be36767f661 | [
"BSD-3-Clause"
]
| null | null | null | import copy
import functools
from typing import Dict, List
import pandas
from geopandas import GeoDataFrame
from othello.gis import io
def find_common_columns(dfs: List[GeoDataFrame]) -> List[str]:
common_columns = functools.reduce(lambda c1, c2: set(c1).intersection(set(c2)), dfs)
return common_columns
def make_dataframe_with_common_columns(dfs: List[GeoDataFrame], common_columns: List[str]) -> GeoDataFrame:
df = copy.deepcopy(dfs[0][common_columns])
return df
def add_weighted_columns_to_dataframe(df: GeoDataFrame, criteria_information: List[Dict], join_on: str) -> GeoDataFrame:
"""Add the weighted criteria to the 'df' dataframe by joining data on the join_on column name.
Args:
df: Base GeoDataFrame
criteria_information: Criteria data that allows to retrieve the exact column from a table from a layer from aGDB file
join_on: Column name on which the table join will be based
Returns:
GeoDataFrame with the added criteria and the final score
"""
weighted_columns = []
for criterion_information in criteria_information:
field = criterion_information['field']
filepath = criterion_information['filepath']
layer = criterion_information['layer']
weight = criterion_information['weight']
criterion_name = criterion_information['criterion_name']
criterion_df = io.read(filepath, layer=layer)[[join_on, field]]
criterion_df.columns = [join_on, criterion_name + '_np']
criterion_df[criterion_name + '_p'] = weight * criterion_df[criterion_name + '_np']
df = pandas.merge(left=df, right=criterion_df, on=join_on)
weighted_columns.append(criterion_name + '_p')
# df[criterion_name + '_np'] = criterion_df
# df[criterion_name + '_p'] = weight * criterion_df
# Final score in a new series
df['FinalScore'] = [0 for _ in range(len(df))]
for column in weighted_columns:
df['FinalScore'] += df[column]
return df
| 34.20339 | 125 | 0.705154 |
229893786f39bf0b9a729f93fee32b2388504fec | 24,951 | py | Python | monospace/game.py | Ball-Man/monospace | 570faa0b800b95e5305e83542512c38ff500b3b2 | [
"MIT"
]
| 1 | 2021-06-19T00:24:17.000Z | 2021-06-19T00:24:17.000Z | monospace/game.py | Ball-Man/monospace | 570faa0b800b95e5305e83542512c38ff500b3b2 | [
"MIT"
]
| null | null | null | monospace/game.py | Ball-Man/monospace | 570faa0b800b95e5305e83542512c38ff500b3b2 | [
"MIT"
]
| null | null | null | import ctypes
import copy
import random
import enum
import math
import desper
import esper
import dsdl
import monospace
from sdl2 import *
from sdl2.sdlttf import *
from sdl2.sdlmixer import *
DEFAULT_BULLET_SPEED = 15
MAX_BULLET_SPEED = 25
DEFAULT_BULLET_DELAY = 30
MIN_BULLET_DELAY = 7
class GameProcessor(esper.Processor):
"""Main game logic(enemy waves, powerup spawns etc.)."""
WAVE_THRESHOLDS = [50, 100, 250, 400, 600, 800, 1000, math.inf]
score = 0
def __init__(self):
self._cur_threshold = 0
self._cached_texture = None
self._cached_entity = None
self.model = None
self._state = GameState.WAVE
self._powerup_coroutine = None
self._rewards_spawned = False
self._next_wave_coroutine = None
self.waves = [monospace.FirstWave(),
random.choice((monospace.SecondWaveShooter(),
monospace.SecondWaveRoll())),
random.choice((monospace.ThirdWave(),
monospace.ThirdWaveRocket())),
monospace.FourthWave(),
monospace.FifthWave(),
monospace.SixthWave(),
monospace.SeventhWave(),
monospace.InfWave()]
self.keys = SDL_GetKeyboardState(None)
def process(self, model):
if self.model is None:
self.model = model
if self._cached_entity is None:
self.score_up(0)
coroutines = self.world.get_processor(desper.CoroutineProcessor)
# State machine
if self._state == GameState.WAVE:
self.waves[self._cur_threshold].spawn(self.world)
elif self._state == GameState.REWARD:
if self._powerup_coroutine is None:
# Spawn rewards
self._powerup_coroutine = coroutines.start(
self.spawn_rewards())
# When there are no rewards left, go to the next wave
rewards = tuple(self.world.get_component(PowerupBox))
if (self._rewards_spawned and len(rewards) == 0
and self._next_wave_coroutine is None):
self._next_wave_coroutine = coroutines.start(
self.next_wave())
if self.keys[dsdl.SCANCODE_BACK]:
monospace.pause_game(0, self.world, monospace.model)
def spawn_rewards(self):
yield 120
self.waves[self._cur_threshold].spawn_rewards(self.world)
self._rewards_spawned = True
def next_wave(self):
yield 120
self._cur_threshold += 1
self._rewards_spawned = False
self.score_up(0)
self._powerup_coroutine = None
self._next_wave_coroutine = None
self._state = GameState.WAVE
# Marker for the last wave
if self.is_infinite_wave:
text_surface = TTF_RenderText_Blended(
self.model.res['fonts']['timenspace_sm'].get(),
b'INF', SDL_Color())
text = SDL_CreateTextureFromSurface(
self.model.renderer, text_surface)
self.world.create_entity(dsdl.Position(30, 30), text)
def change_color_coroutine():
proc = self.world.get_processor(dsdl.ScreenClearerProcessor)
start_color = copy.copy(proc.color)
dst_color = self.waves[self._cur_threshold].bg_color
time = 120
for prog in range(time):
proc.color.r = int(
start_color.r + (dst_color.r - start_color.r)
* prog / time)
proc.color.g = int(
start_color.g + (dst_color.g - start_color.g)
* prog / time)
proc.color.b = int(
start_color.b + (dst_color.b - start_color.b)
* prog / time)
yield
self.world.get_processor(desper.CoroutineProcessor).start(
change_color_coroutine())
@property
def is_infinite_wave(self):
return math.isinf(self.WAVE_THRESHOLDS[self._cur_threshold])
def score_up(self, value):
"""Add some value to the current score.
This also invalidates the current cached texture.
"""
self.score += value
# Cache texture and create a new entity
#
# Remove current entity
if self._cached_entity is not None:
# SDL_DestroyTexture(self._cached_texture)
# Apparently this operation crashes on android. Looks like
# textures are freed when the entity is deleted.
self.world.delete_entity(self._cached_entity)
# Change internal state if necessary
if self.score >= self.WAVE_THRESHOLDS[self._cur_threshold]:
self.change_to_reward()
# Score to show on screen
pos_y = 50
shown_score = max(
self.WAVE_THRESHOLDS[self._cur_threshold] - self.score, 0)
if self.is_infinite_wave: # At the last wave show total score
shown_score = self.score
pos_y = 70
# Create new texture
text_surface = TTF_RenderText_Blended(
self.model.res['fonts']['timenspace'].get(),
str(shown_score).encode(), SDL_Color())
self._cached_texture = SDL_CreateTextureFromSurface(
self.model.renderer, text_surface)
# Add entity
self._cached_entity = self.world.create_entity(
self._cached_texture, dsdl.Position(30, pos_y))
# Cleanup
SDL_FreeSurface(text_surface)
def change_to_reward(self):
"""Change state to REWARD, with consequences."""
if self._state == GameState.REWARD:
return
self._state = GameState.REWARD
self.clear_screen()
# Reset bonuses given to the ship
ship = self.world.get_component(Ship)[0][1]
ship.revert_bonuses()
def clear_screen(self):
"""Clear all the unwanted entities from the screen.
This clears all enemies and powerups.
"""
# Clear all the enemies
for en, enemy in self.world.get_component(monospace.Enemy):
enemy.dead = True
enemy.spawn_particles()
self.world.delete_entity(en)
# Clear all enemy bullets
for en, _ in self.world.get_component(monospace.EnemyBullet):
self.world.delete_entity(en)
# Clear all the bonuses
for en, _ in self.world.get_component(PowerupBox):
self.world.delete_entity(en)
class GameState(enum.Enum):
"""Enumeration for the GameProcessor internal state machine."""
WAVE = 0
REWARD = 1
class EntityCleanerProcessor(esper.Processor):
"""Clean bullets and enemies from memory."""
def process(self, _):
for en, _ in self.world.get_component(ShipBullet):
position = self.world.component_for_entity(en, dsdl.Position)
if position.y <= 50 and self.world.entity_exists(en):
self.world.delete_entity(en)
for en, _ in self.world.get_component(monospace.Enemy):
position = self.world.component_for_entity(en, dsdl.Position)
if position.y > monospace.LOGICAL_HEIGHT + 50 \
and self.world.entity_exists(en):
self.world.delete_entity(en)
class Ship(desper.Controller):
"""Main ship controller."""
drag_ratio = 1.0
def __init__(self, position, bbox):
super().__init__()
self.position = position
self.bbox = bbox
self._old_x = position.x
self._old_y = position.y
self._old_pressing = False
self._drag = False
self._timer = 0
self.blasters = []
self.texture = None
self.bonuses = set()
self.default_blaster = None
        # Select movement type (based on the current platform)
self.movement_method = self.touch_movement if monospace.on_android \
else self.mouse_movement
def on_attach(self, en, world):
super().on_attach(en, world)
self.texture = self.get(ctypes.POINTER(SDL_Texture))
self.default_blaster = Blaster(
(0, 0), ShipBullet,
monospace.model.res['text']['ship_bullet'].get(),
DEFAULT_BULLET_DELAY,
(0, -DEFAULT_BULLET_SPEED),
(10, 10, (5, 40)), world)
self.blasters.append(copy.copy(self.default_blaster))
# Set selected texture
world.add_component(en, monospace.get_selected_ship_texture())
def update(self, en, world, model):
self.movement_method()
# Don't move outside borders
self.position.x = max(min(self.position.x, monospace.LOGICAL_WIDTH), 0)
self.position.y = max(min(self.position.y, monospace.LOGICAL_HEIGHT),
0)
# Trigger blasters
for blaster in self.blasters:
if blaster.shoot(self.position.x, self.position.y):
# Feedback sound
Mix_PlayChannel(-1,
monospace.model.res['chunks']['shot'].get(),
0)
# Check collisions with powerups
powerup = self.check_collisions(PowerupBox)
if powerup is not None:
powerup.apply(self)
# Feedback sound
Mix_PlayChannel(-1, monospace.model.res['chunks']['powerup'].get(),
0)
# Check collisions with enemy bullets
enemy_bullet = self.check_collisions(EnemyBullet)
if enemy_bullet is not None:
try:
# If has shield, protect
self.get(PowerShield)
except KeyError:
# If no shield, defeat
self.die()
enemy = self.check_collisions(monospace.Enemy)
if enemy is not None:
self.die()
def mouse_movement(self):
"""Movement update managed by mouse(mainly for desktop)."""
mouse_x, mouse_y = ctypes.c_int(), ctypes.c_int()
pressing = (SDL_GetMouseState(ctypes.byref(mouse_x),
ctypes.byref(mouse_y))
& SDL_BUTTON(SDL_BUTTON_LEFT))
# Start drag
if pressing and not self._old_pressing:
self._drag = True
self._old_x, self._old_y = mouse_x.value, mouse_y.value
elif not pressing and self._old_pressing: # Stop drag
self._drag = False
if self._drag:
self.position.x += ((mouse_x.value - self._old_x)
* monospace.LOGICAL_WIDTH_RATIO
* self.drag_ratio)
self.position.y += ((mouse_y.value - self._old_y)
* monospace.LOGICAL_WIDTH_RATIO
* self.drag_ratio)
self._old_x, self._old_y = mouse_x.value, mouse_y.value
self._old_pressing = pressing
def touch_movement(self):
"""Movement update managed by multitouch(mainly for android)."""
if len(dsdl.finger_stack) > 0 \
and dsdl.fingers[dsdl.finger_stack[0]].moving:
dx = dsdl.fingers[dsdl.finger_stack[0]].dx
dy = dsdl.fingers[dsdl.finger_stack[0]].dy
if math.isnan(dx):
dx = 0
if math.isnan(dy):
dy = 0
self.position.x += (dx
* monospace.LOGICAL_WIDTH
* self.drag_ratio)
self.position.y += (dy
* monospace.LOGICAL_HEIGHT
* self.drag_ratio)
def check_collisions(self, component_type):
"""Check for collisions with a component_type(bbox).
Return None if no collision is detected, return an instance of
component_type instead(the one colliding).
"""
for en, comp in self.world.get_component(component_type):
bbox = self.world.try_component(en, dsdl.BoundingBox)
if bbox.overlaps(self.bbox):
return comp
return None
def revert_bonuses(self):
"""Revert the effect of all bonuses."""
bonuses = list(self.bonuses)
for bonus in bonuses:
bonus.revert(self)
def die(self):
"""Death animation and world change."""
self.world.remove_component(self.entity, Ship)
def coroutine():
"""Spawn particles."""
texture = self.get(ctypes.POINTER(SDL_Texture))
position = self.get(dsdl.Position)
offset = position.get_offset(texture.w, texture.h)
position.alpha = 70
for i in range(10):
x = random.randint(int(position.x - offset[0]),
int(position.x - offset[0] + texture.w))
y = random.randint(int(position.y - offset[1]),
int(position.y - offset[1] + texture.h))
# Big burst
for _ in range(random.randrange(10, 15)):
angle = math.radians(random.randrange(0, 360))
mag = random.randrange(1, 3)
self.world.create_entity(
dsdl.Particle(20, -0.1 / 64, -0.002),
dsdl.Position(x, y,
size_x=20 / 64, size_y=20 / 64,
offset=dsdl.Offset.CENTER),
monospace.model.res['text']['part']['quad'].get(),
dsdl.Velocity(x=math.cos(angle) * mag,
y=math.sin(angle) * mag)
)
# Small burst
for _ in range(random.randrange(4, 10)):
angle = math.radians(random.randrange(0, 360))
mag = random.randrange(1, 2)
self.world.create_entity(
dsdl.Particle(30),
dsdl.Position(x, y,
size_x=5 / 64, size_y=5 / 64,
offset=dsdl.Offset.CENTER),
monospace.model.res['text']['part']['quad'].get(),
dsdl.Velocity(x=math.cos(angle) * mag,
y=math.sin(angle) * mag)
)
# Feedback sound
if i % 2:
sound = monospace.model.res['chunks']['death1'].get()
else:
sound = monospace.model.res['chunks']['death2'].get()
Mix_PlayChannel(-1, sound, 0)
yield 10
# Final big burst
x = position.x - offset[0] + texture.w / 2
y = position.y - offset[1] + texture.h / 2
for _ in range(random.randrange(40, 60)):
angle = math.radians(random.randrange(0, 360))
mag = random.randrange(1, 3)
size = random.randint(10, 30)
self.world.create_entity(
dsdl.Particle(160, -1 / (160 * 3)),
dsdl.Position(x, y,
size_x=size / 64, size_y=size / 64,
offset=dsdl.Offset.CENTER),
monospace.model.res['text']['part']['quad'].get(),
dsdl.Velocity(x=math.cos(angle) * mag,
y=math.sin(angle) * mag)
)
# Feedback sound
Mix_PlayChannel(-1, monospace.model.res['chunks']['death3'].get(),
0)
self.world.delete_entity(self.entity)
yield 210
# Sound feedback
Mix_PlayChannel(
-1,
monospace.model.res['chunks']['enemies']['shot'].get(),
0)
# Change room
# Set temporary score for next room
monospace.score.temp_score = self.processor(GameProcessor).score
model = monospace.model
model.switch(model.res['death_world'], True, stack=True)
self.processor(desper.CoroutineProcessor).start(coroutine())
class Blaster:
"""Class that represents a bullet blaster."""
def __init__(self, offset, bullet_type, bullet_text, bullet_delay,
bullet_velocity, bullet_bbox, world, animation=None):
self.bullet_type = bullet_type
self.bullet_text = bullet_text
self.bullet_delay = bullet_delay
self.bullet_velocity = bullet_velocity
self.bullet_bbox = bullet_bbox
self.offset = offset
self.world = world
self.animation = animation
self._timer = bullet_delay
def shoot(self, x, y):
"""Shoot a bullet of bullet_type, offsetted given x and y.
Attach to the bullet the given velocity component and buonding
box.
"""
self._timer -= 1
if self._timer > 0:
return False
# Restart timer and shoot
self._timer = self.bullet_delay
components = [
dsdl.Position(self.offset[0] + x, self.offset[1] + y,
offset=dsdl.Offset.BOTTOM_CENTER),
self.bullet_text,
dsdl.Velocity(*self.bullet_velocity),
dsdl.BoundingBox(w=self.bullet_bbox[0], h=self.bullet_bbox[1],
offset=self.bullet_bbox[2]),
self.bullet_type()]
if self.animation is not None:
components.append(dsdl.Animation(*self.animation))
self.world.create_entity(*components)
return True
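# Usage sketch (added for clarity, not part of the original game code): the
# resource keys and constants below mirror the ones used in Ship.on_attach
# above, but treat the exact values as assumptions.
#
#   blaster = Blaster(
#       (0, 0), ShipBullet,
#       monospace.model.res['text']['ship_bullet'].get(),
#       DEFAULT_BULLET_DELAY, (0, -DEFAULT_BULLET_SPEED),
#       (10, 10, (5, 40)), world)
#
#   # Call once per frame; True means the cooldown expired and a bullet
#   # entity was created at the given position.
#   if blaster.shoot(position.x, position.y):
#       Mix_PlayChannel(-1, monospace.model.res['chunks']['shot'].get(), 0)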
class EnemyBullet(desper.Controller):
"""Class representing an opponent's bullet."""
def update(self, en, world, model):
# If a shield is found, self-destruct
for _, (circle, shield) \
in world.get_components(dsdl.CollisionCircle,
monospace.PowerShield):
if dsdl.check_collisions(circle, self.get(dsdl.BoundingBox)):
world.delete_entity(en)
class ShipBullet(desper.OnAttachListener):
"""Base component for ship bullets."""
damage = 1
def __init__(self):
        self.hit = False  # Set to True on the first hit so later hits are ignored
def on_attach(self, en, world):
self.entity = en
self.world = world
def die(self):
"""Default method used for bullet destruction.
Override to change the behaviour of the bullet.
"""
if self.world.entity_exists(self.entity):
self.world.delete_entity(self.entity)
self.hit = True
class DriftingShipBullet(ShipBullet, desper.AbstractComponent):
"""Special type of bullet for the ship.
    This is actually a bit of a code smell that could be cleaned up with
    an adapter/decorator/bridge-like pattern. For the simplicity of this
    small game, the smell is kept.
"""
def on_attach(self, en, world):
super().on_attach(en, world)
self.position = world.try_component(en, dsdl.Position)
self.starting_x = self.position.x
self._drift_time = 0
def update(self, *args):
self._drift_time += 1
self.position.x = (self.starting_x + 40
* math.sin(self._drift_time / 10))
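# Added note: the drift above traces x(t) = starting_x + 40 * sin(t / 10),
# i.e. the bullet weaves about 40 logical pixels either side of its spawn
# column with a period of roughly 2 * pi * 10 ~= 63 frames.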
class PowerShield(desper.Controller):
"""Component that shields a hit from an enemy.
All the bullets from enemies are shielded without the shield
being destroyed.
"""
def on_attach(self, en, world):
super().on_attach(en, world)
self.ship_pos = world.get_components(Ship, dsdl.Position)[0][1][1]
def update(self, en, world, _):
# Check collision with enemies
pos = self.get(dsdl.Position)
circle = self.get(dsdl.CollisionCircle)
# Follow player
pos.x = self.ship_pos.x
pos.y = self.ship_pos.y
# Check collision
for entity, enemy in world.get_component(monospace.Enemy):
bbox = world.try_component(entity, dsdl.BoundingBox)
if dsdl.check_collisions(circle, bbox):
enemy.die()
world.delete_entity(en)
class MiniShip(desper.Controller):
"""Mini helper ship."""
def __init__(self):
super().__init__()
self._time = 0
self._base_y = monospace.LOGICAL_HEIGHT / 4 * 3
self.flight_h = random.randrange(monospace.LOGICAL_HEIGHT // 6,
monospace.LOGICAL_HEIGHT // 4) / 2
self._base_x = monospace.LOGICAL_WIDTH / 2
        # int() avoids passing a float stop to randrange (an error on newer Python)
        self.flight_w = random.randrange(monospace.LOGICAL_WIDTH // 4,
                                         int(monospace.LOGICAL_WIDTH // 1.2)) / 2
self.yfactor = random.randint(1, 3)
def on_attach(self, en, world):
super().on_attach(en, world)
self.blaster = Blaster(
(0, 0), ShipBullet,
monospace.model.res['text']['ship_bullet'].get(), 0,
(0, -DEFAULT_BULLET_SPEED),
(10, 10, (5, 40)), world)
self.position = self.get(dsdl.Position)
# Start shooting
self.processor(desper.CoroutineProcessor).start(self.shoot_coroutine())
def shoot_coroutine(self):
"""Shoot every once in a while."""
while self.world.entity_exists(self.entity):
yield random.randint(50, 120)
self.shoot()
if random.randint(0, 2) == 0: # Sometimes shoot twice
yield 10
self.shoot()
if random.randint(0, 18) == 0: # Rarely shoot a third time
yield 10
self.shoot()
def shoot(self):
self.blaster.shoot(self.position.x, self.position.y)
Mix_PlayChannel(-1,
monospace.model.res['chunks']['shot'].get(),
0)
def update(self, *args):
self.position.y = self._base_y \
+ self.flight_h * math.sin(self._time * self.yfactor)
self.position.x = self._base_x \
+ self.flight_w * math.cos(self._time)
self._time += math.pi / 160
class PowerupBox(desper.OnAttachListener):
"""Proxy component for a power function."""
def __init__(self, powerup_applier):
# Bound specific powerup appliers to specific textures
        # If an applier isn't found in the dict, fall back to the blank texture.
self.powerup_texture_names = {
monospace.BonusDelay: 'speed',
monospace.powerup_drift: 'drift',
monospace.powerup_double_blasters: 'double_blasters',
monospace.powerup_shield: 'shield_',
monospace.powerup_delay1: 'delay1',
monospace.powerup_add_blaster: 'add_blaster',
monospace.powerup_quick: 'quick',
monospace.powerup_help: 'help'
}
self.powerup_applier = powerup_applier
self.applied = False
def on_attach(self, en, world):
self.world = world
self.en = en
self.apply_texture()
# Move inside world if spawned outside
self.text = world.component_for_entity(en, ctypes.POINTER(SDL_Texture))
self.pos = world.component_for_entity(en, dsdl.Position)
if self.outside_world:
world.get_processor(desper.CoroutineProcessor) \
.start(self.drag_in())
def apply(self, ship):
"""Apply the incapsulated powerup to the given ship.
After being applied, destroy it
"""
if self.applied:
return
self.applied = True
self.powerup_applier(ship)
# Particles?
# Delete all powerup boxes on screen
for en, powerup in self.world.get_component(PowerupBox):
powerup.applied = True # Prevent anomalies
self.world.delete_entity(en)
def apply_texture(self):
"""Select the correct texture for this powerup."""
text_name = self.powerup_texture_names.get(self.powerup_applier)
if text_name is None:
text_name = \
self.powerup_texture_names.get(type(self.powerup_applier))
text_name = 'blank' if text_name is None else text_name
self.world.add_component(
self.en,
monospace.model.res['text']['powerups'][text_name].get())
@property
def outside_world(self):
return not (self.text.w / 2 < self.pos.x
< monospace.LOGICAL_WIDTH - self.text.w / 2)
@property
def drag_in_dir(self):
return 1 if self.pos.x - self.text.w / 2 < 0 else -1
def drag_in(self):
"""Coroutine, move the box inside the room."""
while self.outside_world:
self.pos.x += 2 * self.drag_in_dir
yield
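# Spawn sketch (added for clarity; the real spawning code lives elsewhere in
# the package, so the component values here are assumptions):
#
#   world.create_entity(
#       dsdl.Position(x, y, offset=dsdl.Offset.CENTER),
#       dsdl.BoundingBox(w=50, h=50),
#       dsdl.Velocity(0, 2),
#       PowerupBox(monospace.powerup_shield))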
| 34.086066 | 79 | 0.562262 |
3852360e2f4978f2491a1181800210068ea01dca | 4,724 | py | Python | datasets.py | microsoft/VoxHRNet | be72d6448ff4b45c531163a6a6b46ff1ff5e60fd | ["MIT"] | 4 | 2021-11-12T17:19:29.000Z | 2022-03-27T05:08:24.000Z | datasets.py | microsoft/VoxHRNet | be72d6448ff4b45c531163a6a6b46ff1ff5e60fd | ["MIT"] | null | null | null | datasets.py | microsoft/VoxHRNet | be72d6448ff4b45c531163a6a6b46ff1ff5e60fd | ["MIT"] | 1 | 2021-11-10T11:22:01.000Z | 2021-11-10T11:22:01.000Z |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Created by Yeshu Li ([email protected])
# --------------------------------------------------------------------------
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import torch
import nibabel
import os
import numpy as np
import transforms
nibabel.Nifti1Header.quaternion_threshold = -1e-06
class MyCustomDataset(Dataset):
def __init__(self, img_list, label_list, is_train, label_map_file_name = None):
super(MyCustomDataset, self).__init__()
self.img_list = img_list
self.label_list = label_list
self.is_train = is_train
self.count = len(img_list)
self.label_map = np.load(label_map_file_name, allow_pickle = True).item()
return
def reshape_img_dims(self, img):
if len(img.shape) == 3:
img = img.unsqueeze(0)
else:
img = img.permute(3, 0, 1, 2)
return img
def transform_volume(self, img, label, prob = 0.2):
        # Branches guarded by `> 1.0` can never trigger, so affine, block
        # masking and shift/scale augmentation are effectively disabled;
        # elastic deformation and Gaussian noise run with probability 1 - prob.
        if np.random.rand() > 1.0:
img, label = transforms.random_affine(img, label)
if np.random.rand() > prob:
img, label = transforms.random_elastic(img, label)
if np.random.rand() > 1.0:
img, label = transforms.block_random_regions(img, label)
if np.random.rand() > prob:
img = transforms.add_gaussian_noise(img)
if np.random.rand() > 1.0:
img, label = transforms.shift_scale(img, label)
return img, label
def remap_label(self, label):
ret = np.zeros(label.shape, dtype = label.dtype)
for to_label, from_label in self.label_map.items():
ret[label == from_label] = to_label
return ret
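    # Worked example (values are hypothetical): with a label_map of
    # {0: 0, 1: 4, 2: 17}, a raw segmentation voxel of 4 becomes 1 and 17
    # becomes 2, e.g. remap_label(np.array([0, 4, 17])) -> array([0, 1, 2]),
    # giving the network a dense, zero-based label space.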
def __getitem__(self, index):
img = nibabel.load(self.img_list[index]).get_data()
label = nibabel.load(self.label_list[index]).get_data()
label = self.remap_label(label)
img = (img - np.mean(img)) / np.std(img)
if self.is_train:
img, label = self.transform_volume(img, label)
img = torch.Tensor(img)
label = torch.LongTensor(label)
img = self.reshape_img_dims(img)
return (img, label)
def __len__(self):
return self.count
def read_img(input_path):
img = nibabel.load(input_path)
data = img.get_data()
affine = img.affine
return data, affine
def get_dataset_file_list(config, div_name):
root_dir = os.path.join(config['DATASET']['ROOT'], config['DATASET']['DATASET'])
group_names = config[div_name]['GROUP_NAME']
group_cnts = config[div_name]['GROUP_CNT']
imgs = []
segs = []
for group_name, group_cnt in zip(group_names, group_cnts):
for ind in range(group_cnt):
subject_id = '{}{:03d}'.format(group_name, ind + 1)
img_file_name = 'orig_{}.nii.gz'.format(subject_id)
seg_file_name = 'aseg_{}.nii.gz'.format(subject_id)
img_file_path = os.path.join(root_dir, img_file_name)
seg_file_path = os.path.join(root_dir, seg_file_name)
imgs.append(img_file_path)
segs.append(seg_file_path)
return imgs, segs
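# Illustrative config layout assumed by the helper above (the concrete config
# shipped with the repository may differ):
#
#   config['DATASET']['ROOT'] = 'data'
#   config['DATASET']['DATASET'] = 'my_dataset'      # hypothetical name
#   config['TRAIN']['GROUP_NAME'] = ['S']
#   config['TRAIN']['GROUP_CNT'] = [30]
#
# which resolves pairs such as data/my_dataset/orig_S001.nii.gz and
# data/my_dataset/aseg_S001.nii.gz.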
def get_dataloader(config):
train_bsize = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS)
val_bsize = config.VALIDATE.BATCH_SIZE_PER_GPU * len(config.GPUS)
test_bsize = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS)
train_img_names, train_seg_names = get_dataset_file_list(config, div_name = 'TRAIN')
val_img_names, val_seg_names = get_dataset_file_list(config, div_name = 'VALIDATE')
test_img_names, test_seg_names = get_dataset_file_list(config, div_name = 'TEST')
train_set = MyCustomDataset(train_img_names, train_seg_names, is_train = True, label_map_file_name = config.DATASET.DATASET_DICT)
val_set = MyCustomDataset(val_img_names, val_seg_names, is_train = False, label_map_file_name = config.DATASET.DATASET_DICT)
test_set = MyCustomDataset(test_img_names, test_seg_names, is_train = False, label_map_file_name = config.DATASET.DATASET_DICT)
train_loader = DataLoader(train_set, batch_size = train_bsize, shuffle = True, pin_memory = True, num_workers = config.WORKERS)
val_loader = DataLoader(val_set, batch_size = val_bsize, shuffle = False, pin_memory = True, num_workers = config.WORKERS)
test_loader = DataLoader(test_set, batch_size = test_bsize, shuffle = False, pin_memory = True, num_workers = config.WORKERS)
return train_loader, val_loader, test_loader
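# Minimal usage sketch (assumes `config` is the same config object used above):
#
#   train_loader, val_loader, test_loader = get_dataloader(config)
#   for img, label in train_loader:
#       pass  # img: roughly (B, C, D, H, W) float tensor, label: (B, D, H, W) long tensor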
| 33.985612 | 133 | 0.6395 |
7dd70c0ed291bda4e8e7e80c18c7fd2271093442 | 534 | py | Python | 11-Notifications/mqttwarn/funcs-alldata.py | outsmartit/raspberry-pi-home-automation | 398201d97ea5f2e18a69a79145c25bfc5a4290ad | ["MIT"] | 30 | 2020-08-11T08:38:48.000Z | 2022-03-26T03:17:06.000Z | 11-Notifications/mqttwarn/funcs-alldata.py | outsmartit/raspberry-pi-home-automation | 398201d97ea5f2e18a69a79145c25bfc5a4290ad | ["MIT"] | 5 | 2020-09-01T18:44:06.000Z | 2021-03-13T16:16:30.000Z | 11-Notifications/mqttwarn/funcs-alldata.py | outsmartit/raspberry-pi-home-automation | 398201d97ea5f2e18a69a79145c25bfc5a4290ad | ["MIT"] | 10 | 2020-09-21T14:14:49.000Z | 2022-02-15T10:46:21.000Z |
"""Custom functions for mqttwarn.
Copyright (C) 2020 Koen Vervloesem
License: MIT
"""
import json
# Data mapping functions
def translate_xiaomi_aqara_contact(topic, data, srv=None):
"""Translate the Xiaomi Aqara's contact sensor JSON data to a
human-readable description of whether it's open or closed."""
payload = json.loads(data["payload"])
if "contact" in payload:
if payload["contact"]:
return dict(status="closed")
else:
return dict(status="opened")
return None
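# Worked example (payload values are made up):
#
#   data = {"payload": '{"contact": false, "battery": 100}'}
#   translate_xiaomi_aqara_contact("zigbee2mqtt/door", data)
#   # -> {'status': 'opened'}
#
# Returning a dict lets mqttwarn merge `status` into the data used for message
# formatting; returning None leaves the message data unchanged.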
| 23.217391 | 65 | 0.670412 |
2d23d75e11c8804f3da1e804a9d528d9cccff58a | 17,977 | py | Python | xarray/backends/netCDF4_.py | EricKeenan/xarray | 7355c350a88ec6892c5e9ccdf8dee5ef532036be | ["CC-BY-4.0", "PSF-2.0", "BSD-2-Clause", "Apache-2.0", "BSD-3-Clause"] | null | null | null | xarray/backends/netCDF4_.py | EricKeenan/xarray | 7355c350a88ec6892c5e9ccdf8dee5ef532036be | ["CC-BY-4.0", "PSF-2.0", "BSD-2-Clause", "Apache-2.0", "BSD-3-Clause"] | null | null | null | xarray/backends/netCDF4_.py | EricKeenan/xarray | 7355c350a88ec6892c5e9ccdf8dee5ef532036be | ["CC-BY-4.0", "PSF-2.0", "BSD-2-Clause", "Apache-2.0", "BSD-3-Clause"] | null | null | null |
import functools
import operator
import os
import pathlib
from contextlib import suppress
import numpy as np
from .. import coding
from ..coding.variables import pop_to
from ..core import indexing
from ..core.utils import FrozenDict, is_remote_uri
from ..core.variable import Variable
from .common import (
BackendArray,
WritableCFDataStore,
find_root_and_group,
robust_getitem,
)
from .file_manager import CachingFileManager, DummyFileManager
from .locks import HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock, get_write_lock
from .netcdf3 import encode_nc3_attr_value, encode_nc3_variable
from .plugins import BackendEntrypoint
from .store import open_backend_dataset_store
# This lookup table maps from dtype.byteorder to a readable endian
# string used by netCDF4.
_endian_lookup = {"=": "native", ">": "big", "<": "little", "|": "native"}
NETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK])
class BaseNetCDF4Array(BackendArray):
__slots__ = ("datastore", "dtype", "shape", "variable_name")
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_array()
self.shape = array.shape
dtype = array.dtype
if dtype is str:
# use object dtype because that's the only way in numpy to
# represent variable length strings; it also prevents automatic
# string concatenation via conventions.decode_cf_variable
dtype = np.dtype("O")
self.dtype = dtype
def __setitem__(self, key, value):
with self.datastore.lock:
data = self.get_array(needs_lock=False)
data[key] = value
if self.datastore.autoclose:
self.datastore.close(needs_lock=False)
def get_array(self, needs_lock=True):
raise NotImplementedError("Virtual Method")
class NetCDF4ArrayWrapper(BaseNetCDF4Array):
__slots__ = ()
def get_array(self, needs_lock=True):
ds = self.datastore._acquire(needs_lock)
variable = ds.variables[self.variable_name]
variable.set_auto_maskandscale(False)
# only added in netCDF4-python v1.2.8
with suppress(AttributeError):
variable.set_auto_chartostring(False)
return variable
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _getitem(self, key):
if self.datastore.is_remote: # pragma: no cover
getitem = functools.partial(robust_getitem, catch=RuntimeError)
else:
getitem = operator.getitem
try:
with self.datastore.lock:
original_array = self.get_array(needs_lock=False)
array = getitem(original_array, key)
except IndexError:
# Catch IndexError in netCDF4 and return a more informative
# error message. This is most often called when an unsorted
# indexer is used before the data is loaded from disk.
msg = (
"The indexing operation you are attempting to perform "
"is not valid on netCDF4.Variable object. Try loading "
"your data into memory first by calling .load()."
)
raise IndexError(msg)
return array
def _encode_nc4_variable(var):
for coder in [
coding.strings.EncodedStringCoder(allows_unicode=True),
coding.strings.CharacterArrayCoder(),
]:
var = coder.encode(var)
return var
def _check_encoding_dtype_is_vlen_string(dtype):
if dtype is not str:
raise AssertionError( # pragma: no cover
"unexpected dtype encoding %r. This shouldn't happen: please "
"file a bug report at github.com/pydata/xarray" % dtype
)
def _get_datatype(var, nc_format="NETCDF4", raise_on_invalid_encoding=False):
if nc_format == "NETCDF4":
datatype = _nc4_dtype(var)
else:
if "dtype" in var.encoding:
encoded_dtype = var.encoding["dtype"]
_check_encoding_dtype_is_vlen_string(encoded_dtype)
if raise_on_invalid_encoding:
raise ValueError(
"encoding dtype=str for vlen strings is only supported "
"with format='NETCDF4'."
)
datatype = var.dtype
return datatype
def _nc4_dtype(var):
if "dtype" in var.encoding:
dtype = var.encoding.pop("dtype")
_check_encoding_dtype_is_vlen_string(dtype)
elif coding.strings.is_unicode_dtype(var.dtype):
dtype = str
elif var.dtype.kind in ["i", "u", "f", "c", "S"]:
dtype = var.dtype
else:
raise ValueError(f"unsupported dtype for netCDF4 variable: {var.dtype}")
return dtype
def _netcdf4_create_group(dataset, name):
return dataset.createGroup(name)
def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
if group in {None, "", "/"}:
# use the root group
return ds
else:
# make sure it's a string
if not isinstance(group, str):
raise ValueError("group must be a string or None")
# support path-like syntax
path = group.strip("/").split("/")
for key in path:
try:
ds = ds.groups[key]
except KeyError as e:
if mode != "r":
ds = create_group(ds, key)
else:
# wrap error to provide slightly more helpful message
raise OSError("group not found: %s" % key, e)
return ds
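# Illustrative behaviour (comment added here, not part of upstream xarray):
# _nc4_require_group(ds, "forecast/temp", "r") walks ds.groups["forecast"]
# .groups["temp"] and raises OSError if a level is missing, while any
# writable mode creates the missing levels via create_group instead.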
def _ensure_fill_value_valid(data, attributes):
# work around for netCDF4/scipy issue where _FillValue has the wrong type:
# https://github.com/Unidata/netcdf4-python/issues/271
if data.dtype.kind == "S" and "_FillValue" in attributes:
attributes["_FillValue"] = np.string_(attributes["_FillValue"])
def _force_native_endianness(var):
# possible values for byteorder are:
# = native
# < little-endian
# > big-endian
# | not applicable
# Below we check if the data type is not native or NA
if var.dtype.byteorder not in ["=", "|"]:
# if endianness is specified explicitly, convert to the native type
data = var.data.astype(var.dtype.newbyteorder("="))
var = Variable(var.dims, data, var.attrs, var.encoding)
# if endian exists, remove it from the encoding.
var.encoding.pop("endian", None)
# check to see if encoding has a value for endian its 'native'
if not var.encoding.get("endian", "native") == "native":
raise NotImplementedError(
"Attempt to write non-native endian type, "
"this is not supported by the netCDF4 "
"python library."
)
return var
def _extract_nc4_variable_encoding(
variable,
raise_on_invalid=False,
lsd_okay=True,
h5py_okay=False,
backend="netCDF4",
unlimited_dims=None,
):
if unlimited_dims is None:
unlimited_dims = ()
encoding = variable.encoding.copy()
safe_to_drop = {"source", "original_shape"}
valid_encodings = {
"zlib",
"complevel",
"fletcher32",
"contiguous",
"chunksizes",
"shuffle",
"_FillValue",
"dtype",
}
if lsd_okay:
valid_encodings.add("least_significant_digit")
if h5py_okay:
valid_encodings.add("compression")
valid_encodings.add("compression_opts")
if not raise_on_invalid and encoding.get("chunksizes") is not None:
# It's possible to get encoded chunksizes larger than a dimension size
# if the original file had an unlimited dimension. This is problematic
# if the new file no longer has an unlimited dimension.
chunksizes = encoding["chunksizes"]
chunks_too_big = any(
c > d and dim not in unlimited_dims
for c, d, dim in zip(chunksizes, variable.shape, variable.dims)
)
has_original_shape = "original_shape" in encoding
changed_shape = (
has_original_shape and encoding.get("original_shape") != variable.shape
)
if chunks_too_big or changed_shape:
del encoding["chunksizes"]
var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims)
if not raise_on_invalid and var_has_unlim_dim and "contiguous" in encoding.keys():
del encoding["contiguous"]
for k in safe_to_drop:
if k in encoding:
del encoding[k]
if raise_on_invalid:
invalid = [k for k in encoding if k not in valid_encodings]
if invalid:
raise ValueError(
"unexpected encoding parameters for %r backend: %r. Valid "
"encodings are: %r" % (backend, invalid, valid_encodings)
)
else:
for k in list(encoding):
if k not in valid_encodings:
del encoding[k]
return encoding
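# Rough example of the filtering above (keys are illustrative): an encoding of
# {"zlib": True, "complevel": 4, "source": "old.nc", "foo": 1} is reduced to
# {"zlib": True, "complevel": 4}; with raise_on_invalid=True the unknown
# "foo" key raises a ValueError instead of being dropped silently.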
def _is_list_of_strings(value):
if np.asarray(value).dtype.kind in ["U", "S"] and np.asarray(value).size > 1:
return True
else:
return False
class NetCDF4DataStore(WritableCFDataStore):
"""Store for reading and writing data via the Python-NetCDF4 library.
This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""
__slots__ = (
"autoclose",
"format",
"is_remote",
"lock",
"_filename",
"_group",
"_manager",
"_mode",
)
def __init__(
self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False
):
import netCDF4
if isinstance(manager, netCDF4.Dataset):
if group is None:
root, group = find_root_and_group(manager)
else:
if not type(manager) is netCDF4.Dataset:
raise ValueError(
"must supply a root netCDF4.Dataset if the group "
"argument is provided"
)
root = manager
manager = DummyFileManager(root)
self._manager = manager
self._group = group
self._mode = mode
self.format = self.ds.data_model
self._filename = self.ds.filepath()
self.is_remote = is_remote_uri(self._filename)
self.lock = ensure_lock(lock)
self.autoclose = autoclose
@classmethod
def open(
cls,
filename,
mode="r",
format="NETCDF4",
group=None,
clobber=True,
diskless=False,
persist=False,
lock=None,
lock_maker=None,
autoclose=False,
):
import netCDF4
if isinstance(filename, pathlib.Path):
filename = os.fspath(filename)
if not isinstance(filename, str):
raise ValueError(
"can only read bytes or file-like objects "
"with engine='scipy' or 'h5netcdf'"
)
if format is None:
format = "NETCDF4"
if lock is None:
if mode == "r":
if is_remote_uri(filename):
lock = NETCDFC_LOCK
else:
lock = NETCDF4_PYTHON_LOCK
else:
if format is None or format.startswith("NETCDF4"):
base_lock = NETCDF4_PYTHON_LOCK
else:
base_lock = NETCDFC_LOCK
lock = combine_locks([base_lock, get_write_lock(filename)])
kwargs = dict(
clobber=clobber, diskless=diskless, persist=persist, format=format
)
manager = CachingFileManager(
netCDF4.Dataset, filename, mode=mode, kwargs=kwargs
)
return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)
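    # Minimal usage sketch (file name is hypothetical):
    #
    #   store = NetCDF4DataStore.open("example.nc", mode="r")
    #   variables = store.get_variables()
    #   store.close()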
def _acquire(self, needs_lock=True):
with self._manager.acquire_context(needs_lock) as root:
ds = _nc4_require_group(root, self._group, self._mode)
return ds
@property
def ds(self):
return self._acquire()
def open_store_variable(self, name, var):
dimensions = var.dimensions
data = indexing.LazilyOuterIndexedArray(NetCDF4ArrayWrapper(name, self))
attributes = {k: var.getncattr(k) for k in var.ncattrs()}
_ensure_fill_value_valid(data, attributes)
# netCDF4 specific encoding; save _FillValue for later
encoding = {}
filters = var.filters()
if filters is not None:
encoding.update(filters)
chunking = var.chunking()
if chunking is not None:
if chunking == "contiguous":
encoding["contiguous"] = True
encoding["chunksizes"] = None
else:
encoding["contiguous"] = False
encoding["chunksizes"] = tuple(chunking)
# TODO: figure out how to round-trip "endian-ness" without raising
# warnings from netCDF4
# encoding['endian'] = var.endian()
pop_to(attributes, encoding, "least_significant_digit")
# save source so __repr__ can detect if it's local or not
encoding["source"] = self._filename
encoding["original_shape"] = var.shape
encoding["dtype"] = var.dtype
return Variable(dimensions, data, attributes, encoding)
def get_variables(self):
dsvars = FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
return dsvars
def get_attrs(self):
attrs = FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs())
return attrs
def get_dimensions(self):
dims = FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())
return dims
def get_encoding(self):
encoding = {}
encoding["unlimited_dims"] = {
k for k, v in self.ds.dimensions.items() if v.isunlimited()
}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
dim_length = length if not is_unlimited else None
self.ds.createDimension(name, size=dim_length)
def set_attribute(self, key, value):
if self.format != "NETCDF4":
value = encode_nc3_attr_value(value)
if _is_list_of_strings(value):
# encode as NC_STRING if attr is list of strings
self.ds.setncattr_string(key, value)
else:
self.ds.setncattr(key, value)
def encode_variable(self, variable):
variable = _force_native_endianness(variable)
if self.format == "NETCDF4":
variable = _encode_nc4_variable(variable)
else:
variable = encode_nc3_variable(variable)
return variable
def prepare_variable(
self, name, variable, check_encoding=False, unlimited_dims=None
):
datatype = _get_datatype(
variable, self.format, raise_on_invalid_encoding=check_encoding
)
attrs = variable.attrs.copy()
fill_value = attrs.pop("_FillValue", None)
if datatype is str and fill_value is not None:
raise NotImplementedError(
"netCDF4 does not yet support setting a fill value for "
"variable-length strings "
"(https://github.com/Unidata/netcdf4-python/issues/730). "
"Either remove '_FillValue' from encoding on variable %r "
"or set {'dtype': 'S1'} in encoding to use the fixed width "
"NC_CHAR type." % name
)
encoding = _extract_nc4_variable_encoding(
variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims
)
if name in self.ds.variables:
nc4_var = self.ds.variables[name]
else:
nc4_var = self.ds.createVariable(
varname=name,
datatype=datatype,
dimensions=variable.dims,
zlib=encoding.get("zlib", False),
complevel=encoding.get("complevel", 4),
shuffle=encoding.get("shuffle", True),
fletcher32=encoding.get("fletcher32", False),
contiguous=encoding.get("contiguous", False),
chunksizes=encoding.get("chunksizes"),
endian="native",
least_significant_digit=encoding.get("least_significant_digit"),
fill_value=fill_value,
)
nc4_var.setncatts(attrs)
target = NetCDF4ArrayWrapper(name, self)
return target, variable.data
def sync(self):
self.ds.sync()
def close(self, **kwargs):
self._manager.close(**kwargs)
def open_backend_dataset_netcdf4(
filename_or_obj,
mask_and_scale=True,
decode_times=None,
concat_characters=None,
decode_coords=None,
drop_variables=None,
use_cftime=None,
decode_timedelta=None,
group=None,
mode="r",
format="NETCDF4",
clobber=True,
diskless=False,
persist=False,
lock=None,
autoclose=False,
):
store = NetCDF4DataStore.open(
filename_or_obj,
mode=mode,
format=format,
group=group,
clobber=clobber,
diskless=diskless,
persist=persist,
lock=lock,
autoclose=autoclose,
)
ds = open_backend_dataset_store(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
netcdf4_backend = BackendEntrypoint(open_dataset=open_backend_dataset_netcdf4)
| 32.508137 | 87 | 0.610558 |
07b5bd9ae4623a54004aa82e6b043ff14298e072 | 2,608 | py | Python | tests/unit/plugins/widgets/test_widget_directive.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | ["Apache-2.0"] | 2 | 2017-11-08T19:55:57.000Z | 2018-12-21T12:41:41.000Z | tests/unit/plugins/widgets/test_widget_directive.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | ["Apache-2.0"] | null | null | null | tests/unit/plugins/widgets/test_widget_directive.py | pauleveritt/kaybee | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | ["Apache-2.0"] | 1 | 2018-10-13T08:59:29.000Z | 2018-10-13T08:59:29.000Z |
import pytest
from kaybee.plugins.widgets.directive import WidgetDirective
from kaybee import app
class TestWidgetDirective:
def test_import(self):
assert 'WidgetDirective' == WidgetDirective.__name__
def test_construction(self, dummy_directive):
assert 'dummy_directive' == dummy_directive.name
def test_get_widget_class(self, monkeypatch,
dummy_directive_class,
dummy_widget_class,
widgets_kb_app):
# Setup fake registry
monkeypatch.setattr(app, 'kb', widgets_kb_app)
dwc = dummy_directive_class.name
widgets_kb_app.config.widgets = {dwc: dummy_widget_class}
actual = WidgetDirective.get_widget_class(dwc)
assert dummy_widget_class == actual
@pytest.mark.parametrize('content, yc, rc', [
(['yaml'], 'yaml', None),
(['yaml', '', 'rst'], 'yaml', 'rst'),
(['', '', 'yaml', '', '', 'rst', '', ''], 'yaml', '\nrst'),
(['yaml', '', '', 'rst1', '', '', 'rst2', '', '', 'rst3'],
'yaml', '\nrst1\n\n\nrst2\n\n\nrst3'),
])
def test_split_content(self, content, yc, rc):
yaml_content, rst_content = WidgetDirective.split_content(content)
assert yc == yaml_content
assert rc == rst_content
def test_get_widget(self, monkeypatch, dummy_directive,
dummy_directive_class,
dummy_widget_class,
widgets_kb_app):
monkeypatch.setattr(app, 'kb', widgets_kb_app)
dwc = dummy_directive_class.name
widgets_kb_app.config.widgets = {dwc: dummy_widget_class}
actual = WidgetDirective.get_widget_class(dwc)
assert dummy_widget_class == actual
widget = dummy_directive.get_widget('dummy123')
assert 'dummy123' == widget.docname
assert 'dummy_directive' == widget.wtype
def test_docname(self, dummy_directive):
assert 'somedoc' == dummy_directive.docname
def test_widgets(self, dummy_directive):
assert dict() == dummy_directive.widgets
def test_run_result(self, monkeypatch,
dummy_directive_class,
dummy_widget_class,
dummy_directive, widgets_kb_app):
# Setup fake registry
monkeypatch.setattr(app, 'kb', widgets_kb_app)
drc = dummy_directive_class.name
widgets_kb_app.config.widgets = {drc: dummy_widget_class}
result = dummy_directive.run()
assert 'widget' == result[0].__class__.__name__
| 37.797101 | 74 | 0.61158 |
e76226567e0e27e230c551557515ef62cc2fdd41 | 3,766 | py | Python | splices2npz_test.py | gcunhase/AnnotatedMV-PreProcessing | 32ccce86c0cc4219d9d9f09868aa1e473ca3c25a | ["MIT"] | 3 | 2021-03-09T10:55:33.000Z | 2021-04-08T19:19:19.000Z | splices2npz_test.py | gcunhase/AnnotatedMV-PreProcessing | 32ccce86c0cc4219d9d9f09868aa1e473ca3c25a | ["MIT"] | 3 | 2020-01-18T11:50:26.000Z | 2020-06-27T04:51:03.000Z | splices2npz_test.py | gcunhase/AnnotatedMV-PreProcessing | 32ccce86c0cc4219d9d9f09868aa1e473ca3c25a | ["MIT"] | null | null | null |
import numpy as np
from moviepy.editor import VideoFileClip
from ImageSequenceClip import ImageSequenceClip
import utils
from skimage import color
import librosa
import pandas as pd
import math
"""
Test steps: takes a movie file, resizes, downsamples, does RGB2HSV color transformation and saves data in .npz,
saves new video, loads that video for testing, loads .npz, converts from HSV2RGB, saves in restored video for
testing.
DONE: add emotion and text
"""
__author__ = "Gwena Cunha"
params = {
'fps': 10,
'root': utils.project_dir_name() + 'data_test/',
'new_size': 100,
'sr': 16000
}
def load_video(filename):
# Load videos (fps = 30)
clip = VideoFileClip(filename)
# Resize to 100 x 100
clip_resized = clip.resize(newsize=(params['new_size'], params['new_size']))
print("clip: {}, resized: {}".format(clip.size, clip_resized.size))
# Downsample
downsampled_frames, new_filename = utils.downsample_video(clip_resized, params, save_downsampled=True)
# Load video for testing
clip = VideoFileClip(new_filename)
num_frames = round(clip.fps * clip.duration)
print("Number of frames: {}, size: {}, fps: {}".format(num_frames, clip.size, clip.fps))
# Frames colour conversion
frame_hsv_arr = []
for frame in downsampled_frames:
frame_hsv = color.rgb2hsv(frame)
frame_hsv_arr.append(frame_hsv)
return frame_hsv_arr
if __name__ == '__main__':
# Load video
params['root'] += 'BMI/'
sample_idx = 0
video_filename = "{}video_splices_3secs/{}.mp4".format(params['root'], sample_idx)
frame_hsv_arr = load_video(video_filename)
# Load corresponding audio
audio, sr = librosa.load("{}audio_splices_3secs_16000_c1_16bits/{}.wav".format(params['root'], sample_idx), sr=params['sr'])
print("audio: {}, sr: {}".format(np.shape(audio), sr))
# Load corresponding emotion
emotion_csv = pd.read_csv(params['root'] + "intended_1_splices_3secs.csv")
emotion_data = emotion_csv['emotion']
emotion = emotion_data[sample_idx]
print("emotion: {}, {}: {}".format(np.shape(emotion_data), sample_idx, emotion))
# Load corresponding text
text_csv = pd.read_csv(params['root'] + "text_splices_3secs.csv")
text_data = text_csv['text']
text = text_data[sample_idx]
# text = "" if math.isnan(text) else text
text = "" if isinstance(text, float) else text
print("text: {}, {}: {}".format(np.shape(text_data), sample_idx, text))
# Save in .npz
save_npz_filename = '{}video_feats_HSL.npz'.format(params['root'])
# np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr, audio=audio)
np.savez_compressed(save_npz_filename, HSL_data=frame_hsv_arr, audio=audio, emotion=emotion, text=text)
# Load from .npz, convert to RGB and save in recovered (movie)
data = np.load(save_npz_filename)
hsl_frames = data['HSL_data']
frame_rgb_arr = []
for frame in hsl_frames:
frame_rgb = color.hsv2rgb(frame)
frame_rgb_arr.append(frame_rgb * 255)
scaled = np.ascontiguousarray(frame_rgb_arr)
clip = ImageSequenceClip(np.array(scaled), fps=params['fps'])
new_filename = '{root}1_{new_size}x{new_size}_{fps}fps_recovered.mp4'.format(root=params['root'],
new_size=params['new_size'],
fps=params['fps'])
clip.write_videofile(new_filename, fps=params['fps'])
# Load audio and save as recovered
audio = data['audio']
new_filename = '{root}BMI_1_recovered.wav'.format(root=params['root'])
librosa.output.write_wav(new_filename, y=audio, sr=sr)
| 35.866667 | 128 | 0.661445 |
8dea51464e66b60b236b3c56f185707b384c213c | 458 | py | Python | env/lib/python3.8/site-packages/plotly/validators/scatter3d/_customdatasrc.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/scatter3d/_customdatasrc.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/scatter3d/_customdatasrc.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z |
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="customdatasrc", parent_name="scatter3d", **kwargs):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 35.230769 | 87 | 0.665939 |
ecbc4477f3b5089a29dee0151dc1660b383b6874 | 4,835 | py | Python | flask_rest_controller/controller.py | teitei-tk/Flask-REST-Controller | b4386b523f3d2c6550051c95d5ba74e5ff459946 | ["MIT"] | 1 | 2016-10-14T08:52:45.000Z | 2016-10-14T08:52:45.000Z | flask_rest_controller/controller.py | teitei-tk/Flask-REST-Controller | b4386b523f3d2c6550051c95d5ba74e5ff459946 | ["MIT"] | 1 | 2015-06-16T12:17:23.000Z | 2015-06-16T12:17:23.000Z | flask_rest_controller/controller.py | teitei-tk/Flask-REST-Controller | b4386b523f3d2c6550051c95d5ba74e5ff459946 | ["MIT"] | 1 | 2017-05-21T13:49:56.000Z | 2017-05-21T13:49:56.000Z |
# coding: utf-8
try:
import simplejson as json
except ImportError:
import json
import urllib
import jsonschema
from flask import redirect, url_for, render_template, session, request, abort, current_app
from flask.views import MethodView
__all__ = ['BaseRender', 'JsonRender', 'TemplateRender', 'BaseHandler', 'Controller']
class BaseRender(object):
mimetype = None
def set_mimetype(self, mimetype):
self.mimetype = mimetype.lower()
@property
def request(self):
return request
@property
def into(self):
return self.request.method.lower()
class JsonRender(BaseRender):
"""
for rendering a json response
"""
json_mime_type = "application/json"
schema = None
def render_json(self, data):
if not self.is_json_response(data):
data = [data]
if self.should_schema_check:
self.valid_schema(data)
self.set_mimetype(self.json_mime_type)
return json.dumps(data)
@property
def should_schema_check(self):
"""
        whether the json response should be validated against the json schema
"""
return True
def _detect_schema_value(self):
schema = None
for key in self.schema.keys():
if not key.lower() == self.into:
continue
schema = self.schema[key]
break
if not schema:
schema = self.schema
return schema
def valid_schema(self, response):
if not isinstance(self.schema, dict):
return response
schema = self._detect_schema_value()
return jsonschema.validate(response, schema)
def is_json_response(self, response):
return isinstance(response, dict) or isinstance(response, list)
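# Schema layout sketch (assumed from _detect_schema_value above): ``schema``
# may be a single JSON schema, or one schema per HTTP method, e.g.
#
#   schema = {
#       "get": {"type": "object"},
#       "post": {"type": "array", "maxItems": 1},
#   }
#
# The entry matching the current request method is validated against the
# response data before it is serialized.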
class TemplateRender(BaseRender):
"""
    for rendering an html template
"""
html_mime_type = "text/html"
def render_template(self, template_path, values={}):
self.set_mimetype(self.html_mime_type)
return render_template(template_path, **values)
class BaseHandler(MethodView):
"""
handling a dispatch for request
"""
methods = ['GET', 'POST']
def dispatch_request(self, *args, **kwargs):
if not self.authenticate(*args, **kwargs):
return self.authenticate_error()
if not self.prepare(*args, **kwargs):
return self.prepare_error()
response = super(BaseHandler, self).dispatch_request(*args, **kwargs)
self.after()
return self.after_response(response)
def authenticate(self, *args, **kwargs):
"""
        run validation for your authentication
"""
return True
def authenticate_error(self):
"""
        run when authentication fails
"""
return self.render_error()
def prepare(self, *args, **kwargs):
"""
        prepare your validation and update logic
"""
return True
def prepare_error(self):
"""
        run when prepare fails
"""
return self.render_error()
def after(self):
"""
        method executed after the request handler has run;
        override it when you need post-processing
"""
pass
def get(self, *args, **kwargs):
self.error_404()
def post(self, *args, **kwargs):
self.error_404()
def put(self, *args, **kwargs):
self.error_404()
def delete(self, *args, **kwargs):
self.error_404()
def after_response(self, response):
return response
def error_404(self):
return abort(404)
def render_error(self):
return self.error_404()
class Controller(TemplateRender, JsonRender, BaseHandler):
"""
    base class-based Controller implementation.
    To use it, extend this class in your own controller.
    When a request comes in, the method matching the HTTP verb is executed.
example:
HTTP GET Request -> get
HTTP POST Request -> post
"""
storage = dict()
headers = dict()
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self.storage = dict()
self.headers = dict()
def add_header(self, key, value):
self.headers[key] = value
def get_headers(self):
return self.headers
def after_response(self, response):
return current_app.response_class(response, headers=self.get_headers(), mimetype=self.mimetype.lower())
@property
def session(self):
return session
def redirect(self, uri, params={}):
try:
return redirect(url_for(uri))
except RuntimeError:
pass
query = [(k, v) for k, v in sorted(params.items())]
params = urllib.urlencode(query)
return redirect("{0}?{1}".format(uri, params))
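# Minimal usage sketch (route and payload are made up):
#
#   class PingController(Controller):
#       schema = {"get": {"type": "object"}}
#
#       def get(self):
#           return self.render_json({"pong": True})
#
#   app.add_url_rule("/ping", view_func=PingController.as_view("ping"))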
| 23.585366 | 111 | 0.609514 |
345d3c0a1b64e66f197430e075ae6940e8ff9993 | 12,728 | py | Python | tensorflow/python/ops/parallel_for/array_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/parallel_for/array_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/parallel_for/array_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of array kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTestCase):
def test_gather(self):
x = random_ops.random_uniform([3, 3, 3])
def loop_fn(i):
outputs = []
x_i = array_ops.gather(x, i)
for y in [x, x_i]:
axes = [0, 2, -1] if y is x else [0]
for axis in axes:
outputs.append(array_ops.gather(y, 2, axis=axis))
outputs.append(array_ops.gather(y, i, axis=axis))
outputs.append(array_ops.gather(y, [i], axis=axis))
outputs.append(array_ops.gather(y, [i, 2], axis=axis))
outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
return outputs
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 20)
def test_shape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_size(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_rank(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.rank(x_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_shape_n(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
[x_i, x, y, y_i], out_type=dtypes.int64)
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.int32] * 4 + [dtypes.int64] * 4)
def test_reshape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_broadcast_to(self):
x = random_ops.random_uniform([3, 2, 1, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.broadcast_to(x1, [2, 2, 3]),
array_ops.broadcast_to(x1, [1, 2, 1, 3]))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_expand_dims(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.expand_dims(
x1, axis=-1), array_ops.expand_dims(
x1, axis=1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_one_hot(self):
indices = random_ops.random_uniform(
[3, 2, 3], minval=0, maxval=4, dtype=dtypes.int32)
def loop_fn(i):
indices_i = array_ops.gather(indices, i)
return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),
array_ops.one_hot(indices_i, depth=4, axis=1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_searchsorted(self):
sorted_inputs = math_ops.cumsum(random_ops.random_uniform([3, 2, 4]),
axis=-1)
values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)
def loop_fn(i):
inputs_i = array_ops.gather(sorted_inputs, i)
return [array_ops.searchsorted(inputs_i, values, out_type=dtypes.int32,
side="left"), # creates LowerBound op.
array_ops.searchsorted(inputs_i, values, out_type=dtypes.int64,
side="right")] # creates UpperBound op.
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_slice(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.slice(x1, begin=(0, 1), size=(2, 1))
self._test_loop_fn(loop_fn, 3)
def test_tile(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [2, 1])
self._test_loop_fn(loop_fn, 3)
def test_tile_loop_dependent(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [i, 1])
with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
pfor_control_flow_ops.pfor(loop_fn, 2)
def test_pack(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.stack([x1, y], axis=-1)
self._test_loop_fn(loop_fn, 1)
def test_unpack(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.unstack(
x_i, 4, axis=-1), array_ops.unstack(
x_i, 3, axis=1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 7)
def test_pad(self):
x = random_ops.random_uniform([3, 2, 3])
padding = constant_op.constant([[1, 2], [3, 4]])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.pad(x1, padding, mode="CONSTANT")
self._test_loop_fn(loop_fn, 3)
def test_split(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 5)
def test_split_v(self):
x = random_ops.random_uniform([3, 6, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.split(x1, [2, 1, 3], axis=0),
array_ops.split(x1, [3], axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 4)
def test_squeeze(self):
x = random_ops.random_uniform([5, 1, 2, 1])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.squeeze(x1, axis=0),
array_ops.squeeze(x1, axis=-1),
array_ops.squeeze(x1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_transpose(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.transpose(x1, [2, 1, 0])
self._test_loop_fn(loop_fn, 3)
def test_zeros_like(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
z = array_ops.zeros_like(x1),
return z, z + x1
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_concat_v2(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.concat(
[x1, x1, y], axis=0), array_ops.concat(
[x1, x1, y], axis=-1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_unary_cwise_ops(self):
for op in [array_ops.identity, array_ops.stop_gradient]:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y = op(x1) + x1
loss = nn.l2_loss(y)
return op(x), y, g.gradient(loss, x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_identity_n(self):
x = random_ops.random_uniform([3, 4])
def loop_fn(i):
return array_ops.identity_n([x, array_ops.gather(x, i)])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_matrix_band_part(self):
x = random_ops.random_uniform([3, 4, 2, 2])
for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.matrix_band_part(
array_ops.gather(x, i),
num_lower=num_lower,
num_upper=num_upper)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag(self):
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
diagonal = array_ops.gather(x, i)
if compat.forward_compatible(2019, 8, 31):
return array_ops.matrix_diag(diagonal, k=(0, 1), num_rows=4, num_cols=5)
return array_ops.matrix_diag(diagonal)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_matrix_diag_part(self):
x = random_ops.random_uniform([3, 4, 6])
def loop_fn(i):
input = array_ops.gather(x, i) # pylint: disable=redefined-builtin
if compat.forward_compatible(2019, 8, 31):
return array_ops.matrix_diag_part(input, k=(-2, 0), padding_value=3)
return array_ops.matrix_diag_part(input)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_matrix_set_diag(self):
matrices = random_ops.random_uniform([3, 4, 4])
diags = random_ops.random_uniform([3, 4])
num_outputs = 3
if compat.forward_compatible(2019, 8, 31):
bands = random_ops.random_uniform([3, 3, 4])
num_outputs = 6
def loop_fn(i):
matrix_i = array_ops.gather(matrices, i)
diag_i = array_ops.gather(diags, i)
results = [
array_ops.matrix_set_diag(matrix_i, diag_i),
array_ops.matrix_set_diag(matrices[0, ...], diag_i),
array_ops.matrix_set_diag(matrix_i, diags[0, ...])
]
if compat.forward_compatible(2019, 8, 31):
k = (-1, 1)
band_i = array_ops.gather(bands, i)
results.extend([
array_ops.matrix_set_diag(matrix_i, band_i, k=k),
array_ops.matrix_set_diag(matrices[0, ...], band_i, k=k),
array_ops.matrix_set_diag(matrix_i, bands[0, ...], k=k)
])
return results
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * num_outputs)
def test_strided_slice(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
g.watch(x)
def loop_fn(i):
with g:
x_i = array_ops.gather(x, i)
y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
loss = nn.l2_loss(y)
return y, g.gradient(loss, x_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
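# A minimal illustrative sketch (not part of the original test suite) of what a
# harness like `_test_loop_fn` above is assumed to do: run `loop_fn` once
# vectorized with `pfor` and once with a sequential `for_loop`, then compare the
# stacked per-iteration outputs. The helper name and the comparison policy are
# assumptions, not the real harness.
def _sketch_pfor_vs_for_loop(loop_fn, iters, loop_fn_dtypes):
  from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_ops
  # Vectorized execution: one batched computation covering all iterations.
  vectorized_outputs = pfor_ops.pfor(loop_fn, iters)
  # Reference execution: a tf.while_loop running the iterations one by one.
  sequential_outputs = pfor_ops.for_loop(loop_fn, loop_fn_dtypes, iters)
  return vectorized_outputs, sequential_outputs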
if __name__ == "__main__":
test.main()
| 33.494737 | 89 | 0.626807 |
5f14a769907cd2d3c3e71f7f8b11474a1d5c0a5b | 259 | py | Python | change_color/__init__.py | fongradnastya/color_palette | 98baeba0c26499ef5c42034a13f6b72dfa8affe5 | [
"MIT"
]
| null | null | null | change_color/__init__.py | fongradnastya/color_palette | 98baeba0c26499ef5c42034a13f6b72dfa8affe5 | [
"MIT"
]
| null | null | null | change_color/__init__.py | fongradnastya/color_palette | 98baeba0c26499ef5c42034a13f6b72dfa8affe5 | [
"MIT"
]
| null | null | null | """The module initializes the change_color package."""
__all__ = ['color', 'graphic_ui']
from change_color import *
__title__ = 'color_palette'
__author__ = 'Anastasia Fongrad'
__email__ = '[email protected]'
__copyright__ = f'Copyright (c) 2021, {__author__}'
| 32.375 | 51 | 0.760618 |
0ad97eeb130a091b29e4b39e3ed1e9cb77b44a94 | 75 | py | Python | states/__init__.py | vogelfenx/storagebot | 64ab07b068bf645d7cdf5bb1cd5db91c0e2a9228 | [
"MIT"
]
| null | null | null | states/__init__.py | vogelfenx/storagebot | 64ab07b068bf645d7cdf5bb1cd5db91c0e2a9228 | [
"MIT"
]
| 2 | 2021-11-24T18:20:00.000Z | 2021-11-24T18:31:55.000Z | states/__init__.py | vogelfenx/storagebot | 64ab07b068bf645d7cdf5bb1cd5db91c0e2a9228 | [
"MIT"
]
| 4 | 2021-11-24T16:40:28.000Z | 2021-11-28T10:40:57.000Z | from .natural_person_state import NaturalPerson
__all__ = ['NaturalPerson'] | 18.75 | 47 | 0.84 |
09fabafeb847ec65b2ba112b261adc1031ff53c0 | 3,701 | py | Python | module/malware/28.py | 5l1v3r1/saydog-framework | 92d6d2a80958ecf5724c95d1d3c76d8ca95db8d6 | [
"Xnet",
"TCP-wrappers",
"X11"
]
| 2 | 2020-09-26T16:43:11.000Z | 2021-02-09T21:46:08.000Z | module/malware/28.py | 5l1v3r1/saydog-framework | 92d6d2a80958ecf5724c95d1d3c76d8ca95db8d6 | [
"Xnet",
"TCP-wrappers",
"X11"
]
| null | null | null | module/malware/28.py | 5l1v3r1/saydog-framework | 92d6d2a80958ecf5724c95d1d3c76d8ca95db8d6 | [
"Xnet",
"TCP-wrappers",
"X11"
]
| 1 | 2022-03-19T06:40:56.000Z | 2022-03-19T06:40:56.000Z | import os,sys
import time
u='\033[4m'
w='\x1b[00m'
r='\x1b[91m'
b='\x1b[36;1m'
y='\x1b[33m'
def exit():
print(r+'[!]'+w+' The user forces it to stop')
print(r+'[!]'+w+' Exiting program')
sys.exit(1)
def corrupt():
print(r+'[?]'+w+' Command not found, please type help')
########### 256 ############
def thezoo():
while True:
try:
global name
global output
names = 'EquationGroup.Fanny'
mg2 = input(w+'saydog('+r+'malware/'+names+w+') > ')
if mg2 == 'help':
print('')
print('Malware name: '+names)
print('-------')
print('command example')
print('------- -------')
print('set name [new name] set name saydog')
print('set output [path] set output /sdcard')
print('show show')
print('run, go, create create')
print('')
elif mg2 == 'exit':
exit()
elif mg2 == 'back':
sys.exit(0)
elif mg2 == 'clear':
os.system('clear')
elif 'set name' in mg2:
name = mg2.split()[(-1)]
print('name > '+name)
elif 'set output' in mg2:
output = mg2.split()[(-1)]
print('output > '+output)
elif mg2 == 'show':
print('')
print('-------------------')
print('name : '+name)
print('output : '+output)
print('-------------------')
print('')
elif mg2 == 'run' or mg2 == 'go' or mg2 == 'create':
time.sleep(1)
print(y+'[-]'+w+' Generate malware '+names)
time.sleep(2)
print(y+'[-]'+w+' please wait for a minute ...')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.md5 -O '+output+'/'+name+'.md5')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.pass -O '+output+'/'+name+'.pass')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.sha256 -O '+output+'/'+name+'.sha256')
os.system('wget https://raw.githubusercontent.com/ytisf/theZoo/master/malwares/Binaries/'+names+'/'+names+'.zip -O '+output+'/'+name+'.zip')
print(w+'\033[41m success \033[00m file saved as '+output)
else:
corrupt()
except NameError:
print(r+'[!] Error: '+w+'[name] or [output] not found')
except KeyboardInterrupt:
exit()
thezoo() | 50.013514 | 178 | 0.348014 |
3ac6ddb482b37086f75ce1f674fd7aaf2b343911 | 58,080 | py | Python | object_detection/utils/object_detection_evaluation.py | Picsell-ia/training | ab16373c0deaf17a6408136e8a8d62d318bedd9e | [
"MIT"
]
| 3 | 2020-06-04T00:56:07.000Z | 2020-06-16T08:20:31.000Z | object_detection/utils/object_detection_evaluation.py | Picsell-ia/training | ab16373c0deaf17a6408136e8a8d62d318bedd9e | [
"MIT"
]
| 6 | 2020-06-08T14:35:41.000Z | 2022-02-10T01:52:40.000Z | object_detection/utils/object_detection_evaluation.py | Picsell-ia/training | ab16373c0deaf17a6408136e8a8d62d318bedd9e | [
"MIT"
]
| null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.
ObjectDetectionEvaluation is a class which manages ground truth information of an
object detection dataset, and computes frequently used detection metrics such as
Precision, Recall, CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection result of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
visualization.
Note: This module operates on numpy boxes and box lists.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import collections
# import logging
from tensorflow import logging as logging
import unicodedata
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow as tf
from object_detection.core import standard_fields
from object_detection.utils import label_map_util
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation
class DetectionEvaluator(six.with_metaclass(ABCMeta, object)):
"""Interface for object detection evalution classes.
Example usage of the Evaluator:
------------------------------
evaluator = DetectionEvaluator(categories)
# Detections and groundtruth for image 1.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
# Detections and groundtruth for image 2.
evaluator.add_single_groundtruth_image_info(...)
evaluator.add_single_detected_image_info(...)
metrics_dict = evaluator.evaluate()
"""
def __init__(self, categories):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
"""
self._categories = categories
def observe_result_dict_for_single_example(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
raise NotImplementedError('Not implemented for this evaluator!')
@abstractmethod
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary of groundtruth numpy arrays required for
evaluations.
"""
pass
@abstractmethod
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary of detection numpy arrays required for
evaluation.
"""
pass
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
pass
@abstractmethod
def evaluate(self):
"""Evaluates detections and returns a dictionary of metrics."""
pass
@abstractmethod
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
pass
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
evaluate_corlocs=False,
evaluate_precision_recall=False,
metric_prefix=None,
use_weighted_mean_ap=False,
evaluate_masks=False,
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
recall_lower_bound: lower bound of recall operating area.
recall_upper_bound: upper bound of recall operating area.
evaluate_corlocs: (optional) boolean which determines if corloc scores are
to be returned or not.
evaluate_precision_recall: (optional) boolean which determines if
precision and recall values are to be returned or not.
metric_prefix: (optional) string prefix for metric name; if None, no
prefix is used.
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
evaluate_masks: If False, evaluation will be performed based on boxes. If
True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
Raises:
ValueError: If the category ids are not 1-indexed.
"""
super(ObjectDetectionEvaluator, self).__init__(categories)
self._num_classes = max([cat['id'] for cat in categories])
if min(cat['id'] for cat in categories) < 1:
raise ValueError('Classes should be 1-indexed.')
self._matching_iou_threshold = matching_iou_threshold
self._recall_lower_bound = recall_lower_bound
self._recall_upper_bound = recall_upper_bound
self._use_weighted_mean_ap = use_weighted_mean_ap
self._label_id_offset = 1
self._evaluate_masks = evaluate_masks
self._group_of_weight = group_of_weight
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
recall_lower_bound=self._recall_lower_bound,
recall_upper_bound=self._recall_upper_bound,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset,
group_of_weight=self._group_of_weight)
self._image_ids = set([])
self._evaluate_corlocs = evaluate_corlocs
self._evaluate_precision_recall = evaluate_precision_recall
self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_difficult,
standard_fields.InputDataFields.groundtruth_instance_masks,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
standard_fields.DetectionResultFields.detection_masks
])
self._build_metric_names()
def get_internal_state(self):
"""Returns internal state and image ids that lead to the state.
Note that only evaluation results will be returned (e.g. not raw predictions
    or groundtruth).
"""
return self._evaluation.get_internal_state(), self._image_ids
def merge_internal_state(self, image_ids, state_tuple):
"""Merges internal state with the existing state of evaluation.
If image_id is already seen by evaluator, an error will be thrown.
Args:
image_ids: list of images whose state is stored in the tuple.
state_tuple: state.
"""
for image_id in image_ids:
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
self._evaluation.merge_internal_state(state_tuple)
def _build_metric_names(self):
"""Builds a list with metric names."""
if self._recall_lower_bound > 0.0 or self._recall_upper_bound < 1.0:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU@[{:.1f},{:.1f}]Recall'.format(
self._matching_iou_threshold, self._recall_lower_bound,
self._recall_upper_bound)
]
else:
self._metric_names = [
self._metric_prefix +
'Precision/mAP@{}IOU'.format(self._matching_iou_threshold)
]
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))
category_index = label_map_util.create_category_index(self._categories)
for idx in range(self._num_classes):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
self._metric_names.append(
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
if self._evaluate_corlocs:
self._metric_names.append(
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_difficult: Optional length M
numpy boolean array denoting whether a ground truth box is a difficult
instance or not. This field is optional to support the case that no
boxes are difficult.
standard_fields.InputDataFields.groundtruth_instance_masks: Optional
numpy array of shape [num_boxes, height, width] with values in {0, 1}.
Raises:
ValueError: On adding groundtruth for an image more than once. Will also
raise error if instance masks are not in groundtruth dictionary.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
# If the key is not present in the groundtruth_dict or the array is empty
# (unless there are no annotations for the groundtruth on this image)
# use values from the dictionary or insert None otherwise.
if (standard_fields.InputDataFields.groundtruth_difficult in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
.size or not groundtruth_classes.size)):
groundtruth_difficult = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_difficult]
else:
groundtruth_difficult = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth difficult flag specified',
image_id)
groundtruth_masks = None
if self._evaluate_masks:
if (standard_fields.InputDataFields.groundtruth_instance_masks not in
groundtruth_dict):
raise ValueError('Instance masks not in groundtruth dictionary.')
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._evaluation.add_single_ground_truth_image_info(
image_key=image_id,
groundtruth_boxes=groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_class_labels=groundtruth_classes,
groundtruth_is_difficult_list=groundtruth_difficult,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
standard_fields.DetectionResultFields.detection_masks: uint8 numpy array
of shape [num_boxes, height, width] containing `num_boxes` masks of
values ranging between 0 and 1.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
detection_masks = None
if self._evaluate_masks:
if (standard_fields.DetectionResultFields.detection_masks not in
detections_dict):
raise ValueError('Detection masks not in detections dictionary.')
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detections_dict[
standard_fields.DetectionResultFields.detection_boxes],
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores],
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def evaluate(self):
"""Compute evaluation result.
Returns:
A dictionary of metrics with the following fields -
1. summary_metrics:
'<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
average precision at the specified IOU threshold.
2. per_category_ap: category specific results with keys of the form
'<prefix if not empty>_PerformanceByCategory/
mAP@<matching_iou_threshold>IOU/category'.
"""
(per_class_ap, mean_ap, per_class_precision, per_class_recall,
per_class_corloc, mean_corloc) = (
self._evaluation.evaluate())
pascal_metrics = {self._metric_names[0]: mean_ap}
if self._evaluate_corlocs:
pascal_metrics[self._metric_names[1]] = mean_corloc
category_index = label_map_util.create_category_index(self._categories)
for idx in range(per_class_ap.size):
if idx + self._label_id_offset in category_index:
category_name = category_index[idx + self._label_id_offset]['name']
try:
category_name = six.text_type(category_name, 'utf-8')
except TypeError:
pass
category_name = unicodedata.normalize('NFKD', category_name)
if six.PY2:
category_name = category_name.encode('ascii', 'ignore')
display_name = (
self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_ap[idx]
# Optionally add precision and recall values
if self._evaluate_precision_recall:
display_name = (
self._metric_prefix +
'PerformanceByCategory/Precision@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_precision[idx]
display_name = (
self._metric_prefix +
'PerformanceByCategory/Recall@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_recall[idx]
# Optionally add CorLoc metrics.classes
if self._evaluate_corlocs:
display_name = (
self._metric_prefix +
'PerformanceByCategory/CorLoc@{}IOU/{}'.format(
self._matching_iou_threshold, category_name))
pascal_metrics[display_name] = per_class_corloc[idx]
return pascal_metrics
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._evaluation = ObjectDetectionEvaluation(
num_groundtruth_classes=self._num_classes,
matching_iou_threshold=self._matching_iou_threshold,
use_weighted_mean_ap=self._use_weighted_mean_ap,
label_id_offset=self._label_id_offset)
self._image_ids.clear()
def add_eval_dict(self, eval_dict):
"""Observes an evaluation result dict for a single example.
When executing eagerly, once all observations have been observed by this
method you can use `.evaluate()` to get the final metrics.
When using `tf.estimator.Estimator` for evaluation this function is used by
`get_estimator_eval_metric_ops()` to construct the metric update op.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example().
Returns:
None when executing eagerly, or an update_op that can be used to update
the eval metrics in `tf.estimator.EstimatorSpec`.
"""
# remove unexpected fields
eval_dict_filtered = dict()
for key, value in eval_dict.items():
if key in self._expected_keys:
eval_dict_filtered[key] = value
eval_dict_keys = list(eval_dict_filtered.keys())
def update_op(image_id, *eval_dict_batched_as_list):
"""Update operation that adds batch of images to ObjectDetectionEvaluator.
Args:
image_id: image id (single id or an array)
*eval_dict_batched_as_list: the values of the dictionary of tensors.
"""
if np.isscalar(image_id):
single_example_dict = dict(
zip(eval_dict_keys, eval_dict_batched_as_list))
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
else:
for unzipped_tuple in zip(*eval_dict_batched_as_list):
single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
image_id = single_example_dict[standard_fields.InputDataFields.key]
self.add_single_ground_truth_image_info(image_id, single_example_dict)
self.add_single_detected_image_info(image_id, single_example_dict)
args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
args.extend(six.itervalues(eval_dict_filtered))
return tf.py_func(update_op, args, [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.
Note that this must only be implemented if performing evaluation with a
`tf.estimator.Estimator`.
Args:
eval_dict: A dictionary that holds tensors for evaluating an object
detection model, returned from
eval_util.result_dict_for_single_example(). It must contain
standard_fields.InputDataFields.key.
Returns:
A dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in `tf.estimator.EstimatorSpec`.
"""
update_op = self.add_eval_dict(eval_dict)
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[self._metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in self._metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalBoxes',
use_weighted_mean_ap=False)
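# A minimal, illustrative usage sketch of the evaluator defined above. The
# category list, boxes and scores are invented for the example; only the
# dictionary keys come from the standard_fields / numpy imports of this module.
def _sketch_pascal_evaluation():
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  evaluator = PascalDetectionEvaluator(categories)
  evaluator.add_single_ground_truth_image_info(
      image_id='image_0',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[10., 10., 50., 50.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32),
      })
  evaluator.add_single_detected_image_info(
      image_id='image_0',
      detections_dict={
          standard_fields.DetectionResultFields.detection_boxes:
              np.array([[12., 11., 48., 52.]], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_scores:
              np.array([0.9], dtype=np.float32),
          standard_fields.DetectionResultFields.detection_classes:
              np.array([1], dtype=np.int32),
      })
  # Keys look like 'PascalBoxes_Precision/mAP@0.5IOU' plus per-category APs.
  return evaluator.evaluate()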
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalBoxes',
use_weighted_mean_ap=True)
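# An illustrative numerical sketch (invented inputs) of the distinction drawn in
# the docstring above: computing a single AP over the pooled scores/tp_fp labels
# of all classes ("weighted" mAP) versus averaging the per-class APs.
def _sketch_weighted_vs_unweighted_map():
  scores_a = np.array([0.9, 0.6], dtype=float)
  labels_a = np.array([True, False])        # 1 TP, 1 FP; 1 groundtruth instance
  scores_b = np.array([0.8, 0.7, 0.3], dtype=float)
  labels_b = np.array([True, True, False])  # 2 TPs, 1 FP; 2 groundtruth instances
  # PASCAL-style: mean of per-class average precisions.
  prec_a, rec_a = metrics.compute_precision_recall(scores_a, labels_a, 1)
  prec_b, rec_b = metrics.compute_precision_recall(scores_b, labels_b, 2)
  mean_of_aps = 0.5 * (metrics.compute_average_precision(prec_a, rec_a) +
                       metrics.compute_average_precision(prec_b, rec_b))
  # Weighted-style: one AP over the pooled detections of both classes.
  prec, rec = metrics.compute_precision_recall(
      np.concatenate([scores_a, scores_b]),
      np.concatenate([labels_a, labels_b]), 3)
  pooled_ap = metrics.compute_average_precision(prec, rec)
  return mean_of_aps, pooled_ap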
class PrecisionAtRecallDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using precision@recall metrics."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
recall_lower_bound=0.0,
recall_upper_bound=1.0):
super(PrecisionAtRecallDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
recall_lower_bound=recall_lower_bound,
recall_upper_bound=recall_upper_bound,
evaluate_corlocs=False,
metric_prefix='PrecisionAtRecallBoxes',
use_weighted_mean_ap=False)
class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using PASCAL metrics."""
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='PascalMasks',
use_weighted_mean_ap=False,
evaluate_masks=True)
class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate instance masks using weighted PASCAL metrics.
Weighted PASCAL metrics computes the mean average precision as the average
precision given the scores and tp_fp_labels of all classes. In comparison,
PASCAL metrics computes the mean average precision as the mean of the
per-class average precisions.
This definition is very similar to the mean of the per-class average
precisions weighted by class frequency. However, they are typically not the
same as the average precision is not a linear function of the scores and
tp_fp_labels.
"""
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
categories,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
metric_prefix='WeightedPascalMasks',
use_weighted_mean_ap=True,
evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
"""A class to evaluate detections using Open Images V2 metrics.
Open Images V2 introduce group_of type of bounding boxes and this metric
handles those boxes appropriately.
"""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_masks=False,
evaluate_corlocs=False,
metric_prefix='OpenImagesV2',
group_of_weight=0.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_masks: if True, evaluator evaluates masks.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
metric_prefix: Prefix name of the metric.
group_of_weight: Weight of the group-of bounding box. If set to 0 (default
for Open Images V2 detection protocol), detections of the correct class
within a group-of box are ignored. If weight is > 0, then if at least
one detection falls within a group-of box with matching_iou_threshold,
weight group_of_weight is added to true positives. Consequently, if no
detection falls within a group-of box, weight group_of_weight is added
to false negatives.
"""
super(OpenImagesDetectionEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_corlocs,
metric_prefix=metric_prefix,
group_of_weight=group_of_weight,
evaluate_masks=evaluate_masks)
self._expected_keys = set([
standard_fields.InputDataFields.key,
standard_fields.InputDataFields.groundtruth_boxes,
standard_fields.InputDataFields.groundtruth_classes,
standard_fields.InputDataFields.groundtruth_group_of,
standard_fields.DetectionResultFields.detection_boxes,
standard_fields.DetectionResultFields.detection_scores,
standard_fields.DetectionResultFields.detection_classes,
])
if evaluate_masks:
self._expected_keys.add(
standard_fields.InputDataFields.groundtruth_instance_masks)
self._expected_keys.add(
standard_fields.DetectionResultFields.detection_masks)
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
if image_id in self._image_ids:
raise ValueError('Image with id {} already added.'.format(image_id))
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
# If the key is not present in the groundtruth_dict or the array is empty
# (unless there are no annotations for the groundtruth on this image)
# use values from the dictionary or insert None otherwise.
if (standard_fields.InputDataFields.groundtruth_group_of in six.viewkeys(
groundtruth_dict) and
(groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
.size or not groundtruth_classes.size)):
groundtruth_group_of = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_group_of]
else:
groundtruth_group_of = None
if not len(self._image_ids) % 1000:
logging.warning(
'image %s does not have groundtruth group_of flag specified',
image_id)
if self._evaluate_masks:
groundtruth_masks = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
else:
groundtruth_masks = None
self._evaluation.add_single_ground_truth_image_info(
image_id,
groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
groundtruth_classes,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=groundtruth_group_of,
groundtruth_masks=groundtruth_masks)
self._image_ids.update([image_id])
class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
"""A class implements Open Images Challenge metrics.
Both Detection and Instance Segmentation evaluation metrics are implemented.
Open Images Challenge Detection metric has two major changes in comparison
with Open Images V2 detection metric:
- a custom weight might be specified for detecting an object contained in
a group-of box.
  - verified image-level labels should be explicitly provided for
  evaluation: in case an image has neither positive nor negative image-level
label of class c, all detections of this class on this image will be
ignored.
  Open Images Challenge Instance Segmentation metric allows measuring the
  performance of models in case of incomplete annotations: some instances are
  annotated only at box level and some only at image level. In addition,
  image-level labels are taken into account as in the detection metric.
Open Images Challenge Detection metric default parameters:
evaluate_masks = False
group_of_weight = 1.0
Open Images Challenge Instance Segmentation metric default parameters:
evaluate_masks = True
(group_of_weight will not matter)
"""
def __init__(self,
categories,
evaluate_masks=False,
matching_iou_threshold=0.5,
evaluate_corlocs=False,
group_of_weight=1.0):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
evaluate_masks: set to true for instance segmentation metric and to false
for detection metric.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
"""
if not evaluate_masks:
metrics_prefix = 'OpenImagesDetectionChallenge'
else:
metrics_prefix = 'OpenImagesInstanceSegmentationChallenge'
super(OpenImagesChallengeEvaluator, self).__init__(
categories,
matching_iou_threshold,
evaluate_masks=evaluate_masks,
evaluate_corlocs=evaluate_corlocs,
group_of_weight=group_of_weight,
metric_prefix=metrics_prefix)
self._evaluatable_labels = {}
self._expected_keys.add(
standard_fields.InputDataFields.groundtruth_image_classes)
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.InputDataFields.groundtruth_classes: integer numpy array
of shape [num_boxes] containing 1-indexed groundtruth classes for the
boxes.
standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
numpy array containing all classes for which labels are verified.
standard_fields.InputDataFields.groundtruth_group_of: Optional length M
numpy boolean array denoting whether a groundtruth box contains a
group of instances.
Raises:
ValueError: On adding groundtruth for an image more than once.
"""
super(OpenImagesChallengeEvaluator,
self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
groundtruth_classes = (
groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
self._label_id_offset)
self._evaluatable_labels[image_id] = np.unique(
np.concatenate(((groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_image_classes,
np.array([], dtype=int)) - self._label_id_offset),
groundtruth_classes)))
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: float32 numpy
array of shape [num_boxes, 4] containing `num_boxes` detection boxes
of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [num_boxes] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: integer numpy
array of shape [num_boxes] containing 1-indexed detection classes for
the boxes.
Raises:
ValueError: If detection masks are not in detections dictionary.
"""
if image_id not in self._image_ids:
      # Groundtruth is expected to be added before detections. If it is missing
      # for this image, register the image with an empty set of evaluatable
      # labels so that all of its detections are ignored below.
self._image_ids.update([image_id])
self._evaluatable_labels[image_id] = np.array([])
detection_classes = (
detections_dict[standard_fields.DetectionResultFields.detection_classes]
- self._label_id_offset)
allowed_classes = np.where(
np.isin(detection_classes, self._evaluatable_labels[image_id]))
detection_classes = detection_classes[allowed_classes]
detected_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
detected_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores][allowed_classes]
if self._evaluate_masks:
detection_masks = detections_dict[standard_fields.DetectionResultFields
.detection_masks][allowed_classes]
else:
detection_masks = None
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detection_classes,
detected_masks=detection_masks)
def clear(self):
"""Clears stored data."""
super(OpenImagesChallengeEvaluator, self).clear()
self._evaluatable_labels.clear()
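# An illustrative groundtruth payload (invented values) for the challenge
# evaluator above. The extra groundtruth_image_classes entry lists the classes
# verified at image level; detections of any class outside the union of these
# labels and the box-level classes are dropped in
# add_single_detected_image_info().
def _sketch_challenge_groundtruth(evaluator):
  evaluator.add_single_ground_truth_image_info(
      image_id='image_0',
      groundtruth_dict={
          standard_fields.InputDataFields.groundtruth_boxes:
              np.array([[10., 10., 50., 50.]], dtype=np.float32),
          standard_fields.InputDataFields.groundtruth_classes:
              np.array([1], dtype=np.int32),
          standard_fields.InputDataFields.groundtruth_group_of:
              np.array([False]),
          standard_fields.InputDataFields.groundtruth_image_classes:
              np.array([1, 2], dtype=np.int32),
      })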
ObjectDetectionEvalMetrics = collections.namedtuple(
'ObjectDetectionEvalMetrics', [
'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
'mean_corloc'
])
class OpenImagesDetectionChallengeEvaluator(OpenImagesChallengeEvaluator):
"""A class implements Open Images Detection Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesDetectionChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=False,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=1.0)
class OpenImagesInstanceSegmentationChallengeEvaluator(
OpenImagesChallengeEvaluator):
"""A class implements Open Images Instance Segmentation Challenge metric."""
def __init__(self,
categories,
matching_iou_threshold=0.5,
evaluate_corlocs=False):
"""Constructor.
Args:
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
matching_iou_threshold: IOU threshold to use for matching groundtruth
boxes to detection boxes.
evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
"""
super(OpenImagesInstanceSegmentationChallengeEvaluator, self).__init__(
categories=categories,
evaluate_masks=True,
matching_iou_threshold=matching_iou_threshold,
evaluate_corlocs=False,
group_of_weight=0.0)
ObjectDetectionEvaluationState = collections.namedtuple(
'ObjectDetectionEvaluationState', [
'num_gt_instances_per_class',
'scores_per_class',
'tp_fp_labels_per_class',
'num_gt_imgs_per_class',
'num_images_correctly_detected_per_class',
])
class ObjectDetectionEvaluation(object):
"""Internal implementation of Pascal object detection metrics."""
def __init__(self,
num_groundtruth_classes,
matching_iou_threshold=0.5,
nms_iou_threshold=1.0,
nms_max_output_boxes=10000,
recall_lower_bound=0.0,
recall_upper_bound=1.0,
use_weighted_mean_ap=False,
label_id_offset=0,
group_of_weight=0.0,
per_image_eval_class=per_image_evaluation.PerImageEvaluation):
"""Constructor.
Args:
num_groundtruth_classes: Number of ground-truth classes.
matching_iou_threshold: IOU threshold used for matching detected boxes to
ground-truth boxes.
nms_iou_threshold: IOU threshold used for non-maximum suppression.
nms_max_output_boxes: Maximum number of boxes returned by non-maximum
suppression.
recall_lower_bound: lower bound of recall operating area
recall_upper_bound: upper bound of recall operating area
use_weighted_mean_ap: (optional) boolean which determines if the mean
average precision is computed directly from the scores and tp_fp_labels
of all classes.
label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
correct class within a group-of box are ignored. If weight is > 0, then
if at least one detection falls within a group-of box with
matching_iou_threshold, weight group_of_weight is added to true
positives. Consequently, if no detection falls within a group-of box,
weight group_of_weight is added to false negatives.
per_image_eval_class: The class that contains functions for computing per
image metrics.
Raises:
ValueError: if num_groundtruth_classes is smaller than 1.
"""
if num_groundtruth_classes < 1:
raise ValueError('Need at least 1 groundtruth class for evaluation.')
self.per_image_eval = per_image_eval_class(
num_groundtruth_classes=num_groundtruth_classes,
matching_iou_threshold=matching_iou_threshold,
nms_iou_threshold=nms_iou_threshold,
nms_max_output_boxes=nms_max_output_boxes,
group_of_weight=group_of_weight)
self.recall_lower_bound = recall_lower_bound
self.recall_upper_bound = recall_upper_bound
self.group_of_weight = group_of_weight
self.num_class = num_groundtruth_classes
self.use_weighted_mean_ap = use_weighted_mean_ap
self.label_id_offset = label_id_offset
self.groundtruth_boxes = {}
self.groundtruth_class_labels = {}
self.groundtruth_masks = {}
self.groundtruth_is_difficult_list = {}
self.groundtruth_is_group_of_list = {}
self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
self._initialize_detections()
def _initialize_detections(self):
"""Initializes internal data structures."""
self.detection_keys = set()
self.scores_per_class = [[] for _ in range(self.num_class)]
self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
self.average_precision_per_class = np.empty(self.num_class, dtype=float)
self.average_precision_per_class.fill(np.nan)
self.precisions_per_class = [np.nan] * self.num_class
self.recalls_per_class = [np.nan] * self.num_class
self.sum_tp_class = [np.nan] * self.num_class
self.corloc_per_class = np.ones(self.num_class, dtype=float)
def clear_detections(self):
self._initialize_detections()
def get_internal_state(self):
"""Returns internal state of the evaluation.
NOTE: that only evaluation results will be returned
(e.g. no raw predictions or groundtruth).
Returns:
internal state of the evaluation.
"""
return ObjectDetectionEvaluationState(
self.num_gt_instances_per_class, self.scores_per_class,
self.tp_fp_labels_per_class, self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
def merge_internal_state(self, state_tuple):
"""Merges internal state of the evaluation with the current state.
Args:
state_tuple: state tuple representing evaluation state: should be of type
ObjectDetectionEvaluationState.
"""
(num_gt_instances_per_class, scores_per_class, tp_fp_labels_per_class,
num_gt_imgs_per_class, num_images_correctly_detected_per_class) = (
state_tuple)
assert self.num_class == len(num_gt_instances_per_class)
assert self.num_class == len(scores_per_class)
assert self.num_class == len(tp_fp_labels_per_class)
for i in range(self.num_class):
self.scores_per_class[i].extend(scores_per_class[i])
self.tp_fp_labels_per_class[i].extend(tp_fp_labels_per_class[i])
self.num_gt_instances_per_class[i] += num_gt_instances_per_class[i]
self.num_gt_imgs_per_class[i] += num_gt_imgs_per_class[i]
self.num_images_correctly_detected_per_class[
i] += num_images_correctly_detected_per_class[i]
def add_single_ground_truth_image_info(self,
image_key,
groundtruth_boxes,
groundtruth_class_labels,
groundtruth_is_difficult_list=None,
groundtruth_is_group_of_list=None,
groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
groundtruth_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` groundtruth boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
groundtruth_class_labels: integer numpy array of shape [num_boxes]
containing 0-indexed groundtruth classes for the boxes.
groundtruth_is_difficult_list: A length M numpy boolean array denoting
whether a ground truth box is a difficult instance or not. To support
the case that no boxes are difficult, it is by default set as None.
groundtruth_is_group_of_list: A length M numpy boolean array denoting
whether a ground truth box is a group-of box or not. To support the case
that no boxes are groups-of, it is by default set as None.
groundtruth_masks: uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` groundtruth masks. The mask values range from 0
to 1.
"""
if image_key in self.groundtruth_boxes:
logging.warning(
'image %s has already been added to the ground truth database.',
image_key)
return
self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
self.groundtruth_masks[image_key] = groundtruth_masks
if groundtruth_is_difficult_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[
image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
if groundtruth_is_group_of_list is None:
num_boxes = groundtruth_boxes.shape[0]
groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
if groundtruth_masks is None:
num_boxes = groundtruth_boxes.shape[0]
mask_presence_indicator = np.zeros(num_boxes, dtype=bool)
else:
mask_presence_indicator = (np.sum(groundtruth_masks,
axis=(1, 2)) == 0).astype(dtype=bool)
self.groundtruth_is_group_of_list[
image_key] = groundtruth_is_group_of_list.astype(dtype=bool)
self._update_ground_truth_statistics(
groundtruth_class_labels,
groundtruth_is_difficult_list.astype(dtype=bool)
| mask_presence_indicator, # ignore boxes without masks
groundtruth_is_group_of_list.astype(dtype=bool))
def add_single_detected_image_info(self,
image_key,
detected_boxes,
detected_scores,
detected_class_labels,
detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
image_key: A unique string/integer identifier for the image.
detected_boxes: float32 numpy array of shape [num_boxes, 4] containing
`num_boxes` detection boxes of the format [ymin, xmin, ymax, xmax] in
absolute image coordinates.
detected_scores: float32 numpy array of shape [num_boxes] containing
detection scores for the boxes.
detected_class_labels: integer numpy array of shape [num_boxes] containing
0-indexed detection classes for the boxes.
detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
containing `num_boxes` detection masks with values ranging between 0 and
1.
Raises:
ValueError: if the number of boxes, scores and class labels differ in
length.
"""
if (len(detected_boxes) != len(detected_scores) or
len(detected_boxes) != len(detected_class_labels)):
      raise ValueError(
          'detected_boxes, detected_scores and '
          'detected_class_labels should all have same lengths. Got '
          '[%d, %d, %d]' % (len(detected_boxes), len(detected_scores),
                            len(detected_class_labels)))
if image_key in self.detection_keys:
logging.warning(
'image %s has already been added to the detection result database',
image_key)
return
self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
groundtruth_boxes = self.groundtruth_boxes[image_key]
groundtruth_class_labels = self.groundtruth_class_labels[image_key]
# Masks are popped instead of look up. The reason is that we do not want
# to keep all masks in memory which can cause memory overflow.
groundtruth_masks = self.groundtruth_masks.pop(image_key)
groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
image_key]
groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
image_key]
else:
groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
groundtruth_class_labels = np.array([], dtype=int)
if detected_masks is None:
groundtruth_masks = None
else:
groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
groundtruth_is_difficult_list = np.array([], dtype=bool)
groundtruth_is_group_of_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
self.per_image_eval.compute_object_detection_metrics(
detected_boxes=detected_boxes,
detected_scores=detected_scores,
detected_class_labels=detected_class_labels,
groundtruth_boxes=groundtruth_boxes,
groundtruth_class_labels=groundtruth_class_labels,
groundtruth_is_difficult_list=groundtruth_is_difficult_list,
groundtruth_is_group_of_list=groundtruth_is_group_of_list,
detected_masks=detected_masks,
groundtruth_masks=groundtruth_masks))
for i in range(self.num_class):
if scores[i].shape[0] > 0:
self.scores_per_class[i].append(scores[i])
self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
(self.num_images_correctly_detected_per_class
) += is_class_correctly_detected_in_image
def _update_ground_truth_statistics(self, groundtruth_class_labels,
groundtruth_is_difficult_list,
groundtruth_is_group_of_list):
"""Update grouth truth statitistics.
1. Difficult boxes are ignored when counting the number of ground truth
instances as done in Pascal VOC devkit.
2. Difficult boxes are treated as normal boxes when computing CorLoc related
    statistics.
Args:
groundtruth_class_labels: An integer numpy array of length M, representing
M class labels of object instances in ground truth
groundtruth_is_difficult_list: A boolean numpy array of length M denoting
whether a ground truth box is a difficult instance or not
groundtruth_is_group_of_list: A boolean numpy array of length M denoting
whether a ground truth box is a group-of box or not
"""
for class_index in range(self.num_class):
num_gt_instances = np.sum(groundtruth_class_labels[
~groundtruth_is_difficult_list
& ~groundtruth_is_group_of_list] == class_index)
num_groupof_gt_instances = self.group_of_weight * np.sum(
groundtruth_class_labels[
groundtruth_is_group_of_list
& ~groundtruth_is_difficult_list] == class_index)
self.num_gt_instances_per_class[
class_index] += num_gt_instances + num_groupof_gt_instances
if np.any(groundtruth_class_labels == class_index):
self.num_gt_imgs_per_class[class_index] += 1
def evaluate(self):
"""Compute evaluation result.
Returns:
A named tuple with the following fields -
average_precision: float numpy array of average precision for
each class.
mean_ap: mean average precision of all classes, float scalar
precisions: List of precisions, each precision is a float numpy
array
recalls: List of recalls, each recall is a float numpy array
corloc: numpy float array
mean_corloc: Mean CorLoc score for each class, float scalar
"""
if (self.num_gt_instances_per_class == 0).any():
logging.warning(
'The following classes have no ground truth examples: %s',
np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
self.label_id_offset)
if self.use_weighted_mean_ap:
all_scores = np.array([], dtype=float)
all_tp_fp_labels = np.array([], dtype=bool)
for class_index in range(self.num_class):
if self.num_gt_instances_per_class[class_index] == 0:
continue
if not self.scores_per_class[class_index]:
scores = np.array([], dtype=float)
tp_fp_labels = np.array([], dtype=float)
else:
scores = np.concatenate(self.scores_per_class[class_index])
tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
if self.use_weighted_mean_ap:
all_scores = np.append(all_scores, scores)
all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
precision, recall = metrics.compute_precision_recall(
scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
recall_within_bound_indices = [
index for index, value in enumerate(recall) if
value >= self.recall_lower_bound and value <= self.recall_upper_bound
]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
self.precisions_per_class[class_index] = precision_within_bound
self.recalls_per_class[class_index] = recall_within_bound
self.sum_tp_class[class_index] = tp_fp_labels.sum()
average_precision = metrics.compute_average_precision(
precision_within_bound, recall_within_bound)
self.average_precision_per_class[class_index] = average_precision
logging.info('average_precision: %f', average_precision)
self.corloc_per_class = metrics.compute_cor_loc(
self.num_gt_imgs_per_class,
self.num_images_correctly_detected_per_class)
if self.use_weighted_mean_ap:
num_gt_instances = np.sum(self.num_gt_instances_per_class)
precision, recall = metrics.compute_precision_recall(
all_scores, all_tp_fp_labels, num_gt_instances)
recall_within_bound_indices = [
index for index, value in enumerate(recall) if
value >= self.recall_lower_bound and value <= self.recall_upper_bound
]
recall_within_bound = recall[recall_within_bound_indices]
precision_within_bound = precision[recall_within_bound_indices]
mean_ap = metrics.compute_average_precision(precision_within_bound,
recall_within_bound)
else:
mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return ObjectDetectionEvalMetrics(self.average_precision_per_class, mean_ap,
self.precisions_per_class,
self.recalls_per_class,
self.corloc_per_class, mean_corloc)
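# An illustrative sketch of how the internal-state accessors defined above can
# be used to fold evaluation shards (e.g. accumulated on several workers) into a
# single ObjectDetectionEvaluator before calling evaluate(). Variable names are
# invented for the example.
def _sketch_merge_sharded_evaluators(main_evaluator, shard_evaluators):
  for shard in shard_evaluators:
    shard_state, shard_image_ids = shard.get_internal_state()
    main_evaluator.merge_internal_state(shard_image_ids, shard_state)
  return main_evaluator.evaluate()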
| 43.408072 | 80 | 0.714205 |
4740288f2f586e8c1a632338cbd08f2fdd2ed987 | 2,239 | py | Python | recipes/python/flask/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | roscopecoltran/sniperkit-cookiecutter | 50b7ecd87d4127875764c2b7d4668ede2ed4b299 | [
"BSD-3-Clause"
]
| null | null | null | recipes/python/flask/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | roscopecoltran/sniperkit-cookiecutter | 50b7ecd87d4127875764c2b7d4668ede2ed4b299 | [
"BSD-3-Clause"
]
| null | null | null | recipes/python/flask/{{cookiecutter.app_name}}/{{cookiecutter.app_name}}/app.py | roscopecoltran/sniperkit-cookiecutter | 50b7ecd87d4127875764c2b7d4668ede2ed4b299 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from {{cookiecutter.app_name}} import commands, public, user
from {{cookiecutter.app_name}}.extensions import bcrypt, cache, csrf_protect, db, debug_toolbar, login_manager, migrate, webpack
from {{cookiecutter.app_name}}.settings import ProdConfig
def create_app(config_object=ProdConfig):
"""An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split('.')[0])
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
webpack.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'db': db,
'User': user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
| 30.256757 | 128 | 0.702546 |
82e0fd769d2a5d60acda55e2cfb1ddf7edba78f1 | 8,857 | py | Python | lib/epd4in2.py | jsyzthz/rasp-epaper-calendar | caf33e0c8ee650b735e4363546abe61a3e51287d | [
"MIT"
]
| 5 | 2019-08-12T11:27:39.000Z | 2021-05-23T16:30:57.000Z | lib/epd4in2.py | jsyzthz/rasp-epaper-calendar | caf33e0c8ee650b735e4363546abe61a3e51287d | [
"MIT"
]
| null | null | null | lib/epd4in2.py | jsyzthz/rasp-epaper-calendar | caf33e0c8ee650b735e4363546abe61a3e51287d | [
"MIT"
]
| 4 | 2019-09-08T18:51:50.000Z | 2020-07-29T16:40:07.000Z | # *****************************************************************************
# * | File : epd4in2.py
# * | Author : Waveshare team
# * | Function : Electronic paper driver
# * | Info :
# *----------------
# * | This version: V4.0
# * | Date : 2019-06-20
# # | Info : python demo
# -----------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documnetation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import epdconfig
from PIL import Image
import RPi.GPIO as GPIO
# Display resolution
EPD_WIDTH = 400
EPD_HEIGHT = 300
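# The frame buffer is 1 bit per pixel, so a full frame is
# EPD_WIDTH * EPD_HEIGHT / 8 = 15000 bytes.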
class EPD:
def __init__(self):
self.reset_pin = epdconfig.RST_PIN
self.dc_pin = epdconfig.DC_PIN
self.busy_pin = epdconfig.BUSY_PIN
self.cs_pin = epdconfig.CS_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
lut_vcom0 = [
0x00, 0x17, 0x00, 0x00, 0x00, 0x02,
0x00, 0x17, 0x17, 0x00, 0x00, 0x02,
0x00, 0x0A, 0x01, 0x00, 0x00, 0x01,
0x00, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_ww = [
0x40, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x40, 0x0A, 0x01, 0x00, 0x00, 0x01,
0xA0, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_bw = [
0x40, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x40, 0x0A, 0x01, 0x00, 0x00, 0x01,
0xA0, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_wb = [
0x80, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x80, 0x0A, 0x01, 0x00, 0x00, 0x01,
0x50, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
lut_bb = [
0x80, 0x17, 0x00, 0x00, 0x00, 0x02,
0x90, 0x17, 0x17, 0x00, 0x00, 0x02,
0x80, 0x0A, 0x01, 0x00, 0x00, 0x01,
0x50, 0x0E, 0x0E, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
]
# Hardware reset
def reset(self):
epdconfig.digital_write(self.reset_pin, 1)
epdconfig.delay_ms(200)
epdconfig.digital_write(self.reset_pin, 0)
epdconfig.delay_ms(10)
epdconfig.digital_write(self.reset_pin, 1)
epdconfig.delay_ms(200)
def send_command(self, command):
epdconfig.digital_write(self.dc_pin, 0)
epdconfig.digital_write(self.cs_pin, 0)
epdconfig.spi_writebyte([command])
epdconfig.digital_write(self.cs_pin, 1)
def send_data(self, data):
epdconfig.digital_write(self.dc_pin, 1)
epdconfig.digital_write(self.cs_pin, 0)
epdconfig.spi_writebyte([data])
epdconfig.digital_write(self.cs_pin, 1)
def ReadBusy(self):
while(epdconfig.digital_read(self.busy_pin) == 0): # 0: idle, 1: busy
epdconfig.delay_ms(100)
def set_lut(self):
self.send_command(0x20) # vcom
for count in range(0, 44):
self.send_data(self.lut_vcom0[count])
self.send_command(0x21) # ww --
for count in range(0, 42):
self.send_data(self.lut_ww[count])
self.send_command(0x22) # bw r
for count in range(0, 42):
self.send_data(self.lut_bw[count])
self.send_command(0x23) # wb w
for count in range(0, 42):
self.send_data(self.lut_bb[count])
self.send_command(0x24) # bb b
for count in range(0, 42):
self.send_data(self.lut_wb[count])
def init(self):
if (epdconfig.module_init() != 0):
return -1
# EPD hardware init start
self.reset()
self.send_command(0x01) # POWER SETTING
self.send_data(0x03) # VDS_EN, VDG_EN
self.send_data(0x00) # VCOM_HV, VGHL_LV[1], VGHL_LV[0]
self.send_data(0x2b) # VDH
self.send_data(0x2b) # VDL
self.send_command(0x06) # boost soft start
self.send_data(0x17)
self.send_data(0x17)
self.send_data(0x17)
self.send_command(0x04) # POWER_ON
self.ReadBusy()
self.send_command(0x00) # panel setting
self.send_data(0xbf) # KW-BF KWR-AF BWROTP 0f
self.send_data(0x0d)
self.send_command(0x30) # PLL setting
self.send_data(0x3c) # 3A 100HZ 29 150Hz 39 200HZ 31 171HZ
self.send_command(0x61) # resolution setting
self.send_data(0x01)
self.send_data(0x90) # 128
self.send_data(0x01)
self.send_data(0x2c)
self.send_command(0x82) # vcom_DC setting
self.send_data(0x28)
self.send_command(0X50) # VCOM AND DATA INTERVAL SETTING
self.send_data(0x97) # 97white border 77black border VBDF 17|D7 VBDW 97 VBDB 57 VBDF F7 VBDW 77 VBDB 37 VBDR B7
self.set_lut()
# EPD hardware init end
return 0
def getbuffer(self, image):
# logging.debug("bufsiz = ",int(self.width/8) * self.height)
buf = [0xFF] * (int(self.width/8) * self.height)
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
pixels = image_monocolor.load()
# logging.debug("imwidth = %d, imheight = %d",imwidth,imheight)
if(imwidth == self.width and imheight == self.height):
logging.debug("Horizontal")
for y in range(imheight):
for x in range(imwidth):
# Set the bits for the column of pixels at the current position.
if pixels[x, y] == 0:
buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
elif(imwidth == self.height and imheight == self.width):
logging.debug("Vertical")
for y in range(imheight):
for x in range(imwidth):
newx = y
newy = self.height - x - 1
if pixels[x, y] == 0:
buf[int((newx + newy*self.width) / 8)] &= ~(0x80 >> (y % 8))
return buf
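    # Note: 0x10 and 0x13 below are the controller's two frame-data
    # transmission commands (commonly documented as DTM1/DTM2). display()
    # fills the first with all-white (0xFF) and the second with the image
    # buffer, then triggers a refresh with 0x12; Clear() writes white to both.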
def display(self, image):
self.send_command(0x10)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(0xFF)
self.send_command(0x13)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(image[i])
self.send_command(0x12)
self.ReadBusy()
def Clear(self):
self.send_command(0x10)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(0xFF)
self.send_command(0x13)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(0xFF)
self.send_command(0x12)
self.ReadBusy()
def sleep(self):
self.send_command(0x02) # POWER_OFF
self.ReadBusy()
self.send_command(0x07) # DEEP_SLEEP
self.send_data(0XA5)
epdconfig.module_exit()
### END OF FILE ###
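# Minimal usage sketch (illustrative; assumes epdconfig is wired to the panel):
#   epd = EPD()
#   epd.init()
#   epd.Clear()
#   epd.display(epd.getbuffer(image))  # 'image' being a 400x300 PIL Image
#   epd.sleep()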
| 36.599174 | 123 | 0.557074 |
5309214d0163ff4ed4820b501f0b4df63828be46 | 23,128 | py | Python | examples/rdp_check.py | iamjmat/impacket | 61da73fcc793259e6d1487fff9835e638487fe04 | [
"Apache-1.1"
]
| 4 | 2019-08-12T01:48:30.000Z | 2021-12-03T11:30:54.000Z | examples/rdp_check.py | 3ozir/impacket | d1ced941eb2235ed365b13f661b1d5b4bc2683f3 | [
"Apache-1.1"
]
| null | null | null | examples/rdp_check.py | 3ozir/impacket | d1ced941eb2235ed365b13f661b1d5b4bc2683f3 | [
"Apache-1.1"
]
| 1 | 2021-08-03T20:43:02.000Z | 2021-08-03T20:43:02.000Z | #!/usr/bin/env python
# SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# Alberto Solino (@agsolino)
#
# Description: [MS-RDPBCGR] and [MS-CREDSSP] partial implementation
# just to reach CredSSP auth. This example test whether
# an account is valid on the target host.
#
# ToDo:
# [x] Manage to grab the server's SSL key so we can finalize the whole
# authentication process (check [MS-CSSP] section 3.1.5)
#
from struct import pack, unpack
from impacket.examples import logger
from impacket.structure import Structure
from impacket.spnego import GSSAPI, ASN1_SEQUENCE, ASN1_OCTET_STRING, asn1decode, asn1encode
TDPU_CONNECTION_REQUEST = 0xe0
TPDU_CONNECTION_CONFIRM = 0xd0
TDPU_DATA = 0xf0
TPDU_REJECT = 0x50
TPDU_DATA_ACK = 0x60
# RDP_NEG_REQ constants
TYPE_RDP_NEG_REQ = 1
PROTOCOL_RDP = 0
PROTOCOL_SSL = 1
PROTOCOL_HYBRID = 2
# RDP_NEG_RSP constants
TYPE_RDP_NEG_RSP = 2
EXTENDED_CLIENT_DATA_SUPPORTED = 1
DYNVC_GFX_PROTOCOL_SUPPORTED = 2
# RDP_NEG_FAILURE constants
TYPE_RDP_NEG_FAILURE = 3
SSL_REQUIRED_BY_SERVER = 1
SSL_NOT_ALLOWED_BY_SERVER = 2
SSL_CERT_NOT_ON_SERVER = 3
INCONSISTENT_FLAGS = 4
HYBRID_REQUIRED_BY_SERVER = 5
SSL_WITH_USER_AUTH_REQUIRED_BY_SERVER = 6
class TPKT(Structure):
commonHdr = (
('Version','B=3'),
('Reserved','B=0'),
('Length','>H=len(TPDU)+4'),
('_TPDU','_-TPDU','self["Length"]-4'),
('TPDU',':=""'),
)
class TPDU(Structure):
commonHdr = (
('LengthIndicator','B=len(VariablePart)+1'),
('Code','B=0'),
('VariablePart',':=""'),
)
def __init__(self, data = None):
Structure.__init__(self,data)
self['VariablePart']=''
class CR_TPDU(Structure):
commonHdr = (
('DST-REF','<H=0'),
('SRC-REF','<H=0'),
('CLASS-OPTION','B=0'),
('Type','B=0'),
('Flags','B=0'),
('Length','<H=8'),
)
class DATA_TPDU(Structure):
commonHdr = (
('EOT','B=0x80'),
('UserData',':=""'),
)
def __init__(self, data = None):
Structure.__init__(self,data)
self['UserData'] =''
class RDP_NEG_REQ(CR_TPDU):
structure = (
('requestedProtocols','<L'),
)
def __init__(self,data=None):
CR_TPDU.__init__(self,data)
if data is None:
self['Type'] = TYPE_RDP_NEG_REQ
class RDP_NEG_RSP(CR_TPDU):
structure = (
('selectedProtocols','<L'),
)
class RDP_NEG_FAILURE(CR_TPDU):
structure = (
('failureCode','<L'),
)
class TSPasswordCreds(GSSAPI):
# TSPasswordCreds ::= SEQUENCE {
# domainName [0] OCTET STRING,
# userName [1] OCTET STRING,
# password [2] OCTET STRING
# }
def __init__(self, data=None):
GSSAPI.__init__(self,data)
del self['UUID']
def getData(self):
ans = pack('B', ASN1_SEQUENCE)
ans += asn1encode( pack('B', 0xa0) +
asn1encode( pack('B', ASN1_OCTET_STRING) +
asn1encode( self['domainName'].encode('utf-16le'))) +
pack('B', 0xa1) +
asn1encode( pack('B', ASN1_OCTET_STRING) +
asn1encode( self['userName'].encode('utf-16le'))) +
pack('B', 0xa2) +
asn1encode( pack('B', ASN1_OCTET_STRING) +
asn1encode( self['password'].encode('utf-16le'))) )
return ans
class TSCredentials(GSSAPI):
# TSCredentials ::= SEQUENCE {
# credType [0] INTEGER,
# credentials [1] OCTET STRING
# }
def __init__(self, data=None):
GSSAPI.__init__(self,data)
del self['UUID']
def getData(self):
# Let's pack the credentials field
credentials = pack('B',0xa1)
credentials += asn1encode(pack('B',ASN1_OCTET_STRING) +
asn1encode(self['credentials']))
ans = pack('B',ASN1_SEQUENCE)
ans += asn1encode( pack('B', 0xa0) +
asn1encode( pack('B', 0x02) +
asn1encode( pack('B', self['credType']))) +
credentials)
return ans
class TSRequest(GSSAPI):
# TSRequest ::= SEQUENCE {
# version [0] INTEGER,
# negoTokens [1] NegoData OPTIONAL,
# authInfo [2] OCTET STRING OPTIONAL,
# pubKeyAuth [3] OCTET STRING OPTIONAL,
#}
#
# NegoData ::= SEQUENCE OF SEQUENCE {
# negoToken [0] OCTET STRING
#}
#
def __init__(self, data=None):
GSSAPI.__init__(self,data)
del self['UUID']
def fromString(self, data = None):
next_byte = unpack('B',data[:1])[0]
if next_byte != ASN1_SEQUENCE:
raise Exception('SEQUENCE expected! (%x)' % next_byte)
data = data[1:]
decode_data, total_bytes = asn1decode(data)
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != 0xa0:
raise Exception('0xa0 tag not found %x' % next_byte)
decode_data = decode_data[1:]
next_bytes, total_bytes = asn1decode(decode_data)
# The INTEGER tag must be here
if unpack('B',next_bytes[0:1])[0] != 0x02:
raise Exception('INTEGER tag not found %r' % next_byte)
next_byte, _ = asn1decode(next_bytes[1:])
self['Version'] = unpack('B',next_byte)[0]
decode_data = decode_data[total_bytes:]
next_byte = unpack('B',decode_data[:1])[0]
if next_byte == 0xa1:
# We found the negoData token
decode_data, total_bytes = asn1decode(decode_data[1:])
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != ASN1_SEQUENCE:
raise Exception('ASN1_SEQUENCE tag not found %r' % next_byte)
decode_data, total_bytes = asn1decode(decode_data[1:])
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != ASN1_SEQUENCE:
raise Exception('ASN1_SEQUENCE tag not found %r' % next_byte)
decode_data, total_bytes = asn1decode(decode_data[1:])
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != 0xa0:
raise Exception('0xa0 tag not found %r' % next_byte)
decode_data, total_bytes = asn1decode(decode_data[1:])
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != ASN1_OCTET_STRING:
raise Exception('ASN1_OCTET_STRING tag not found %r' % next_byte)
decode_data2, total_bytes = asn1decode(decode_data[1:])
# the rest should be the data
self['NegoData'] = decode_data2
decode_data = decode_data[total_bytes+1:]
if next_byte == 0xa2:
# ToDo: Check all this
# We found the authInfo token
decode_data, total_bytes = asn1decode(decode_data[1:])
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != ASN1_OCTET_STRING:
raise Exception('ASN1_OCTET_STRING tag not found %r' % next_byte)
decode_data2, total_bytes = asn1decode(decode_data[1:])
self['authInfo'] = decode_data2
decode_data = decode_data[total_bytes+1:]
if next_byte == 0xa3:
# ToDo: Check all this
# We found the pubKeyAuth token
decode_data, total_bytes = asn1decode(decode_data[1:])
next_byte = unpack('B',decode_data[:1])[0]
if next_byte != ASN1_OCTET_STRING:
raise Exception('ASN1_OCTET_STRING tag not found %r' % next_byte)
decode_data2, total_bytes = asn1decode(decode_data[1:])
self['pubKeyAuth'] = decode_data2
def getData(self):
# Do we have pubKeyAuth?
if 'pubKeyAuth' in self.fields:
pubKeyAuth = pack('B',0xa3)
pubKeyAuth += asn1encode(pack('B', ASN1_OCTET_STRING) +
asn1encode(self['pubKeyAuth']))
else:
pubKeyAuth = b''
if 'authInfo' in self.fields:
authInfo = pack('B',0xa2)
authInfo+= asn1encode(pack('B', ASN1_OCTET_STRING) +
asn1encode(self['authInfo']))
else:
authInfo = b''
if 'NegoData' in self.fields:
negoData = pack('B',0xa1)
negoData += asn1encode(pack('B', ASN1_SEQUENCE) +
asn1encode(pack('B', ASN1_SEQUENCE) +
asn1encode(pack('B', 0xa0) +
asn1encode(pack('B', ASN1_OCTET_STRING) +
asn1encode(self['NegoData'])))))
else:
negoData = b''
ans = pack('B', ASN1_SEQUENCE)
ans += asn1encode(pack('B',0xa0) +
asn1encode(pack('B',0x02) + asn1encode(pack('B',0x02))) +
negoData + authInfo + pubKeyAuth)
return ans
if __name__ == '__main__':
import socket
import argparse
import sys
import logging
from binascii import a2b_hex
from Cryptodome.Cipher import ARC4
from impacket import ntlm, version
try:
from OpenSSL import SSL, crypto
except:
logging.critical("pyOpenSSL is not installed, can't continue")
sys.exit(1)
class SPNEGOCipher:
def __init__(self, flags, randomSessionKey):
self.__flags = flags
if self.__flags & ntlm.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
self.__clientSigningKey = ntlm.SIGNKEY(self.__flags, randomSessionKey)
self.__serverSigningKey = ntlm.SIGNKEY(self.__flags, randomSessionKey,"Server")
self.__clientSealingKey = ntlm.SEALKEY(self.__flags, randomSessionKey)
self.__serverSealingKey = ntlm.SEALKEY(self.__flags, randomSessionKey,"Server")
# Preparing the keys handle states
cipher3 = ARC4.new(self.__clientSealingKey)
self.__clientSealingHandle = cipher3.encrypt
cipher4 = ARC4.new(self.__serverSealingKey)
self.__serverSealingHandle = cipher4.encrypt
else:
# Same key for everything
self.__clientSigningKey = randomSessionKey
self.__serverSigningKey = randomSessionKey
self.__clientSealingKey = randomSessionKey
                self.__serverSealingKey = randomSessionKey
cipher = ARC4.new(self.__clientSigningKey)
self.__clientSealingHandle = cipher.encrypt
self.__serverSealingHandle = cipher.encrypt
self.__sequence = 0
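            # The running sequence number feeds NTLM SEAL/SIGN for every
            # message, so each signature is unique and replays are detectable.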
def encrypt(self, plain_data):
if self.__flags & ntlm.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
# When NTLM2 is on, we sign the whole pdu, but encrypt just
# the data, not the dcerpc header. Weird..
sealedMessage, signature = ntlm.SEAL(self.__flags,
self.__clientSigningKey,
self.__clientSealingKey,
plain_data,
plain_data,
self.__sequence,
self.__clientSealingHandle)
else:
sealedMessage, signature = ntlm.SEAL(self.__flags,
self.__clientSigningKey,
self.__clientSealingKey,
plain_data,
plain_data,
self.__sequence,
self.__clientSealingHandle)
self.__sequence += 1
return signature, sealedMessage
def decrypt(self, answer):
if self.__flags & ntlm.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
# TODO: FIX THIS, it's not calculating the signature well
# Since I'm not testing it we don't care... yet
answer, signature = ntlm.SEAL(self.__flags,
self.__serverSigningKey,
self.__serverSealingKey,
answer,
answer,
self.__sequence,
self.__serverSealingHandle)
else:
answer, signature = ntlm.SEAL(self.__flags,
self.__serverSigningKey,
self.__serverSealingKey,
answer,
answer,
self.__sequence,
self.__serverSealingHandle)
self.__sequence += 1
return signature, answer
def check_rdp(host, username, password, domain, hashes = None):
if hashes is not None:
lmhash, nthash = hashes.split(':')
lmhash = a2b_hex(lmhash)
nthash = a2b_hex(nthash)
else:
lmhash = ''
nthash = ''
tpkt = TPKT()
tpdu = TPDU()
rdp_neg = RDP_NEG_REQ()
rdp_neg['Type'] = TYPE_RDP_NEG_REQ
rdp_neg['requestedProtocols'] = PROTOCOL_HYBRID | PROTOCOL_SSL
tpdu['VariablePart'] = rdp_neg.getData()
tpdu['Code'] = TDPU_CONNECTION_REQUEST
tpkt['TPDU'] = tpdu.getData()
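        # The negotiation request is nested as TPKT > X.224 TPDU > RDP_NEG_REQ,
        # asking the server for CredSSP (PROTOCOL_HYBRID) or plain TLS.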
s = socket.socket()
s.connect((host,3389))
s.sendall(tpkt.getData())
pkt = s.recv(8192)
tpkt.fromString(pkt)
tpdu.fromString(tpkt['TPDU'])
cr_tpdu = CR_TPDU(tpdu['VariablePart'])
if cr_tpdu['Type'] == TYPE_RDP_NEG_FAILURE:
rdp_failure = RDP_NEG_FAILURE(tpdu['VariablePart'])
rdp_failure.dump()
logging.error("Server doesn't support PROTOCOL_HYBRID, hence we can't use CredSSP to check credentials")
return
else:
rdp_neg.fromString(tpdu['VariablePart'])
# Since we were accepted to talk PROTOCOL_HYBRID, below is its implementation
# 1. The CredSSP client and CredSSP server first complete the TLS handshake,
# as specified in [RFC2246]. After the handshake is complete, all subsequent
# CredSSP Protocol messages are encrypted by the TLS channel.
# The CredSSP Protocol does not extend the TLS wire protocol. As part of the TLS
# handshake, the CredSSP server does not request the client's X.509 certificate
# (thus far, the client is anonymous). Also, the CredSSP Protocol does not require
# the client to have a commonly trusted certification authority root with the
# CredSSP server. Thus, the CredSSP server MAY use, for example,
# a self-signed X.509 certificate.
# Switching to TLS now
ctx = SSL.Context(SSL.TLSv1_2_METHOD)
ctx.set_cipher_list(b'RC4,AES')
tls = SSL.Connection(ctx,s)
tls.set_connect_state()
tls.do_handshake()
# If you want to use Python internal ssl, uncomment this and comment
# the previous lines
#tls = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1, ciphers='RC4')
# 2. Over the encrypted TLS channel, the SPNEGO handshake between the client
# and server completes mutual authentication and establishes an encryption key
# that is used by the SPNEGO confidentiality services, as specified in [RFC4178].
# All SPNEGO tokens as well as the underlying encryption algorithms are opaque to
# the calling application (the CredSSP client and CredSSP server).
# The wire protocol for SPNEGO is specified in [MS-SPNG].
# The SPNEGO tokens exchanged between the client and the server are encapsulated
# in the negoTokens field of the TSRequest structure. Both the client and the
# server use this structure as many times as necessary to complete the SPNEGO
# exchange.<9>
#
# Note During this phase of the protocol, the OPTIONAL authInfo field is omitted
# from the TSRequest structure by the client and server; the OPTIONAL pubKeyAuth
# field is omitted by the client unless the client is sending the last SPNEGO token.
# If the client is sending the last SPNEGO token, the TSRequest structure MUST have
# both the negoToken and the pubKeyAuth fields filled in.
# NTLMSSP stuff
auth = ntlm.getNTLMSSPType1('','',True, use_ntlmv2 = True)
ts_request = TSRequest()
ts_request['NegoData'] = auth.getData()
tls.send(ts_request.getData())
buff = tls.recv(4096)
ts_request.fromString(buff)
# 3. The client encrypts the public key it received from the server (contained
# in the X.509 certificate) in the TLS handshake from step 1, by using the
# confidentiality support of SPNEGO. The public key that is encrypted is the
# ASN.1-encoded SubjectPublicKey sub-field of SubjectPublicKeyInfo from the X.509
# certificate, as specified in [RFC3280] section 4.1. The encrypted key is
# encapsulated in the pubKeyAuth field of the TSRequest structure and is sent over
# the TLS channel to the server.
#
# Note During this phase of the protocol, the OPTIONAL authInfo field is omitted
# from the TSRequest structure; the client MUST send its last SPNEGO token to the
# server in the negoTokens field (see step 2) along with the encrypted public key
# in the pubKeyAuth field.
# Last SPNEGO token calculation
#ntlmChallenge = ntlm.NTLMAuthChallenge(ts_request['NegoData'])
type3, exportedSessionKey = ntlm.getNTLMSSPType3(auth, ts_request['NegoData'], username, password, domain, lmhash, nthash, use_ntlmv2 = True)
# Get server public key
server_cert = tls.get_peer_certificate()
pkey = server_cert.get_pubkey()
dump = crypto.dump_privatekey(crypto.FILETYPE_ASN1, pkey)
# Fix up due to PyOpenSSL lack for exporting public keys
dump = dump[7:]
dump = b'\x30'+ asn1encode(dump)
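        # Re-wrap the remaining bytes in an ASN.1 SEQUENCE so they match the
        # DER-encoded public key structure CredSSP expects in pubKeyAuth.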
cipher = SPNEGOCipher(type3['flags'], exportedSessionKey)
signature, cripted_key = cipher.encrypt(dump)
ts_request['NegoData'] = type3.getData()
ts_request['pubKeyAuth'] = signature.getData() + cripted_key
try:
# Sending the Type 3 NTLM blob
tls.send(ts_request.getData())
# The other end is waiting for the pubKeyAuth field, but looks like it's
# not needed to check whether authentication worked.
# If auth is unsuccessful, it throws an exception with the previous send().
# If auth is successful, the server waits for the pubKeyAuth and doesn't answer
# anything. So, I'm sending garbage so the server returns an error.
# Luckily, it's a different error so we can determine whether or not auth worked ;)
buff = tls.recv(1024)
except Exception as err:
if str(err).find("denied") > 0:
logging.error("Access Denied")
else:
logging.error(err)
return
# 4. After the server receives the public key in step 3, it first verifies that
# it has the same public key that it used as part of the TLS handshake in step 1.
# The server then adds 1 to the first byte representing the public key (the ASN.1
# structure corresponding to the SubjectPublicKey field, as described in step 3)
# and encrypts the binary result by using the SPNEGO encryption services.
# Due to the addition of 1 to the binary data, and encryption of the data as a binary
# structure, the resulting value may not be valid ASN.1-encoded values.
# The encrypted binary data is encapsulated in the pubKeyAuth field of the TSRequest
# structure and is sent over the encrypted TLS channel to the client.
# The addition of 1 to the first byte of the public key is performed so that the
# client-generated pubKeyAuth message cannot be replayed back to the client by an
# attacker.
#
# Note During this phase of the protocol, the OPTIONAL authInfo and negoTokens
# fields are omitted from the TSRequest structure.
ts_request = TSRequest(buff)
# Now we're decrypting the certificate + 1 sent by the server. Not worth checking ;)
signature, plain_text = cipher.decrypt(ts_request['pubKeyAuth'][16:])
# 5. After the client successfully verifies server authenticity by performing a
# binary comparison of the data from step 4 to that of the data representing
# the public key from the server's X.509 certificate (as specified in [RFC3280],
# section 4.1), it encrypts the user's credentials (either password or smart card
# PIN) by using the SPNEGO encryption services. The resulting value is
# encapsulated in the authInfo field of the TSRequest structure and sent over
# the encrypted TLS channel to the server.
# The TSCredentials structure within the authInfo field of the TSRequest
# structure MAY contain either a TSPasswordCreds or a TSSmartCardCreds structure,
# but MUST NOT contain both.
#
# Note During this phase of the protocol, the OPTIONAL pubKeyAuth and negoTokens
# fields are omitted from the TSRequest structure.
tsp = TSPasswordCreds()
tsp['domainName'] = domain
tsp['userName'] = username
tsp['password'] = password
tsc = TSCredentials()
tsc['credType'] = 1 # TSPasswordCreds
tsc['credentials'] = tsp.getData()
signature, cripted_creds = cipher.encrypt(tsc.getData())
ts_request = TSRequest()
ts_request['authInfo'] = signature.getData() + cripted_creds
tls.send(ts_request.getData())
tls.close()
logging.info("Access Granted")
# Init the example's logger theme
logger.init()
print(version.BANNER)
parser = argparse.ArgumentParser(add_help = True, description = "Test whether an account is valid on the target "
"host using the RDP protocol.")
parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
import re
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')
#In case the password contains '@'
if '@' in address:
password = password + '@' + address.rpartition('@')[0]
address = address.rpartition('@')[2]
if domain is None:
domain = ''
if password == '' and username != '' and options.hashes is None:
from getpass import getpass
password = getpass("Password:")
check_rdp(address, username, password, domain, options.hashes)
| 40.083189 | 148 | 0.611467 |
b7dbe27f079e6a1edec1fd3fd179efd5b734786d | 5,485 | py | Python | eval_c3d.py | crazylazylife/Crowd-Behaviour-Analysis | 3422084e1adc16d801b5643c4758a80da12f4bf8 | [
"MIT"
]
| 4 | 2019-12-23T02:43:26.000Z | 2022-02-28T22:41:32.000Z | eval_c3d.py | crazylazylife/Crowd-Behaviour-Analysis | 3422084e1adc16d801b5643c4758a80da12f4bf8 | [
"MIT"
]
| null | null | null | eval_c3d.py | crazylazylife/Crowd-Behaviour-Analysis | 3422084e1adc16d801b5643c4758a80da12f4bf8 | [
"MIT"
]
| 2 | 2019-06-27T15:44:40.000Z | 2020-08-19T15:54:29.000Z | #!/usr/bin/env python
"""Evaluates the C3D network"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow as tf
import numpy as np
import c3d_model
import input_data
# Basic model parameters as external flags.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('gpu_num', 1,
"""How many GPUs to use""")
tf.app.flags.DEFINE_integer('batch_size', 48,
"""Batch size.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_string('checkpoint_dir', 'result',
"""Check point directory.""")
tf.app.flags.DEFINE_boolean('run_once', True,
"""Whether to run eval only once.""")
tf.app.flags.DEFINE_integer('num_examples', 50,
"""Number of examples to run.""")
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
c3d_model.NUM_FRAMES_PER_CLIP,
c3d_model.CROP_SIZE,
c3d_model.CROP_SIZE,
c3d_model.CHANNELS))
labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
return images_placeholder, labels_placeholder
def eval_once(saver, top_k_op, images_placeholder,
labels_placeholder):
"""Run Eval once.
Args:
saver: Saver.
top_k_op: Top K op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/cifar10_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * FLAGS.batch_size
step = 0
while step < num_iter and not coord.should_stop():
eval_images, eval_labels, _, _, _ = input_data.read_clip_and_label(
filename='./test.list',
batch_size=FLAGS.batch_size,
num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
crop_size=c3d_model.CROP_SIZE,
shuffle=True)
predictions = sess.run([top_k_op],
feed_dict={
images_placeholder: eval_images,
labels_placeholder: eval_labels})
true_count += np.sum(predictions)
step += 1
if step % 10 == 0:
          print("%i/100" % int(step * 100 / num_iter))
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
with tf.Graph().as_default() as g:
# Get the image and the labels placeholder
images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
# Build the Graph that computes the logits predictions from the inference
# model.
with tf.variable_scope('c3d_var'):
logits = c3d_model.inference_c3d(images_placeholder)
top_k_op = tf.nn.in_top_k(logits, labels_placeholder, 1)
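        # in_top_k with k=1 yields one boolean per clip: True when the
        # highest-scoring logit matches the ground-truth label.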
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
c3d_model.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
while True:
eval_once(saver, top_k_op, images_placeholder, labels_placeholder)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def main(_):
evaluate()
if __name__ == '__main__':
tf.app.run()
| 36.085526 | 88 | 0.624066 |
2fc7ec891b849ce28d3f628a89349d65759e0461 | 3,585 | py | Python | camera.py | Georg-Auer/hetcam-webcam | 03ec1d76c889af17656b299dce374f50294aebc4 | [
"MIT"
]
| null | null | null | camera.py | Georg-Auer/hetcam-webcam | 03ec1d76c889af17656b299dce374f50294aebc4 | [
"MIT"
]
| null | null | null | camera.py | Georg-Auer/hetcam-webcam | 03ec1d76c889af17656b299dce374f50294aebc4 | [
"MIT"
]
| null | null | null | #Modified by Georg Auer
#Desc: Camera import for Flask
import cv2
try:
from imutils.video.pivideostream import PiVideoStream
except:
print("No Raspberry/No Raspberry Cam found")
import imutils
import time
import numpy as np
class VideoCamera(object):
def __init__(self, flip = False):
try:
self.vs = PiVideoStream().start()
print(f"using raspberry camera with standard resolution, not changed")
# {self.vs.resolution}
# try raspberry camera first
# try:
# self.vs = PiVideoStream(resolution=(320, 240)).start()
# print("started with custom resolution")
# except:
# self.vs = PiVideoStream().start()
# print("started with standard resolution")
#resolution=(640, 480)
#resolution=(320, 240)
except:
# start webcam for testing instead
self.vs = cv2.VideoCapture(0, cv2.CAP_DSHOW)
print(f"using standard resolution with webcam")
#0 is the standard number of the connected camera in windows
self.flip = flip
time.sleep(2.0)
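        # The two-second sleep above gives the camera time to warm up before
        # the first frame is read.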
def __del__(self):
try:
self.vs.stop()
except:
self.vs.release()
def flip_if_needed(self, frame):
if self.flip:
return np.flip(frame, 0)
return frame
def get_frame(self):
try:
frame = self.flip_if_needed(self.vs.read())
# print("flipped?")
ret, jpeg = cv2.imencode('.jpg', frame)
except:
ret, frame = self.vs.read()
# print("no flip")
ret, jpeg = cv2.imencode('.jpg', frame)
# now returns a simple frame additionally
return jpeg.tobytes(), frame
# def get_frame(self):
# frame = self.flip_if_needed(self.vs.read())
# ret, jpeg = cv2.imencode('.jpg', frame)
# return jpeg.tobytes()
def get_frame_resolution(self):
        # Inspect the capture backend; cv2.VideoCapture exposes no
        # 'resolution' attribute, so fall back gracefully when it is missing.
        current_resolution = getattr(self.vs, "resolution", None)
        object_methods = [method_name for method_name in dir(self.vs)
                  if callable(getattr(self.vs, method_name))]
        print(object_methods)
        print(f"previously set resolution: {current_resolution}")
        try:
            self.vs.resolution = (320, 240)
        except Exception:
            print(f"using standard resolution: {current_resolution}")
        print(f"previously set resolution: {getattr(self.vs, 'resolution', None)}")
try:
frame = self.flip_if_needed(self.vs.read())
ret, jpeg = cv2.imencode('.jpg', frame)
except:
ret, frame = self.vs.read()
ret, jpeg = cv2.imencode('.jpg', frame)
# set resolution back to last value
try:
self.vs.resolution = (320, 240)
        except Exception:
            print(f"using standard resolution, not changed back: {getattr(self.vs, 'resolution', None)}")
# 640, 480
# 1280, 720
#resolution=(320, 240)
# now returns a simple frame additionally
return jpeg.tobytes(), frame
# def take_image(self):
# # stop cam before taking images
# # try:
# # self.vs.stop()
# # except:
# # self.vs.release()
# try:
# ret, frame = self.vs.read()
# # for printing path where image was saved
# # self.vs.stop()
# # self.vs = PiVideoStream().start()
# except:
# print("take_image did not work")
# ret, jpeg = cv2.imencode('.jpg', frame)
# return frame, jpeg
| 31.447368 | 87 | 0.553696 |
399578101cd173b7ab82b7e81a3da232708ab81b | 2,160 | py | Python | stem_cell_hypothesis/en_bert_base/head/con_dot.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
]
| 4 | 2021-09-17T15:23:31.000Z | 2022-02-28T10:18:04.000Z | stem_cell_hypothesis/en_bert_base/head/con_dot.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
]
| null | null | null | stem_cell_hypothesis/en_bert_base/head/con_dot.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
]
| null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-04-08 22:49
import matplotlib.pyplot as plt
import torch
from stem_cell_hypothesis import cdroot
from stem_cell_hypothesis.en_bert_base.head.con import ConAcc
import numpy as np
from tests import cdroot
from adjustText import adjust_text
cdroot()
rs = []
static = True
if not static:
for i in range(3):
save_dir = f'data/model/mtl/ontonotes_bert_base_en/pos/basic/{i}/single/records.pt'
records = torch.load(save_dir)
for label, count in records.label_count.items():
records.label_correct[label] /= count
rs.append(records)
else:
save_dir = f'data/model/mtl/ontonotes_bert_base_en/con/1/static/records.pt'
records: ConAcc = torch.load(save_dir)
records.finalize()
rs.append(records)
records = ConAcc()
records.label_count = rs[0].label_count
ratios = dict()
total = 0
for tag, freq in rs[0].label_count.most_common():
tag: str = tag
# if tag in ('punct', 'num'):
# continue
# if records.label_count[tag] < 1000:
# continue
# ratios[tag] = records.label_count[tag] / sum(records.label_count.values())
# if ratios[tag] < 0.001:
# continue
records.label_correct[tag] = torch.mean(torch.stack([x.label_correct[tag] for x in rs]), dim=0)
total += 1
# if total == 30:
# break
for label, count in records.label_count.items():
if label in records.label_correct:
records.label_correct[label] /= count
texts = []
for tag, head in records.label_correct.items():
acc, offset = head.max(1)
acc, layer = acc.max(0)
acc = acc.item()
layer = layer.item() + 1
plt.scatter(layer, acc)
# plt.annotate(tag, (layer, acc))
texts.append(plt.text(layer, acc, tag))
adjust_text(texts)
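# adjust_text (from the adjustText package) repositions the tag labels placed
# above so they overlap as little as possible on the scatter plot.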
plt.xticks(np.arange(1, 13))
plt.xlabel('layer')
plt.ylabel('accuracy')
# plt.title('Speciality of each head' + (' [static]' if static else ' [finetune]'))
if static:
plt.savefig('data/model/mtl/ontonotes_bert_base_en/con/1/static/con-acc-per-layer.pdf')
else:
plt.savefig('data/model/mtl/ontonotes_bert_base_en_/con/0/single/con-acc-per-layer.pdf')
plt.show()
| 28.8 | 99 | 0.675463 |
99d278bb60a306a0d2cf39fa8a93f8adedc1f506 | 922 | py | Python | src/python_wheel_boilerplate/framework/messages.py | NexSabre/python-wheel-boilerplate | f9fa23671b8dc72b91ea5701e3c4dc0a048af6dd | [
"MIT"
]
| null | null | null | src/python_wheel_boilerplate/framework/messages.py | NexSabre/python-wheel-boilerplate | f9fa23671b8dc72b91ea5701e3c4dc0a048af6dd | [
"MIT"
]
| null | null | null | src/python_wheel_boilerplate/framework/messages.py | NexSabre/python-wheel-boilerplate | f9fa23671b8dc72b91ea5701e3c4dc0a048af6dd | [
"MIT"
]
| null | null | null | class Messages:
@staticmethod
def package_availability(available: list, unavailable: list):
Messages.info("Package status")
if available:
print("Available to convert:")
for key in available:
print(f"\t{key}")
packages_not_found = unavailable
if not packages_not_found:
print()
return
print("\nUnavailable to convert:")
for p in packages_not_found:
print(f'\t{p}')
else:
print()
@staticmethod
def error(message):
print(f"ERRO :: {message}")
@staticmethod
def warn(message):
print(f"WARN :: {message}")
@staticmethod
def info(message):
print(f"INFO :: {message}")
@staticmethod
def ok(message):
print(f" OK :: {message}")
@staticmethod
def clean(message):
print(f" :: {message}")
| 23.05 | 65 | 0.543384 |
4cb08e6fe5eadc3fa231d183f23c09eaef57149c | 11,672 | py | Python | nenupy/astro2/sky.py | AlanLoh/nenupy | 5b9e6ae7cc28d0dc4ed450a408b124f71a1c9cc8 | [
"MIT"
]
| 4 | 2018-10-02T16:32:32.000Z | 2022-03-08T10:55:36.000Z | nenupy/astro2/sky.py | AlanLoh/nenupy | 5b9e6ae7cc28d0dc4ed450a408b124f71a1c9cc8 | [
"MIT"
]
| 54 | 2018-10-16T14:48:29.000Z | 2022-02-24T11:02:59.000Z | nenupy/astro2/sky.py | AlanLoh/nenupy | 5b9e6ae7cc28d0dc4ed450a408b124f71a1c9cc8 | [
"MIT"
]
| 2 | 2020-10-12T14:29:17.000Z | 2021-11-24T13:11:16.000Z | #! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
"""
__author__ = "Alan Loh"
__copyright__ = "Copyright 2021, nenupy"
__credits__ = ["Alan Loh"]
__maintainer__ = "Alan"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"Sky",
"HpxSky"
]
import numpy as np
import copy
import logging
log = logging.getLogger(__name__)
from astropy.coordinates import SkyCoord, EarthLocation, ICRS
from astropy.time import Time
import astropy.units as u
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.wcs import WCS
from reproject import reproject_from_healpix
import matplotlib.pyplot as plt
from matplotlib import patheffects
from matplotlib.colorbar import ColorbarBase
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.cm import get_cmap
from matplotlib.ticker import LinearLocator
from matplotlib.colors import Normalize
try:
import healpy.pixelfunc as hpx
except ImportError:
log.warning("Unable to load 'healpy', some functionalities may not be working.")
hpx = None
from nenupy import nenufar_position
from nenupy.astro2.astro_tools import AstroObject, radec_to_altaz
# ============================================================= #
# ---------------------------- Sky ---------------------------- #
# ============================================================= #
class Sky(AstroObject):
""" """
def __init__(self,
coordinates: SkyCoord,
time: Time = Time.now(),
frequency: u.Quantity = 50*u.MHz,
value: np.ndarray = np.array([0]),
observer: EarthLocation = nenufar_position
):
self.coordinates = coordinates
self.time = time
self.frequency = frequency
self.value = value
self.observer = observer
# --------------------------------------------------------- #
# --------------------- Getter/Setter --------------------- #
@property
def value(self):
""" """
return self._value
@value.setter
def value(self, v):
if v.dtype < np.float64:
v = v.astype(np.float64)
self._value = v
@property
def visible_sky(self):
""" """
altaz = radec_to_altaz(
radec=self.coordinates,
time=self.time,
observer=self.observer,
fast_compute=True
)
return altaz.alt.deg > 0
# --------------------------------------------------------- #
# ------------------------ Methods ------------------------ #
def plot(self, **kwargs):
"""
"""
# Parsing the keyword arguments
resolution = kwargs.get("resolution", 1*u.deg)
figname = kwargs.get("figname", None)
cmap = kwargs.get("cmap", "YlGnBu_r")
figsize = kwargs.get("figsize", (15, 10))
center = kwargs.get("center", SkyCoord(0*u.deg, 0*u.deg))
radius = kwargs.get("radius", None)
ticks_color = kwargs.get("ticks_color", "0.9")
colorbar_label = kwargs.get("colorbar_label", "Colorbar")
title = kwargs.get("title", "")
visible_sky = kwargs.get("only_visible", True)
decibel = kwargs.get("decibel", False)
# Initialize figure
wcs, shape = self._compute_wcs(
center=center,
resolution=getattr(self, "resolution", resolution),
radius=radius
)
fig = plt.figure(figsize=figsize)
ax = plt.subplot(
projection=wcs,
frame_class=EllipticalFrame
)
# Get the data projected on fullsky
data = self._fullsky_projection(
wcs=wcs,
shape=shape,
display_visible_sky=visible_sky
)
# Scale the data in decibel
if decibel:
data = 10 * np.log10(data)
vmin = kwargs.get("vmin", np.nanmin(data))
vmax = kwargs.get("vmax", np.nanmax(data))
# Plot the data
im = ax.imshow(
data,
origin="lower",
interpolation="none",
cmap=cmap,
vmin=vmin,
vmax=vmax
)
# Define ax ticks
ax.coords.grid(color=ticks_color, alpha=0.5)
path_effects=[patheffects.withStroke(linewidth=3, foreground='black')]
ra_axis = ax.coords[0]
dec_axis = ax.coords[1]
ra_axis.set_ticks_visible(False)
ra_axis.set_ticklabel(color=ticks_color, path_effects=path_effects)
ra_axis.set_axislabel("RA", color=ticks_color, path_effects=path_effects)
ra_axis.set_major_formatter("d")
ra_axis.set_ticks(number=12)
dec_axis.set_ticks_visible(False)
dec_axis.set_axislabel("Dec", minpad=2)
dec_axis.set_major_formatter("d")
dec_axis.set_ticks(number=10)
# Colorbar
cax = inset_axes(
ax,
width='3%',
height='100%',
loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax.transAxes,
borderpad=0,
)
cb = ColorbarBase(
cax,
cmap=get_cmap(name=cmap),
orientation='vertical',
norm=Normalize(
vmin=vmin,
vmax=vmax
),
ticks=LinearLocator()
)
cb.solids.set_edgecolor("face")
cb.set_label(colorbar_label)
cb.formatter.set_powerlimits((0, 0))
# Other
im.set_clip_path(ax.coords.frame.patch)
ax.set_title(title, pad=20)
# Save or show
if figname is None:
plt.show()
elif figname.lower() == 'return':
return fig, ax
else:
fig.savefig(
figname,
dpi=300,
transparent=True,
bbox_inches='tight'
)
plt.close('all')
# --------------------------------------------------------- #
# ----------------------- Internal ------------------------ #
@staticmethod
def _compute_wcs(center: SkyCoord, resolution: u.Quantity, radius: u.Quantity):
""" """
dangle = 0.675
scale = int(dangle/resolution.to(u.deg).value)
#scale = int(resolution.to(u.deg).value/dangle)
scale = 1 if scale <= 1 else scale
ra_dim = 480*scale
dec_dim = 240*scale
if radius is not None:
resol = dangle/scale
ra_dim = int(2 * radius.to(u.deg).value / resol)
dec_dim = ra_dim
#raauto = False
wcs = WCS(naxis=2)
wcs.wcs.crpix = [ra_dim/2 + 0.5, dec_dim/2 + 0.5]
wcs.wcs.cdelt = np.array([-dangle/scale, dangle/scale])
wcs.wcs.crval = [center.ra.deg, center.dec.deg]
wcs.wcs.ctype = ['RA---AIT', 'DEC--AIT']
return wcs, (ra_dim, dec_dim)
def _fullsky_projection(self, wcs: WCS, shape: tuple, display_visible_sky: bool):
""" """
x, y = wcs.world_to_pixel(self.coordinates)
data = np.zeros(shape, dtype=np.float64)
data[:, :] = np.nan
weights = np.zeros(shape, dtype=int)
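        # Several sky directions can project onto the same pixel; accumulate
        # their values and divide by the hit count to average them.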
x_int = np.floor(x).astype(int)
x_in_image = (x_int >= 0) & (x_int < shape[0])
y_int = np.floor(y).astype(int)
y_in_image = (y_int >= 0) & (y_int < shape[1])
in_image_mask = x_in_image & y_in_image
x_int = x_int[in_image_mask]
y_int = y_int[in_image_mask]
values = copy.deepcopy(self.value)
if display_visible_sky:
values[~self.visible_sky] = np.nan
values = values[in_image_mask]
#data.mask[(x_int, y_int)] = False
data[(x_int, y_int)] = 0.
np.add.at(weights, (x_int, y_int), 1)
weights[weights<0.5] = 1.
np.add.at(data, (x_int, y_int), values)
data[(x_int, y_int)] /= weights[(x_int, y_int)]
#data = np.ma.masked_array(
# data,
# mask=np.ones(shape, dtype=bool),
# fill_value=np.nan
#)
return data.T
# ============================================================= #
# ============================================================= #
# ============================================================= #
# -------------------------- HpxSky --------------------------- #
# ============================================================= #
class HpxSky(Sky):
""" """
def __init__(self,
resolution: u.Quantity = 1*u.deg,
time: Time = Time.now(),
frequency: u.Quantity = 50*u.MHz,
value: np.ndarray = np.array([0]),
observer: EarthLocation = nenufar_position
):
if hpx is None:
log.error(
                f"Unable to create an instance of {type(self).__qualname__} since 'healpy' does not work."
)
self.nside, self.resolution = self._resol2nside(resolution=resolution)
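        # The requested resolution is snapped to the closest HEALPix order, so
        # self.resolution stores the effective value actually used.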
# Construct the Healpix coordinates map
ra, dec = hpx.pix2ang(
nside=self.nside,
ipix=np.arange(
hpx.nside2npix(self.nside),
dtype=np.int64
),
lonlat=True,
nest=False
)
super().__init__(
coordinates=SkyCoord(ra, dec, unit="deg"),
time=time,
frequency=frequency,
value=value,
observer=observer
)
# --------------------------------------------------------- #
# ----------------------- Internal ------------------------ #
@staticmethod
def _resol2nside(resolution: u.Quantity):
""" Returns the HEALPix nside and effective resolution. """
# Get all nsides for all HEALPix oders
healpix_nsides = hpx.order2nside(np.arange(30))
# Convert them into angular resolutions
available_resolutions = hpx.nside2resol(
healpix_nsides,
arcmin=True
)*u.arcmin
# Find the index of the closest matching HEALPix resolution
order_index = np.argmin(
np.abs(available_resolutions - resolution)
)
# Retrieve the corresponding nside and reoslution
nside = healpix_nsides[order_index]
effective_resolution = available_resolutions[order_index]
return nside, effective_resolution
@staticmethod
def _compute_wcs(center: SkyCoord, resolution: u.Quantity, radius: u.Quantity = None):
""" """
dangle = 0.675
scale = int(dangle/resolution.to(u.deg).value)
scale = 1 if scale <= 1 else scale
ra_dim = 480*scale
dec_dim = 240*scale
if radius is not None:
resol = dangle/scale
ra_dim = int(2 * radius.to(u.deg).value / resol)
dec_dim = ra_dim
#raauto = False
wcs = WCS(naxis=2)
wcs.wcs.crpix = [ra_dim/2 + 0.5, dec_dim/2 + 0.5]
wcs.wcs.cdelt = np.array([-dangle/scale, dangle/scale])
wcs.wcs.crval = [center.ra.deg, center.dec.deg]
wcs.wcs.ctype = ['RA---AIT', 'DEC--AIT']
return wcs, (dec_dim, ra_dim)
def _fullsky_projection(self, wcs: WCS, shape: tuple, display_visible_sky: bool):
""" """
values = copy.deepcopy(self.value)
if display_visible_sky:
values[~self.visible_sky] = np.nan
array, _ = reproject_from_healpix(
(values, ICRS()),
wcs,
nested=False,
shape_out=shape
)
return array
# ============================================================= #
# ============================================================= #
| 30.960212 | 100 | 0.514222 |
5763a7a93d9bee2f1c0e0ad4c3caee9b4724d1ab | 6,152 | py | Python | project-code/AllTests/lib/googlemock/test/gmock_output_test.py | Bho007/MPEG-2-TS-Decoder | 6021af80f9a79b241da1db158b87ed3fe53f8e61 | [
"MIT"
]
| null | null | null | project-code/AllTests/lib/googlemock/test/gmock_output_test.py | Bho007/MPEG-2-TS-Decoder | 6021af80f9a79b241da1db158b87ed3fe53f8e61 | [
"MIT"
]
| null | null | null | project-code/AllTests/lib/googlemock/test/gmock_output_test.py | Bho007/MPEG-2-TS-Decoder | 6021af80f9a79b241da1db158b87ed3fe53f8e61 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
Alternatively, when the test binary sits in the default build directory:
gmock_output_test.py --gengolden
and to check the output against the golden file:
gmock_output_test.py
"""
import os
import re
import sys
from io import open # pylint: disable=redefined-builtin, g-importing-member
import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
def NormalizeErrorMarker(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = NormalizeErrorMarker(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def testOutput(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read().decode('utf-8')
golden_file.close()
# The normalized output should match the golden file.
self.assertEquals(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
# Suppress the error "googletest was imported but a call to its main()
# was never detected."
os._exit(0)
else:
gmock_test_utils.Main()
| 33.434783 | 80 | 0.725618 |
de76db907e240dbd4bfb09601bb4570bdd122b3e | 26,404 | py | Python | intersight/apis/storage_flex_util_physical_drive_api.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
]
| null | null | null | intersight/apis/storage_flex_util_physical_drive_api.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
]
| null | null | null | intersight/apis/storage_flex_util_physical_drive_api.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class StorageFlexUtilPhysicalDriveApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def storage_flex_util_physical_drives_get(self, **kwargs):
"""
Get a list of 'storageFlexUtilPhysicalDrive' instances
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of records to return
:param int skip: The number of records to skip
:param str filter: Filter criteria for records to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return
:param str orderby: Determines what values are used to order a collection of records
:param str expand: Specify additional attributes or related records to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on records. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for records to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: StorageFlexUtilPhysicalDriveList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_flex_util_physical_drives_get_with_http_info(**kwargs)
else:
(data) = self.storage_flex_util_physical_drives_get_with_http_info(**kwargs)
return data
def storage_flex_util_physical_drives_get_with_http_info(self, **kwargs):
"""
Get a list of 'storageFlexUtilPhysicalDrive' instances
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool count: The $count query option allows clients to request a count of the matching resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response
:param int top: The max number of records to return
:param int skip: The number of records to skip
:param str filter: Filter criteria for records to return. A URI with a $filter System Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in $filter operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: $filter=Name eq 'Bob' $filter=Tags/any(t: t/Key eq 'Site') $filter=Tags/any(t: t/Key eq 'Site' and t/Value eq 'London')
:param str select: Specifies a subset of properties to return
:param str orderby: Determines what values are used to order a collection of records
:param str expand: Specify additional attributes or related records to return. Supports only 'DisplayNames' attribute now. Query examples: $expand=DisplayNames
:param str apply: Specify one or more transformation operations to perform aggregation on records. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. Query examples: $apply=groupby((Model), aggregate($count as Total)) $apply=groupby((Model), aggregate(AvailableMemory with average as AverageAvailableMemory))
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for records to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section. Query examples: at=VersionType eq 'Configured' at=InterestedMos.Moid eq '5b5877e56c6730367acf46cd'
:return: StorageFlexUtilPhysicalDriveList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['count', 'inlinecount', 'top', 'skip', 'filter', 'select', 'orderby', 'expand', 'apply', 'at']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method storage_flex_util_physical_drives_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'count' in params:
query_params.append(('$count', params['count']))
if 'inlinecount' in params:
query_params.append(('$inlinecount', params['inlinecount']))
if 'top' in params:
query_params.append(('$top', params['top']))
if 'skip' in params:
query_params.append(('$skip', params['skip']))
if 'filter' in params:
query_params.append(('$filter', params['filter']))
if 'select' in params:
query_params.append(('$select', params['select']))
if 'orderby' in params:
query_params.append(('$orderby', params['orderby']))
if 'expand' in params:
query_params.append(('$expand', params['expand']))
if 'apply' in params:
query_params.append(('$apply', params['apply']))
if 'at' in params:
query_params.append(('at', params['at']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/storage/FlexUtilPhysicalDrives', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageFlexUtilPhysicalDriveList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def storage_flex_util_physical_drives_moid_get(self, moid, **kwargs):
"""
Get a specific instance of 'storageFlexUtilPhysicalDrive'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_moid_get(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the storageFlexUtilPhysicalDrive instance. (required)
:return: StorageFlexUtilPhysicalDrive
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_flex_util_physical_drives_moid_get_with_http_info(moid, **kwargs)
else:
(data) = self.storage_flex_util_physical_drives_moid_get_with_http_info(moid, **kwargs)
return data
def storage_flex_util_physical_drives_moid_get_with_http_info(self, moid, **kwargs):
"""
Get a specific instance of 'storageFlexUtilPhysicalDrive'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_moid_get_with_http_info(moid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the storageFlexUtilPhysicalDrive instance. (required)
:return: StorageFlexUtilPhysicalDrive
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method storage_flex_util_physical_drives_moid_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `storage_flex_util_physical_drives_moid_get`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/storage/FlexUtilPhysicalDrives/{moid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='StorageFlexUtilPhysicalDrive',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def storage_flex_util_physical_drives_moid_patch(self, moid, body, **kwargs):
"""
Update an instance of 'storageFlexUtilPhysicalDrive'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_moid_patch(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the storageFlexUtilPhysicalDrive instance. (required)
:param StorageFlexUtilPhysicalDrive body: storageFlexUtilPhysicalDrive to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_flex_util_physical_drives_moid_patch_with_http_info(moid, body, **kwargs)
else:
(data) = self.storage_flex_util_physical_drives_moid_patch_with_http_info(moid, body, **kwargs)
return data
def storage_flex_util_physical_drives_moid_patch_with_http_info(self, moid, body, **kwargs):
"""
Update an instance of 'storageFlexUtilPhysicalDrive'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_moid_patch_with_http_info(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the storageFlexUtilPhysicalDrive instance. (required)
:param StorageFlexUtilPhysicalDrive body: storageFlexUtilPhysicalDrive to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method storage_flex_util_physical_drives_moid_patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `storage_flex_util_physical_drives_moid_patch`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `storage_flex_util_physical_drives_moid_patch`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/storage/FlexUtilPhysicalDrives/{moid}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def storage_flex_util_physical_drives_moid_post(self, moid, body, **kwargs):
"""
Update an instance of 'storageFlexUtilPhysicalDrive'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_moid_post(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the storageFlexUtilPhysicalDrive instance. (required)
:param StorageFlexUtilPhysicalDrive body: storageFlexUtilPhysicalDrive to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.storage_flex_util_physical_drives_moid_post_with_http_info(moid, body, **kwargs)
else:
(data) = self.storage_flex_util_physical_drives_moid_post_with_http_info(moid, body, **kwargs)
return data
def storage_flex_util_physical_drives_moid_post_with_http_info(self, moid, body, **kwargs):
"""
Update an instance of 'storageFlexUtilPhysicalDrive'
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.storage_flex_util_physical_drives_moid_post_with_http_info(moid, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str moid: The moid of the storageFlexUtilPhysicalDrive instance. (required)
:param StorageFlexUtilPhysicalDrive body: storageFlexUtilPhysicalDrive to update (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['moid', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method storage_flex_util_physical_drives_moid_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'moid' is set
if ('moid' not in params) or (params['moid'] is None):
raise ValueError("Missing the required parameter `moid` when calling `storage_flex_util_physical_drives_moid_post`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `storage_flex_util_physical_drives_moid_post`")
collection_formats = {}
path_params = {}
if 'moid' in params:
path_params['moid'] = params['moid']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api('/storage/FlexUtilPhysicalDrives/{moid}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
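# Illustrative usage (a sketch, not part of the generated client; the moid value
# simply reuses the example identifier shown in the docstrings above):
#
#   api = StorageFlexUtilPhysicalDriveApi()
#   drive_list = api.storage_flex_util_physical_drives_get(top=10)
#   drive = api.storage_flex_util_physical_drives_moid_get('5b5877e56c6730367acf46cd')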
| 52.285149 | 818 | 0.630624 |
48ef4c1cd59038d70cfc478348c2708a91c06781 | 13,914 | py | Python | capa/features/extractors/smda/insn.py | ruppde/capa | 3c0bafc22b9e1c209066c12e444fec66920b53af | ["Apache-2.0"] | 1 | 2021-07-06T23:20:56.000Z | 2021-07-06T23:20:56.000Z | capa/features/extractors/smda/insn.py | ruppde/capa | 3c0bafc22b9e1c209066c12e444fec66920b53af | ["Apache-2.0"] | null | null | null | capa/features/extractors/smda/insn.py | ruppde/capa | 3c0bafc22b9e1c209066c12e444fec66920b53af | ["Apache-2.0"] | null | null | null | import re
import string
import struct
from smda.common.SmdaReport import SmdaReport
import capa.features.extractors.helpers
from capa.features import (
ARCH_X32,
ARCH_X64,
MAX_BYTES_FEATURE_SIZE,
THUNK_CHAIN_DEPTH_DELTA,
Bytes,
String,
Characteristic,
)
from capa.features.insn import API, Number, Offset, Mnemonic
# security cookie checks may perform non-zeroing XORs, these are expected within a certain
# byte range within the first and returning basic blocks, this helps to reduce FP features
SECURITY_COOKIE_BYTES_DELTA = 0x40
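# displacement patterns in SMDA-rendered operand strings, e.g. "+ 0x10" or "- 4"
# inside "dword ptr [eax + 0x10]"; used below to extract structure offsets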
PATTERN_HEXNUM = re.compile(r"[+\-] (?P<num>0x[a-fA-F0-9]+)")
PATTERN_SINGLENUM = re.compile(r"[+\-] (?P<num>[0-9])")
def get_arch(smda_report):
if smda_report.architecture == "intel":
if smda_report.bitness == 32:
return ARCH_X32
elif smda_report.bitness == 64:
return ARCH_X64
else:
raise NotImplementedError
def extract_insn_api_features(f, bb, insn):
"""parse API features from the given instruction."""
if insn.offset in f.apirefs:
api_entry = f.apirefs[insn.offset]
# reformat
dll_name, api_name = api_entry.split("!")
dll_name = dll_name.split(".")[0]
dll_name = dll_name.lower()
for name in capa.features.extractors.helpers.generate_symbols(dll_name, api_name):
yield API(name), insn.offset
elif insn.offset in f.outrefs:
current_function = f
current_instruction = insn
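        # walk chains of single-instruction thunk functions (up to
        # THUNK_CHAIN_DEPTH_DELTA hops) to resolve the API that is ultimately called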
for index in range(THUNK_CHAIN_DEPTH_DELTA):
if current_function and len(current_function.outrefs[current_instruction.offset]) == 1:
target = current_function.outrefs[current_instruction.offset][0]
referenced_function = current_function.smda_report.getFunction(target)
if referenced_function:
# TODO SMDA: implement this function for both jmp and call, checking if function has 1 instruction which refs an API
if referenced_function.isApiThunk():
api_entry = (
referenced_function.apirefs[target] if target in referenced_function.apirefs else None
)
if api_entry:
# reformat
dll_name, api_name = api_entry.split("!")
dll_name = dll_name.split(".")[0]
dll_name = dll_name.lower()
for name in capa.features.extractors.helpers.generate_symbols(dll_name, api_name):
yield API(name), insn.offset
elif referenced_function.num_instructions == 1 and referenced_function.num_outrefs == 1:
current_function = referenced_function
current_instruction = [i for i in referenced_function.getInstructions()][0]
else:
return
def extract_insn_number_features(f, bb, insn):
"""parse number features from the given instruction."""
# example:
#
# push 3136B0h ; dwControlCode
operands = [o.strip() for o in insn.operands.split(",")]
if insn.mnemonic == "add" and operands[0] in ["esp", "rsp"]:
# skip things like:
#
# .text:00401140 call sub_407E2B
# .text:00401145 add esp, 0Ch
return
for operand in operands:
try:
yield Number(int(operand, 16)), insn.offset
yield Number(int(operand, 16), arch=get_arch(f.smda_report)), insn.offset
        except ValueError:
continue
def read_bytes(smda_report, va, num_bytes=None):
"""
read up to MAX_BYTES_FEATURE_SIZE from the given address.
"""
rva = va - smda_report.base_addr
if smda_report.buffer is None:
return
buffer_end = len(smda_report.buffer)
max_bytes = num_bytes if num_bytes is not None else MAX_BYTES_FEATURE_SIZE
if rva + max_bytes > buffer_end:
return smda_report.buffer[rva:]
else:
return smda_report.buffer[rva : rva + max_bytes]
def derefs(smda_report, p):
"""
recursively follow the given pointer, yielding the valid memory addresses along the way.
useful when you may have a pointer to string, or pointer to pointer to string, etc.
this is a "do what i mean" type of helper function.
based on the implementation in viv/insn.py
"""
depth = 0
while True:
if not smda_report.isAddrWithinMemoryImage(p):
return
yield p
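        # dereference: interpret the next 4 bytes as an unsigned 32-bit pointer
        # value in native byte order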
bytes_ = read_bytes(smda_report, p, num_bytes=4)
val = struct.unpack("I", bytes_)[0]
# sanity: pointer points to self
if val == p:
return
# sanity: avoid chains of pointers that are unreasonably deep
depth += 1
if depth > 10:
return
p = val
def extract_insn_bytes_features(f, bb, insn):
"""
parse byte sequence features from the given instruction.
example:
# push offset iid_004118d4_IShellLinkA ; riid
"""
for data_ref in insn.getDataRefs():
for v in derefs(f.smda_report, data_ref):
bytes_read = read_bytes(f.smda_report, v)
if bytes_read is None:
continue
if capa.features.extractors.helpers.all_zeros(bytes_read):
continue
yield Bytes(bytes_read), insn.offset
def detect_ascii_len(smda_report, offset):
if smda_report.buffer is None:
return 0
ascii_len = 0
rva = offset - smda_report.base_addr
char = smda_report.buffer[rva]
while char < 127 and chr(char) in string.printable:
ascii_len += 1
rva += 1
char = smda_report.buffer[rva]
if char == 0:
return ascii_len
return 0
def detect_unicode_len(smda_report, offset):
if smda_report.buffer is None:
return 0
unicode_len = 0
rva = offset - smda_report.base_addr
char = smda_report.buffer[rva]
second_char = smda_report.buffer[rva + 1]
while char < 127 and chr(char) in string.printable and second_char == 0:
unicode_len += 2
rva += 2
char = smda_report.buffer[rva]
second_char = smda_report.buffer[rva + 1]
if char == 0 and second_char == 0:
return unicode_len
return 0
def read_string(smda_report, offset):
alen = detect_ascii_len(smda_report, offset)
if alen > 1:
return read_bytes(smda_report, offset, alen).decode("utf-8")
ulen = detect_unicode_len(smda_report, offset)
if ulen > 2:
return read_bytes(smda_report, offset, ulen).decode("utf-16")
def extract_insn_string_features(f, bb, insn):
"""parse string features from the given instruction."""
# example:
#
# push offset aAcr ; "ACR > "
for data_ref in insn.getDataRefs():
for v in derefs(f.smda_report, data_ref):
string_read = read_string(f.smda_report, v)
if string_read:
yield String(string_read.rstrip("\x00")), insn.offset
def extract_insn_offset_features(f, bb, insn):
"""parse structure offset features from the given instruction."""
# examples:
#
# mov eax, [esi + 4]
# mov eax, [esi + ecx + 16384]
operands = [o.strip() for o in insn.operands.split(",")]
for operand in operands:
if not "ptr" in operand:
continue
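        # skip stack-frame accesses: displacements relative to esp/ebp/rbp refer to
        # local variables rather than structure fields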
if "esp" in operand or "ebp" in operand or "rbp" in operand:
continue
number = 0
number_hex = re.search(PATTERN_HEXNUM, operand)
number_int = re.search(PATTERN_SINGLENUM, operand)
if number_hex:
number = int(number_hex.group("num"), 16)
number = -1 * number if number_hex.group().startswith("-") else number
elif number_int:
number = int(number_int.group("num"))
number = -1 * number if number_int.group().startswith("-") else number
yield Offset(number), insn.offset
yield Offset(number, arch=get_arch(f.smda_report)), insn.offset
def is_security_cookie(f, bb, insn):
"""
check if an instruction is related to security cookie checks
"""
# security cookie check should use SP or BP
operands = [o.strip() for o in insn.operands.split(",")]
if operands[1] not in ["esp", "ebp", "rsp", "rbp"]:
return False
for index, block in enumerate(f.getBlocks()):
# expect security cookie init in first basic block within first bytes (instructions)
block_instructions = [i for i in block.getInstructions()]
if index == 0 and insn.offset < (block_instructions[0].offset + SECURITY_COOKIE_BYTES_DELTA):
return True
# ... or within last bytes (instructions) before a return
if block_instructions[-1].mnemonic.startswith("ret") and insn.offset > (
block_instructions[-1].offset - SECURITY_COOKIE_BYTES_DELTA
):
return True
return False
def extract_insn_nzxor_characteristic_features(f, bb, insn):
"""
parse non-zeroing XOR instruction from the given instruction.
ignore expected non-zeroing XORs, e.g. security cookies.
"""
if insn.mnemonic not in ("xor", "xorpd", "xorps", "pxor"):
return
operands = [o.strip() for o in insn.operands.split(",")]
if operands[0] == operands[1]:
return
if is_security_cookie(f, bb, insn):
return
yield Characteristic("nzxor"), insn.offset
def extract_insn_mnemonic_features(f, bb, insn):
"""parse mnemonic features from the given instruction."""
yield Mnemonic(insn.mnemonic), insn.offset
def extract_insn_peb_access_characteristic_features(f, bb, insn):
"""
parse peb access from the given function. fs:[0x30] on x86, gs:[0x60] on x64
"""
if insn.mnemonic not in ["push", "mov"]:
return
operands = [o.strip() for o in insn.operands.split(",")]
for operand in operands:
if "fs:" in operand and "0x30" in operand:
yield Characteristic("peb access"), insn.offset
elif "gs:" in operand and "0x60" in operand:
yield Characteristic("peb access"), insn.offset
def extract_insn_segment_access_features(f, bb, insn):
"""parse the instruction for access to fs or gs"""
operands = [o.strip() for o in insn.operands.split(",")]
for operand in operands:
if "fs:" in operand:
yield Characteristic("fs access"), insn.offset
elif "gs:" in operand:
yield Characteristic("gs access"), insn.offset
def extract_insn_cross_section_cflow(f, bb, insn):
"""
inspect the instruction for a CALL or JMP that crosses section boundaries.
"""
if insn.mnemonic in ["call", "jmp"]:
if insn.offset in f.apirefs:
return
smda_report = insn.smda_function.smda_report
if insn.offset in f.outrefs:
for target in f.outrefs[insn.offset]:
if smda_report.getSection(insn.offset) != smda_report.getSection(target):
yield Characteristic("cross section flow"), insn.offset
elif insn.operands.startswith("0x"):
target = int(insn.operands, 16)
if smda_report.getSection(insn.offset) != smda_report.getSection(target):
yield Characteristic("cross section flow"), insn.offset
# this is a feature that's most relevant at the function scope,
# however, its most efficient to extract at the instruction scope.
def extract_function_calls_from(f, bb, insn):
if insn.mnemonic != "call":
return
if insn.offset in f.outrefs:
for outref in f.outrefs[insn.offset]:
yield Characteristic("calls from"), outref
if outref == f.offset:
# if we found a jump target and it's the function address
# mark as recursive
yield Characteristic("recursive call"), outref
if insn.offset in f.apirefs:
yield Characteristic("calls from"), insn.offset
# this is a feature that's most relevant at the function or basic block scope,
# however, its most efficient to extract at the instruction scope.
def extract_function_indirect_call_characteristic_features(f, bb, insn):
"""
extract indirect function call characteristic (e.g., call eax or call dword ptr [edx+4])
does not include calls like => call ds:dword_ABD4974
"""
if insn.mnemonic != "call":
return
if insn.operands.startswith("0x"):
return False
if "qword ptr" in insn.operands and "rip" in insn.operands:
return False
if insn.operands.startswith("dword ptr [0x"):
return False
# call edx
# call dword ptr [eax+50h]
# call qword ptr [rsp+78h]
yield Characteristic("indirect call"), insn.offset
def extract_features(f, bb, insn):
"""
extract features from the given insn.
args:
f (smda.common.SmdaFunction): the function to process.
bb (smda.common.SmdaBasicBlock): the basic block to process.
insn (smda.common.SmdaInstruction): the instruction to process.
yields:
Feature, set[VA]: the features and their location found in this insn.
"""
for insn_handler in INSTRUCTION_HANDLERS:
for feature, va in insn_handler(f, bb, insn):
yield feature, va
INSTRUCTION_HANDLERS = (
extract_insn_api_features,
extract_insn_number_features,
extract_insn_string_features,
extract_insn_bytes_features,
extract_insn_offset_features,
extract_insn_nzxor_characteristic_features,
extract_insn_mnemonic_features,
extract_insn_peb_access_characteristic_features,
extract_insn_cross_section_cflow,
extract_insn_segment_access_features,
extract_function_calls_from,
extract_function_indirect_call_characteristic_features,
)
| 35.314721 | 136 | 0.636481 |
94d057e874fc8daf77391f99a70ff127295d3253 | 2,209 | py | Python | src/RIOT/tests/test_tools/tests/01-run.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | ["MIT"] | 2 | 2020-04-30T08:17:45.000Z | 2020-05-23T08:46:54.000Z | src/RIOT/tests/test_tools/tests/01-run.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | ["MIT"] | null | null | null | src/RIOT/tests/test_tools/tests/01-run.py | ARte-team/ARte | 19f17f57522e1b18ba390718fc94be246451837b | ["MIT"] | null | null | null | #!/usr/bin/env python3
"""Test behaviour of the test running and the term program interaction."""
import sys
import pexpect
from testrunner import run
def _shellping(child, timeout=1):
"""Issue a 'shellping' command.
Raises a pexpect exception on failure.
:param timeout: timeout for the answer
"""
child.sendline('shellping')
child.expect_exact('shellpong\r\n', timeout=timeout)
def _wait_shell_ready(child, numtries=5):
"""Wait until the shell is ready by using 'shellping'."""
for _ in range(numtries - 1):
try:
_shellping(child)
except pexpect.TIMEOUT:
pass
else:
break
else:
# This one should fail
_shellping(child)
def _test_no_local_echo(child):
"""Verify that there is not local echo while testing."""
msg = 'true this should not be echoed'
child.sendline(msg)
res = child.expect_exact([pexpect.TIMEOUT, msg], timeout=1)
assert res == 0, "There should have been a timeout and not match stdin"
def _test_sending_newline(child):
"""Verify that a empty line can be send to the node.
The local terminal must NOT repeat the previous command.
"""
child.sendline('getchar')
child.sendline('') # send only one newline character
child.expect_exact('getchar 0x0a\r\n')
def _test_clean_output(child):
"""Verify that only what the node sends is received."""
child.sendline('toupper lowercase')
retline = child.readline()
assert retline.strip() == 'LOWERCASE'
def testfunc(child):
"""Run some tests to verify the board under test behaves correctly.
It currently tests:
* local echo
* getting some test output without other messages
* sending empty lines
"""
_wait_shell_ready(child)
# Verify there is no local and remote echo as it is disabled
_test_no_local_echo(child)
# The node should still answer after the previous one
_shellping(child)
# Check that the output is clean without extra terminal output
_test_clean_output(child)
# It is possible to send an empty newline
_test_sending_newline(child)
if __name__ == "__main__":
sys.exit(run(testfunc))
| 26.297619 | 75 | 0.679493 |
e6cad6c6aa70ef0bb94c6e9e7fd9563040b20262 | 8,223 | py | Python | redis-twitter-console/users.py | lauslim12/redis-twitter-console | 56f2658d1f9396c20cb7bc80565639b917304928 | ["MIT"] | null | null | null | redis-twitter-console/users.py | lauslim12/redis-twitter-console | 56f2658d1f9396c20cb7bc80565639b917304928 | ["MIT"] | null | null | null | redis-twitter-console/users.py | lauslim12/redis-twitter-console | 56f2658d1f9396c20cb7bc80565639b917304928 | ["MIT"] | null | null | null | """This module is to manage our users."""
from time import time
from redis import Redis
from errors import (
DuplicateUserError,
EmptyInputError,
FollowError,
UserDoesNotExistError,
UserIsNotYourFollowingError,
)
from utils import is_string_blank
def follow(redis: Redis, uid: int) -> None:
"""Follows another user.
Algorithm:
----------
1. Enter the username of someone that one wants to follow.
2. Sanity check, if the string is blank, raise an exception.
3. Check if user ID exists.
4. Fetch the user id from 'users' hash.
5. If the current user wants to follow themselves, then raise an exception.
6. Create a mapping to store data in zset (sorted set).
7. Store 'following' and 'followers' with the suitable IDs.
Parameters:
-----------
redis (Redis): Redis instance
uid (int): User id
Returns:
--------
None
"""
follow_username = input("Enter the username of someone to follow: ")
if is_string_blank(follow_username):
raise EmptyInputError()
if not redis.hexists("users", follow_username):
raise UserDoesNotExistError()
user_id_to_be_followed = redis.hget("users", follow_username)
if user_id_to_be_followed == uid:
raise FollowError()
following_mapping = {user_id_to_be_followed: int(time())}
followers_mapping = {uid: int(time())}
redis.zadd(f"following:{uid}", following_mapping)
redis.zadd(f"followers:{user_id_to_be_followed}", followers_mapping)
print(f"You have successfully followed {follow_username}")
def logout() -> bool:
"""Logs out a user."""
print("You have been logged out!")
return False
def other_profile(redis: Redis) -> None:
"""Try to look at other people's profile.
Algorithm:
----------
1. Take input of username.
2. Sanity check, if blank raise an exception.
3. Check if user exists.
4. Fetch the 'user_id' in 'users' HSET.
5. Fetch the user's data in 'user' HSET.
6. Display our data.
Parameters:
-----------
redis (Redis): Redis instance
Returns:
--------
None
"""
target_username = input("Enter the username that you want to see: ")
if is_string_blank(target_username):
raise EmptyInputError()
if not redis.hget("users", target_username):
raise UserDoesNotExistError()
uid = redis.hget("users", target_username)
user_data = redis.hgetall(f"user:{uid}")
tweet_data = redis.lrange(f"tweet_user:{uid}", 0, -1)
following_data = redis.zrange(f"following:{uid}", 0, -1)
followers_data = redis.zrange(f"followers:{uid}", 0, -1)
print("Their personal data:")
print(user_data)
print("\nTheir tweets:")
for item in tweet_data:
post = redis.hgetall(f"tweet:{item}")
print(post)
print("\nTheir following:")
for item in following_data:
user = redis.hgetall(f"user:{item}")
print(user)
print("\nTheir followers:")
for item in followers_data:
user = redis.hgetall(f"user:{item}")
print(user)
def profile(redis: Redis, uid: int) -> None:
"""Get personal data according to the session key.
Algorithm:
----------
1. Get all user's data from 'user' hash (intentionally hide passwords).
2. Get all user's tweets from 'tweet_user' list.
3. Get all user's following data from 'following' sorted set.
4. Get all user's followers data from 'followers' sorted set.
5. Display data in the screen.
Parameters:
-----------
redis (Redis): Redis instance
uid (int): User id
Returns:
--------
None
"""
user_data = redis.hgetall(f"user:{uid}")
tweet_data = redis.lrange(f"tweet_user:{uid}", 0, -1)
following_data = redis.zrange(f"following:{uid}", 0, -1)
followers_data = redis.zrange(f"followers:{uid}", 0, -1)
print("My personal data:")
user_data.pop("password", "secret")
print(user_data)
print("\nMy tweets:")
for item in tweet_data:
post = redis.hgetall(f"tweet:{item}")
print(post)
print("\nMy following:")
for item in following_data:
user = redis.hgetall(f"user:{item}")
print(user)
print("\nMy followers:")
for item in followers_data:
user = redis.hgetall(f"user:{item}")
print(user)
def timeline(redis: Redis) -> None:
"""Gets the global timeline of what's happening in the world.
Algorithm:
----------
1. Get all recent tweets.
Parameters:
----------
redis (Redis): Redis instance
Returns:
--------
None
"""
recent_tweets_list = redis.lrange("timeline", 0, 1000)
print("All recent tweets: ")
for item in recent_tweets_list:
post = redis.hgetall(f"tweet:{item}")
print(post)
def tweet(redis: Redis, uid: int) -> None:
"""Send a tweet connected to the user's account.
Algorithm:
----------
1. Get tweet.
2. Sanity check, if tweet is blank, raise an exception.
3. Increment 'tweet_id', as it is a standalone entity.
4. Store our tweet data in a HSET.
5. Store our tweet identifier for a certain user in LPUSH (list).
6. Store the reference to the tweet in an LPUSH (list) for the global timeline.
7. Trim the 'timeline' Redis list to the latest 1000 tweet references.
Parameters:
-----------
redis (Redis): Redis instance
uid (int): User id
Returns:
--------
None
"""
content = input("What's on your mind: ")
if is_string_blank(content):
raise EmptyInputError()
tweet_data = {
"uid": uid,
"content": content,
"date_posted": int(time()),
"date_modified": int(time()),
}
tweet_id = redis.incr("next_tweet_id")
redis.hset(f"tweet:{tweet_id}", mapping=tweet_data)
redis.lpush(f"tweet_user:{uid}", tweet_id)
redis.lpush("timeline", tweet_id)
redis.ltrim("timeline", 0, 1000)
print("Tweet has been successfully inserted!")
def unfollow(redis: Redis, uid: int) -> None:
"""Unfollows a user.
Algorithm:
----------
1. Get the sorted set of the user.
2. Sanity check, if the user id is blank, raise an exception.
3. Get the targeted user ID.
4. If user does not exist, raise an exception.
5. If targeted user is not in the 'following' of the current user, raise an exception.
6. Remove from the sorted set of the current user, and change the suitable followers/following.
Parameters:
-----------
redis (Redis): Redis instance
uid (int): User id
Returns:
-------
None
"""
username_to_be_unfollowed = input("Enter the username that you want to unfollow: ")
user_id_to_be_unfollowed = redis.hget("users", username_to_be_unfollowed)
if is_string_blank(username_to_be_unfollowed):
raise EmptyInputError()
if not user_id_to_be_unfollowed:
raise UserDoesNotExistError()
if not redis.zscore(f"following:{uid}", user_id_to_be_unfollowed):
raise UserIsNotYourFollowingError()
redis.zrem(f"following:{uid}", user_id_to_be_unfollowed)
redis.zrem(f"followers:{user_id_to_be_unfollowed}", uid)
print(
f"You have succesfully unfollowed a person with username {username_to_be_unfollowed}!"
)
def update_profile(redis: Redis, uid: int) -> None:
"""Updates a user.
Algorithm:
----------
1. Get the input.
2. Sanity checks.
3. Check for duplicate username.
4. Update the hash for 'user:id'.
Parameters:
-----------
redis (Redis): Redis instance
uid (int): User id
    Returns:
    --------
    None
    """
new_username = input("Enter your new username here: ")
if is_string_blank(new_username):
raise EmptyInputError()
if redis.hexists("users", new_username):
raise DuplicateUserError()
new_user_data = {"username": new_username, "modification_date": int(time())}
old_username = redis.hget(f"user:{uid}", "username")
redis.hset(f"user:{uid}", mapping=new_user_data)
redis.hdel("users", old_username)
redis.hset("users", new_username, uid)
print("Your personal data has been successfully updated!")
| 26.785016 | 99 | 0.634197 |
df07f73afb6f8270c71e1c1b9f23204abfea14cb | 20,379 | py | Python | sympy/logic/algorithms/dpll2.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | ["BSD-3-Clause"] | 8,323 | 2015-01-02T15:51:43.000Z | 2022-03-31T13:13:19.000Z | sympy/logic/algorithms/dpll2.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | ["BSD-3-Clause"] | 15,102 | 2015-01-01T01:33:17.000Z | 2022-03-31T22:53:13.000Z | sympy/logic/algorithms/dpll2.py | bigfooted/sympy | 1fb2490fa2fa9b476da450f02a25b03c1dc07cf0 | ["BSD-3-Clause"] | 4,490 | 2015-01-01T17:48:07.000Z | 2022-03-31T17:24:05.000Z | """Implementation of DPLL algorithm
Features:
- Clause learning
- Watch literal scheme
- VSIDS heuristic
References:
- https://en.wikipedia.org/wiki/DPLL_algorithm
"""
from collections import defaultdict
from heapq import heappush, heappop
from sympy import ordered
from sympy.assumptions.cnf import EncodedCNF
def dpll_satisfiable(expr, all_models=False):
"""
Check satisfiability of a propositional sentence.
It returns a model rather than True when it succeeds.
Returns a generator of all models if all_models is True.
Examples
========
>>> from sympy.abc import A, B
>>> from sympy.logic.algorithms.dpll2 import dpll_satisfiable
>>> dpll_satisfiable(A & ~B)
{A: True, B: False}
>>> dpll_satisfiable(A & ~A)
False
"""
if not isinstance(expr, EncodedCNF):
exprs = EncodedCNF()
exprs.add_prop(expr)
expr = exprs
# Return UNSAT when False (encoded as 0) is present in the CNF
if {0} in expr.data:
if all_models:
return (f for f in [False])
return False
solver = SATSolver(expr.data, expr.variables, set(), expr.symbols)
models = solver._find_model()
if all_models:
return _all_models(models)
try:
return next(models)
except StopIteration:
return False
# Uncomment to confirm the solution is valid (hitting set for the clauses)
#else:
#for cls in clauses_int_repr:
#assert solver.var_settings.intersection(cls)
def _all_models(models):
satisfiable = False
try:
while True:
yield next(models)
satisfiable = True
except StopIteration:
if not satisfiable:
yield False
class SATSolver:
"""
Class for representing a SAT solver capable of
finding a model to a boolean theory in conjunctive
normal form.
"""
def __init__(self, clauses, variables, var_settings, symbols=None,
heuristic='vsids', clause_learning='none', INTERVAL=500):
self.var_settings = var_settings
self.heuristic = heuristic
self.is_unsatisfied = False
self._unit_prop_queue = []
self.update_functions = []
self.INTERVAL = INTERVAL
if symbols is None:
self.symbols = list(ordered(variables))
else:
self.symbols = symbols
self._initialize_variables(variables)
self._initialize_clauses(clauses)
if 'vsids' == heuristic:
self._vsids_init()
self.heur_calculate = self._vsids_calculate
self.heur_lit_assigned = self._vsids_lit_assigned
self.heur_lit_unset = self._vsids_lit_unset
self.heur_clause_added = self._vsids_clause_added
# Note: Uncomment this if/when clause learning is enabled
#self.update_functions.append(self._vsids_decay)
else:
raise NotImplementedError
if 'simple' == clause_learning:
self.add_learned_clause = self._simple_add_learned_clause
            self.compute_conflict = self._simple_compute_conflict
            self.update_functions.append(self._simple_clean_clauses)
elif 'none' == clause_learning:
self.add_learned_clause = lambda x: None
self.compute_conflict = lambda: None
else:
raise NotImplementedError
# Create the base level
self.levels = [Level(0)]
        self._current_level.var_settings = var_settings
# Keep stats
self.num_decisions = 0
self.num_learned_clauses = 0
self.original_num_clauses = len(self.clauses)
def _initialize_variables(self, variables):
"""Set up the variable data structures needed."""
self.sentinels = defaultdict(set)
self.occurrence_count = defaultdict(int)
self.variable_set = [False] * (len(variables) + 1)
def _initialize_clauses(self, clauses):
"""Set up the clause data structures needed.
For each clause, the following changes are made:
- Unit clauses are queued for propagation right away.
- Non-unit clauses have their first and last literals set as sentinels.
- The number of clauses a literal appears in is computed.
"""
self.clauses = []
for cls in clauses:
self.clauses.append(list(cls))
for i in range(len(self.clauses)):
# Handle the unit clauses
if 1 == len(self.clauses[i]):
self._unit_prop_queue.append(self.clauses[i][0])
continue
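            # watch-literal scheme: register the first and last literals of the
            # clause as its two sentinels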
self.sentinels[self.clauses[i][0]].add(i)
self.sentinels[self.clauses[i][-1]].add(i)
for lit in self.clauses[i]:
self.occurrence_count[lit] += 1
def _find_model(self):
"""
Main DPLL loop. Returns a generator of models.
Variables are chosen successively, and assigned to be either
True or False. If a solution is not found with this setting,
the opposite is chosen and the search continues. The solver
halts when every variable has a setting.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> list(l._find_model())
[{1: True, 2: False, 3: False}, {1: True, 2: True, 3: True}]
>>> from sympy.abc import A, B, C
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set(), [A, B, C])
>>> list(l._find_model())
[{A: True, B: False, C: False}, {A: True, B: True, C: True}]
"""
# We use this variable to keep track of if we should flip a
# variable setting in successive rounds
flip_var = False
# Check if unit prop says the theory is unsat right off the bat
self._simplify()
if self.is_unsatisfied:
return
# While the theory still has clauses remaining
while True:
# Perform cleanup / fixup at regular intervals
if self.num_decisions % self.INTERVAL == 0:
for func in self.update_functions:
func()
if flip_var:
# We have just backtracked and we are trying to opposite literal
flip_var = False
lit = self._current_level.decision
else:
# Pick a literal to set
lit = self.heur_calculate()
self.num_decisions += 1
# Stopping condition for a satisfying theory
if 0 == lit:
yield {self.symbols[abs(lit) - 1]:
lit > 0 for lit in self.var_settings}
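                # backtrack to the most recent unflipped decision so the search
                # can continue and enumerate further models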
while self._current_level.flipped:
self._undo()
if len(self.levels) == 1:
return
flip_lit = -self._current_level.decision
self._undo()
self.levels.append(Level(flip_lit, flipped=True))
flip_var = True
continue
# Start the new decision level
self.levels.append(Level(lit))
# Assign the literal, updating the clauses it satisfies
self._assign_literal(lit)
# _simplify the theory
self._simplify()
# Check if we've made the theory unsat
if self.is_unsatisfied:
self.is_unsatisfied = False
# We unroll all of the decisions until we can flip a literal
while self._current_level.flipped:
self._undo()
# If we've unrolled all the way, the theory is unsat
if 1 == len(self.levels):
return
# Detect and add a learned clause
self.add_learned_clause(self.compute_conflict())
# Try the opposite setting of the most recent decision
flip_lit = -self._current_level.decision
self._undo()
self.levels.append(Level(flip_lit, flipped=True))
flip_var = True
########################
# Helper Methods #
########################
@property
def _current_level(self):
"""The current decision level data structure
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{1}, {2}], {1, 2}, set())
>>> next(l._find_model())
{1: True, 2: True}
>>> l._current_level.decision
0
>>> l._current_level.flipped
False
>>> l._current_level.var_settings
{1, 2}
"""
return self.levels[-1]
def _clause_sat(self, cls):
"""Check if a clause is satisfied by the current variable setting.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{1}, {-1}], {1}, set())
>>> try:
... next(l._find_model())
... except StopIteration:
... pass
>>> l._clause_sat(0)
False
>>> l._clause_sat(1)
True
"""
for lit in self.clauses[cls]:
if lit in self.var_settings:
return True
return False
def _is_sentinel(self, lit, cls):
"""Check if a literal is a sentinel of a given clause.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> next(l._find_model())
{1: True, 2: False, 3: False}
>>> l._is_sentinel(2, 3)
True
>>> l._is_sentinel(-3, 1)
False
"""
return cls in self.sentinels[lit]
def _assign_literal(self, lit):
"""Make a literal assignment.
The literal assignment must be recorded as part of the current
decision level. Additionally, if the literal is marked as a
sentinel of any clause, then a new sentinel must be chosen. If
this is not possible, then unit propagation is triggered and
another literal is added to the queue to be set in the future.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> next(l._find_model())
{1: True, 2: False, 3: False}
>>> l.var_settings
{-3, -2, 1}
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l._assign_literal(-1)
>>> try:
... next(l._find_model())
... except StopIteration:
... pass
>>> l.var_settings
{-1}
"""
self.var_settings.add(lit)
self._current_level.var_settings.add(lit)
self.variable_set[abs(lit)] = True
self.heur_lit_assigned(lit)
sentinel_list = list(self.sentinels[-lit])
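        # every clause watching the negated literal must either find a new
        # unassigned sentinel or unit-propagate its remaining sentinel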
for cls in sentinel_list:
if not self._clause_sat(cls):
other_sentinel = None
for newlit in self.clauses[cls]:
if newlit != -lit:
if self._is_sentinel(newlit, cls):
other_sentinel = newlit
elif not self.variable_set[abs(newlit)]:
self.sentinels[-lit].remove(cls)
self.sentinels[newlit].add(cls)
other_sentinel = None
break
# Check if no sentinel update exists
if other_sentinel:
self._unit_prop_queue.append(other_sentinel)
def _undo(self):
"""
_undo the changes of the most recent decision level.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> next(l._find_model())
{1: True, 2: False, 3: False}
>>> level = l._current_level
>>> level.decision, level.var_settings, level.flipped
(-3, {-3, -2}, False)
>>> l._undo()
>>> level = l._current_level
>>> level.decision, level.var_settings, level.flipped
(0, {1}, False)
"""
# Undo the variable settings
for lit in self._current_level.var_settings:
self.var_settings.remove(lit)
self.heur_lit_unset(lit)
self.variable_set[abs(lit)] = False
# Pop the level off the stack
self.levels.pop()
#########################
# Propagation #
#########################
"""
Propagation methods should attempt to soundly simplify the boolean
theory, and return True if any simplification occurred and False
otherwise.
"""
def _simplify(self):
"""Iterate over the various forms of propagation to simplify the theory.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l.variable_set
[False, False, False, False]
>>> l.sentinels
{-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4}}
>>> l._simplify()
>>> l.variable_set
[False, True, False, False]
>>> l.sentinels
{-3: {0, 2}, -2: {3, 4}, -1: set(), 2: {0, 3},
...3: {2, 4}}
"""
changed = True
while changed:
changed = False
changed |= self._unit_prop()
changed |= self._pure_literal()
def _unit_prop(self):
"""Perform unit propagation on the current theory."""
result = len(self._unit_prop_queue) > 0
while self._unit_prop_queue:
next_lit = self._unit_prop_queue.pop()
if -next_lit in self.var_settings:
self.is_unsatisfied = True
self._unit_prop_queue = []
return False
else:
self._assign_literal(next_lit)
return result
def _pure_literal(self):
"""Look for pure literals and assign them when found."""
return False
#########################
# Heuristics #
#########################
def _vsids_init(self):
"""Initialize the data structures needed for the VSIDS heuristic."""
self.lit_heap = []
self.lit_scores = {}
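        # heapq is a min-heap, so scores are negated occurrence counts: the most
        # frequent literal has the smallest score and is popped first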
for var in range(1, len(self.variable_set)):
self.lit_scores[var] = float(-self.occurrence_count[var])
self.lit_scores[-var] = float(-self.occurrence_count[-var])
heappush(self.lit_heap, (self.lit_scores[var], var))
heappush(self.lit_heap, (self.lit_scores[-var], -var))
def _vsids_decay(self):
"""Decay the VSIDS scores for every literal.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l.lit_scores
{-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0}
>>> l._vsids_decay()
>>> l.lit_scores
{-3: -1.0, -2: -1.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -1.0}
"""
# We divide every literal score by 2 for a decay factor
# Note: This doesn't change the heap property
for lit in self.lit_scores.keys():
self.lit_scores[lit] /= 2.0
def _vsids_calculate(self):
"""
VSIDS Heuristic Calculation
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l.lit_heap
[(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)]
>>> l._vsids_calculate()
-3
>>> l.lit_heap
[(-2.0, -2), (-2.0, 2), (0.0, -1), (0.0, 1), (-2.0, 3)]
"""
if len(self.lit_heap) == 0:
return 0
# Clean out the front of the heap as long the variables are set
while self.variable_set[abs(self.lit_heap[0][1])]:
heappop(self.lit_heap)
if len(self.lit_heap) == 0:
return 0
return heappop(self.lit_heap)[1]
def _vsids_lit_assigned(self, lit):
"""Handle the assignment of a literal for the VSIDS heuristic."""
pass
def _vsids_lit_unset(self, lit):
"""Handle the unsetting of a literal for the VSIDS heuristic.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l.lit_heap
[(-2.0, -3), (-2.0, 2), (-2.0, -2), (0.0, 1), (-2.0, 3), (0.0, -1)]
>>> l._vsids_lit_unset(2)
>>> l.lit_heap
[(-2.0, -3), (-2.0, -2), (-2.0, -2), (-2.0, 2), (-2.0, 3), (0.0, -1),
...(-2.0, 2), (0.0, 1)]
"""
var = abs(lit)
heappush(self.lit_heap, (self.lit_scores[var], var))
heappush(self.lit_heap, (self.lit_scores[-var], -var))
def _vsids_clause_added(self, cls):
"""Handle the addition of a new clause for the VSIDS heuristic.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l.num_learned_clauses
0
>>> l.lit_scores
{-3: -2.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -2.0, 3: -2.0}
>>> l._vsids_clause_added({2, -3})
>>> l.num_learned_clauses
1
>>> l.lit_scores
{-3: -1.0, -2: -2.0, -1: 0.0, 1: 0.0, 2: -1.0, 3: -2.0}
"""
self.num_learned_clauses += 1
for lit in cls:
self.lit_scores[lit] += 1
########################
# Clause Learning #
########################
def _simple_add_learned_clause(self, cls):
"""Add a new clause to the theory.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> l.num_learned_clauses
0
>>> l.clauses
[[2, -3], [1], [3, -3], [2, -2], [3, -2]]
>>> l.sentinels
{-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4}}
>>> l._simple_add_learned_clause([3])
>>> l.clauses
[[2, -3], [1], [3, -3], [2, -2], [3, -2], [3]]
>>> l.sentinels
{-3: {0, 2}, -2: {3, 4}, 2: {0, 3}, 3: {2, 4, 5}}
"""
cls_num = len(self.clauses)
self.clauses.append(cls)
for lit in cls:
self.occurrence_count[lit] += 1
self.sentinels[cls[0]].add(cls_num)
self.sentinels[cls[-1]].add(cls_num)
self.heur_clause_added(cls)
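    # Only the first and last literals of the new clause are registered as
    # sentinels (the watched literals for that clause), matching the sentinel
    # layout shown in the docstring above.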
def _simple_compute_conflict(self):
""" Build a clause representing the fact that at least one decision made
so far is wrong.
Examples
========
>>> from sympy.logic.algorithms.dpll2 import SATSolver
>>> l = SATSolver([{2, -3}, {1}, {3, -3}, {2, -2},
... {3, -2}], {1, 2, 3}, set())
>>> next(l._find_model())
{1: True, 2: False, 3: False}
>>> l._simple_compute_conflict()
[3]
"""
return [-(level.decision) for level in self.levels[1:]]
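    # The returned clause is the disjunction of the negations of every
    # decision made after the root level, i.e. it asserts that the current
    # combination of decisions cannot all hold simultaneously.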
def _simple_clean_clauses(self):
"""Clean up learned clauses."""
pass
class Level:
"""
Represents a single level in the DPLL algorithm, and contains
enough information for a sound backtracking procedure.
"""
def __init__(self, decision, flipped=False):
self.decision = decision
self.var_settings = set()
self.flipped = flipped
| 30.783988 | 80 | 0.521076 |
5f98abe3deb4db120b5bdab2d8a9209617bd09da | 1,094 | py | Python | migrations/versions/0330_edit_templates_permission.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
]
| 10 | 2020-05-04T14:11:06.000Z | 2022-02-22T19:06:36.000Z | migrations/versions/0330_edit_templates_permission.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
]
| 554 | 2020-05-07T21:56:24.000Z | 2022-03-31T23:04:51.000Z | migrations/versions/0330_edit_templates_permission.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
]
| 4 | 2020-08-27T16:43:29.000Z | 2021-02-17T22:17:27.000Z | """
Revision ID: 0330_edit_templates_permission
Revises: 0329_notification_status
Create Date: 2021-06-25 10:39:29.089237
"""
from alembic import op
revision = '0330_edit_templates_permission'
down_revision = '0329_notification_status'
def upgrade():
# adding a value to an enum must be done outside of a transaction, hence autocommit_block
with op.get_context().autocommit_block():
op.execute("ALTER TYPE permission_types ADD VALUE 'edit_templates'")
def downgrade():
# there's no ALTER TYPE ... DROP VALUE, so we've got to do this whole dance
op.execute("ALTER TYPE permission_types RENAME TO permission_types_old")
# old values
op.execute("""
CREATE TYPE permission_types AS ENUM(
'manage_users', 'manage_templates', 'manage_settings', 'send_texts', 'send_emails', 'send_letters', 'manage_api_keys', 'platform_admin', 'view_activity'
)
""")
op.execute("ALTER TABLE permissions ALTER COLUMN permission TYPE permission_types USING permission::text::permission_types")
op.execute("DROP TYPE permission_types_old")
| 34.1875 | 164 | 0.734918 |
5d26e4c20ad43ff78ea236bca81b77806218285a | 1,869 | py | Python | scandeval/benchmarks/ndt_nn_pos.py | ebanalyse/ScandEval | ecdacaf176c42e5f9078ea586315fdf5bc8e2015 | [
"MIT"
]
| null | null | null | scandeval/benchmarks/ndt_nn_pos.py | ebanalyse/ScandEval | ecdacaf176c42e5f9078ea586315fdf5bc8e2015 | [
"MIT"
]
| null | null | null | scandeval/benchmarks/ndt_nn_pos.py | ebanalyse/ScandEval | ecdacaf176c42e5f9078ea586315fdf5bc8e2015 | [
"MIT"
]
| null | null | null | """POS evaluation of a language model on the Nynorsk part of the NDT dataset"""
import logging
from .abstract import PosBenchmark
logger = logging.getLogger(__name__)
class NdtNNPosBenchmark(PosBenchmark):
"""Benchmark of language models on the Nynorsk POS part of the NDT dataset.
Args:
cache_dir (str, optional):
Where the downloaded models will be stored. Defaults to
'.benchmark_models'.
evaluate_train (bool, optional):
            Whether the models should also be evaluated on the training set.
Defaults to False.
verbose (bool, optional):
Whether to print additional output during evaluation. Defaults to
False.
Attributes:
name (str): The name of the dataset.
task (str): The type of task to be benchmarked.
metric_names (dict): The names of the metrics.
id2label (dict or None): A dictionary converting indices to labels.
label2id (dict or None): A dictionary converting labels to indices.
num_labels (int or None): The number of labels in the dataset.
label_synonyms (list of lists of str): Synonyms of the dataset labels.
evaluate_train (bool): Whether the training set should be evaluated.
cache_dir (str): Directory where models are cached.
two_labels (bool): Whether two labels should be predicted.
split_point (int or None): Splitting point of `id2label` into labels.
verbose (bool): Whether to print additional output.
"""
def __init__(
self,
cache_dir: str = ".benchmark_models",
evaluate_train: bool = False,
verbose: bool = False,
):
super().__init__(
name="ndt-nn-pos",
cache_dir=cache_dir,
evaluate_train=evaluate_train,
verbose=verbose,
)
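# Instantiation sketch (illustrative; uses only the constructor defined above):
#
#     benchmark = NdtNNPosBenchmark(evaluate_train=True, verbose=True)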
| 35.942308 | 79 | 0.647405 |
7565bb536b722cc6e845a1351b00dbe1801bbec2 | 457 | py | Python | core/wsgi.py | rijkerd/Smart-Class | c72ef9c140f722b110d841e0ad03938485803758 | [
"MIT"
]
| 4 | 2020-05-22T10:29:43.000Z | 2022-03-17T07:24:03.000Z | core/wsgi.py | rijkerd/Smart-Class | c72ef9c140f722b110d841e0ad03938485803758 | [
"MIT"
]
| 8 | 2020-04-01T22:29:42.000Z | 2021-09-29T08:43:09.000Z | core/wsgi.py | rijkerd/Smart-Class | c72ef9c140f722b110d841e0ad03938485803758 | [
"MIT"
]
| null | null | null | """
WSGI config for core project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise import WhiteNoise
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = get_wsgi_application()
application = WhiteNoise(application)
| 24.052632 | 78 | 0.794311 |
5c2788a122c4f1b13c58a148d257e94de33bdea8 | 2,769 | py | Python | oblib/taxonomy_types.py | dfcoffin/core | 11efb163a84698961fe624d964ef5b921f7e58a9 | [
"Apache-2.0"
]
| null | null | null | oblib/taxonomy_types.py | dfcoffin/core | 11efb163a84698961fe624d964ef5b921f7e58a9 | [
"Apache-2.0"
]
| null | null | null | oblib/taxonomy_types.py | dfcoffin/core | 11efb163a84698961fe624d964ef5b921f7e58a9 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018 Wells Fargo
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.sax
import constants
class _TaxonomyTypesHandler(xml.sax.ContentHandler):
"""
Loads Taxonomy Types from the solar types xsd file.
"""
def __init__(self):
self._types = {}
def startElement(self, name, attrs):
if name == "complexType":
for item in attrs.items():
if item[0] == "name":
self._curr = []
self._types[item[1]] = self._curr
elif name == "xs:enumeration":
for item in attrs.items():
if item[0] == "value":
self._curr.append(item[1])
def types(self):
return self._types
class TaxonomyTypes(object):
"""
Represents Taxonomy Types and allows lookup of enumerated values for each Taxonomy Type.
    Please note that in the implementation of this class the variable name "type" is never
    used, although "_type" and "types" are, in order to avoid confusion with the python
    "type" builtin.
"""
def __init__(self):
self._types = self._load_types()
def _load_types_file(self, pathname):
tax = _TaxonomyTypesHandler()
parser = xml.sax.make_parser()
parser.setContentHandler(tax)
parser.parse(open(pathname))
return tax.types()
def _load_types(self):
pathname = os.path.join(constants.SOLAR_TAXONOMY_DIR, "core")
for filename in os.listdir(pathname):
if 'types' in filename:
types = self._load_types_file(os.path.join(pathname, filename))
return types
def types(self):
"""
        Returns a dict mapping each type name to the list of its enumerated values.
"""
return self._types
def validate_type(self, type_name):
"""
Validates that a type is in the taxonomy.
"""
if type_name in self._types:
return True
else:
return False
def type_enum(self, type_name):
"""
Returns an enumeration given a type or None if the type does not exist in the taxonomy.
"""
if type_name in self._types:
return self._types[type_name]
else:
return None
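# Usage sketch (illustrative; the type name below is hypothetical and must
# exist in the loaded solar taxonomy for the lookup to return anything):
#
#     taxonomy_types = TaxonomyTypes()
#     if taxonomy_types.validate_type("deviceItemType"):
#         print(taxonomy_types.type_enum("deviceItemType"))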
| 28.84375 | 95 | 0.622246 |
d73ca2ddfbfa1a6f2fc4c1dc2673932feaadd8f1 | 876 | py | Python | birdwatch.py | antithalian/birdwatch | d1067faa7ee942862453e69db0a22df9e94cac49 | [
"MIT"
]
| null | null | null | birdwatch.py | antithalian/birdwatch | d1067faa7ee942862453e69db0a22df9e94cac49 | [
"MIT"
]
| null | null | null | birdwatch.py | antithalian/birdwatch | d1067faa7ee942862453e69db0a22df9e94cac49 | [
"MIT"
]
| null | null | null | # birdwatch.py
# downloads images on Tweet link provided by user
import tweepy
import json
import argparse
import os, sys
# takes url of tweet and a tweepy twitter api instance as inputs
# returns a list with all media urls or integer -1 if no images on linked tweet
def get_fullres(tweet_url, api_instance):
pass
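# Possible implementation sketch for get_fullres (untested; assumes tweepy's
# v1.1 REST client and that the tweet id is the last path segment of the URL):
#
#     tweet_id = tweet_url.rstrip('/').split('/')[-1].split('?')[0]
#     status = api_instance.get_status(tweet_id, tweet_mode='extended')
#     media = getattr(status, 'extended_entities', {}).get('media', [])
#     return [m['media_url_https'] for m in media] if media else -1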
# main function, iterates through provided links, downloads any images, and saves them
def main():
# load secrets from JSON file
sec_file = './secrets.json' # if you want to use a different file, change this and the .gitignore
with open(sec_file, 'r') as file:
secrets = json.load(file)
# authenticate to twitter API using tweepy
auth = tweepy.AppAuthHandler(secrets['api_key'], secrets['secret'])
# get an api object
api = tweepy.API(auth)
# run main function
if __name__ == '__main__':
main()
| 25.028571 | 101 | 0.703196 |
bc5ab96cf4ff79a6879ba7f9e730c5a7c305574c | 5,200 | py | Python | zsparse/core.py | ryan-williams/zsparse | 7f3ec59481e64357eb8e195656cfeaeaff58322a | [
"MIT"
]
| 5 | 2019-06-06T03:29:47.000Z | 2022-02-10T22:32:27.000Z | zsparse/core.py | ryan-williams/zsparse | 7f3ec59481e64357eb8e195656cfeaeaff58322a | [
"MIT"
]
| null | null | null | zsparse/core.py | ryan-williams/zsparse | 7f3ec59481e64357eb8e195656cfeaeaff58322a | [
"MIT"
]
| 1 | 2021-04-06T13:49:09.000Z | 2021-04-06T13:49:09.000Z | import numpy as np
import scipy.sparse as ss
import zarr
import os
import pickle
import operator
from functools import reduce
from .indexing import getitem
from .utils import html_table, human_readable_size
FORMATS = {'coo': ss.coo_matrix,
'csr': ss.csr_matrix,
'csc': ss.csc_matrix}
FORMAT_NAMES = {'coo': 'Coordinate Sparse Matrix',
'csr': 'Compressed Sparse Row Matrix',
'csc': 'Compressed Sparse Column Matrix'}
class Matrix:
def __init__(self,
arg,
format,
compressor='default',
shape=None,
store=None,
chunks=None,
dtype=None):
if format not in FORMATS:
raise NotImplementedError('The given format is not supported.')
if not isinstance(arg, ss.spmatrix):
try:
arg = FORMATS[format](arg,shape=shape)
            except Exception:
raise ValueError('Invalid input')
arg = arg.asformat(format)
self.shape = arg.shape
if arg.format == 'coo':
arg = (arg.data,arg.row,arg.col)
else:
arg = (arg.data,arg.indices,arg.indptr)
if store is not None:
store1 = store.__class__(os.path.join(store.path,'data.zarr'))
if format == 'coo':
store2 = store.__class__(os.path.join(store.path,'row.zarr'))
store3 = store.__class__(os.path.join(store.path,'col.zarr'))
else:
store2 = store.__class__(os.path.join(store.path,'indices.zarr'))
store3 = store.__class__(os.path.join(store.path,'indptr.zarr'))
else:
store1 = store2 = store3 = None
if format == 'coo':
self.row = zarr.array(arg[1],chunks=chunks,store=store2,compressor=compressor)
self.col = zarr.array(arg[2],chunks=chunks,store=store3,compressor=compressor)
else:
self.indices = zarr.array(arg[1],chunks=chunks,store=store2,compressor=compressor)
self.indptr = zarr.array(arg[2],chunks=chunks,store=store3,compressor=compressor)
self.data = zarr.array(arg[0],chunks=chunks,store=store1,compressor=compressor,dtype=dtype)
self.format = format
self._store = store
if self._store is not None:
with open(os.path.join(store.path,'attrs.pkl'), 'wb') as file:
pickle.dump(self, file)
def __getstate__(self):
state = self.__dict__.copy()
del state['data']
if self.format == 'coo':
del state['row']
del state['col']
else:
del state['indices']
del state['indptr']
return state
def __setstate__(self,state):
self.__dict__.update(state)
path = self._store.path
self.data = zarr.open(os.path.join(path,'data.zarr'))
if self.format == 'coo':
self.row = zarr.open(os.path.join(path,'row.zarr'))
self.col = zarr.open(os.path.join(path,'col.zarr'))
else:
self.indices = zarr.open(os.path.join(path,'indices.zarr'))
self.indptr = zarr.open(os.path.join(path,'indptr.zarr'))
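    # The zarr arrays themselves are never pickled: __getstate__ drops them
    # and __setstate__ re-opens them from the on-disk store, which is why
    # pickling is only attempted when a `store` was supplied.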
__getitem__ = getitem
def __str__(self):
nbytes = human_readable_size(self.nbytes_stored)
return "<{}, shape={}, nnz={}, bytes_stored = {}>".format(
FORMAT_NAMES[self.format],self.shape,self.nnz,nbytes)
__repr__ = __str__
@property
def dtype(self):
return self.data.dtype
@property
def nchunks(self):
if self.format == 'coo':
return self.data.nchunks + self.row.nchunks + self.col.nchunks
else:
return self.data.nchunks + self.indices.nchunks + self.indptr.nchunks
@property
def nchunks_initialized(self):
if self.format == 'coo':
return self.data.nchunks_initialized + self.row.nchunks_initialized + self.col.nchunks_initialized
else:
return self.data.nchunks_initialized + self.indices.nchunks_initialized + self.indptr.nchunks_initialized
@property
def nbytes(self):
if self.format == 'coo':
return self.data.nbytes + self.row.nbytes + self.col.nbytes
else:
return self.data.nbytes + self.indices.nbytes + self.indptr.nbytes
@property
def nbytes_stored(self):
if self.format == 'coo':
return self.data.nbytes_stored + self.row.nbytes_stored + self.col.nbytes_stored
else:
return self.data.nbytes_stored + self.indices.nbytes_stored + self.indptr.nbytes_stored
@property
def nnz(self):
return self.data.shape[0]
@property
def density(self):
return self.nnz/(self.shape[0] * self.shape[1])
@property
def compressor(self):
return self.data.compressor
@property
def size(self):
return reduce(operator.mul,self.shape)
def _repr_html_(self):
return html_table(self) | 36.363636 | 118 | 0.574808 |
491e9effd555b5cf0da2adbe52e7109375a33ac2 | 6,244 | py | Python | accenv/lib/python3.4/site-packages/IPython/utils/_process_win32.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
]
| null | null | null | accenv/lib/python3.4/site-packages/IPython/utils/_process_win32.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
]
| null | null | null | accenv/lib/python3.4/site-packages/IPython/utils/_process_win32.py | adamshamsudeen/clubdin-dj | eb48c67dab3a4ae7c4032544eb4d64e0b1d7e15a | [
"MIT"
]
| null | null | null | """Windows-specific implementation of process utilities.
This file is only meant to be imported by process.py, not by end-users.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import os
import sys
import ctypes
from ctypes import c_int, POINTER
from ctypes.wintypes import LPCWSTR, HLOCAL
from subprocess import STDOUT
# our own imports
from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
from . import py3compat
from .encoding import DEFAULT_ENCODING
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
class AvoidUNCPath(object):
"""A context manager to protect command execution from UNC paths.
In the Win32 API, commands can't be invoked with the cwd being a UNC path.
This context manager temporarily changes directory to the 'C:' drive on
entering, and restores the original working directory on exit.
The context manager returns the starting working directory *if* it made a
change and None otherwise, so that users can apply the necessary adjustment
to their system calls in the event of a change.
Example
-------
::
cmd = 'dir'
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
os.system(cmd)
"""
def __enter__(self):
self.path = os.getcwd()
self.is_unc_path = self.path.startswith(r"\\")
if self.is_unc_path:
# change to c drive (as cmd.exe cannot handle UNC addresses)
os.chdir("C:")
return self.path
else:
# We return None to signal that there was no change in the working
# directory
return None
def __exit__(self, exc_type, exc_value, traceback):
if self.is_unc_path:
os.chdir(self.path)
def _find_cmd(cmd):
"""Find the full path to a .bat or .exe using the win32api module."""
try:
from win32api import SearchPath
except ImportError:
raise ImportError('you need to have pywin32 installed for this to work')
else:
PATH = os.environ['PATH']
extensions = ['.exe', '.com', '.bat', '.py']
path = None
for ext in extensions:
try:
path = SearchPath(PATH, cmd, ext)[0]
except:
pass
if path is None:
raise OSError("command %r not found" % cmd)
else:
return path
def _system_body(p):
"""Callback for _system."""
enc = DEFAULT_ENCODING
for line in read_no_interrupt(p.stdout).splitlines():
line = line.decode(enc, 'replace')
print(line, file=sys.stdout)
for line in read_no_interrupt(p.stderr).splitlines():
line = line.decode(enc, 'replace')
print(line, file=sys.stderr)
# Wait to finish for returncode
return p.wait()
def system(cmd):
"""Win32 version of os.system() that works with network shares.
Note that this implementation returns None, as meant for use in IPython.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
None : we explicitly do NOT return the subprocess status code, as this
utility is meant to be used extensively in IPython, where any return value
would trigger :func:`sys.displayhook` calls.
"""
# The controller provides interactivity with both
# stdin and stdout
#import _process_win32_controller
#_process_win32_controller.system(cmd)
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
return process_handler(cmd, _system_body)
def getoutput(cmd):
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
if out is None:
out = b''
return py3compat.bytes_to_str(out)
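# Example (illustrative): getoutput('cd') on Windows returns the current
# working directory as printed by cmd.exe, with the AvoidUNCPath workaround
# applied when the cwd is a network share.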
try:
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
    CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPCWSTR)
LocalFree = ctypes.windll.kernel32.LocalFree
    LocalFree.restype = HLOCAL
    LocalFree.argtypes = [HLOCAL]
def arg_split(commandline, posix=False, strict=True):
"""Split a command line's arguments in a shell-like manner.
        This is a special version for Windows that uses a ctypes call to CommandLineToArgvW
        to do the argv splitting. The posix parameter is ignored.
If strict=False, process_common.arg_split(...strict=False) is used instead.
"""
#CommandLineToArgvW returns path to executable if called with empty string.
if commandline.strip() == "":
return []
if not strict:
# not really a cl-arg, fallback on _process_common
return py_arg_split(commandline, posix=posix, strict=strict)
argvn = c_int()
result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
result_array_type = LPCWSTR * argvn.value
result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
retval = LocalFree(result_pointer)
return result
except AttributeError:
arg_split = py_arg_split
| 33.212766 | 110 | 0.599455 |
a547e1af3d087392b10fdc56bbb1fe77b1609af2 | 1,176 | py | Python | flsim/utils/simple_batch_metrics.py | JohnlNguyen/FLSim | a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb | [
"BSD-3-Clause"
]
| 79 | 2021-12-09T18:05:09.000Z | 2022-03-23T20:43:46.000Z | flsim/utils/simple_batch_metrics.py | JohnlNguyen/FLSim | a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb | [
"BSD-3-Clause"
]
| 11 | 2021-12-30T17:54:04.000Z | 2022-03-23T17:23:00.000Z | flsim/utils/simple_batch_metrics.py | JohnlNguyen/FLSim | a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb | [
"BSD-3-Clause"
]
| 9 | 2021-12-09T19:55:22.000Z | 2022-03-15T00:02:08.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
import torch
from flsim.interfaces.batch_metrics import IFLBatchMetrics
class FLBatchMetrics(IFLBatchMetrics):
def __init__(
self,
*,
loss: torch.Tensor,
num_examples: int,
predictions: torch.Tensor,
targets: torch.Tensor,
model_inputs: Any,
) -> None:
self._loss = loss
self._num_examples = num_examples
self._predictions = predictions
self._targets = targets
self._model_inputs = model_inputs
@property
def loss(self) -> torch.Tensor:
return self._loss
@property
def num_examples(self) -> int:
return self._num_examples
@property
def predictions(self) -> torch.Tensor:
return self._predictions
@property
def targets(self) -> torch.Tensor:
return self._targets
@property
def model_inputs(self) -> Any:
return self._model_inputs
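# Construction sketch (illustrative values; all arguments are keyword-only):
#
#     metrics = FLBatchMetrics(
#         loss=torch.tensor(0.25),
#         num_examples=32,
#         predictions=torch.zeros(32, 10),
#         targets=torch.zeros(32, dtype=torch.long),
#         model_inputs=None,
#     )
#     print(metrics.num_examples, metrics.loss.item())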
| 24 | 71 | 0.657313 |
bfb09e9f45067c8c5427c66252a7c28a3c391a73 | 51,599 | py | Python | dask/dataframe/io/tests/test_csv.py | JayjeetAtGithub/dask | ee9d64a98193f67567fc289b2306199b0bcf5b59 | [
"BSD-3-Clause"
]
| null | null | null | dask/dataframe/io/tests/test_csv.py | JayjeetAtGithub/dask | ee9d64a98193f67567fc289b2306199b0bcf5b59 | [
"BSD-3-Clause"
]
| null | null | null | dask/dataframe/io/tests/test_csv.py | JayjeetAtGithub/dask | ee9d64a98193f67567fc289b2306199b0bcf5b59 | [
"BSD-3-Clause"
]
| null | null | null | import gzip
import os
from io import BytesIO
from time import sleep
from unittest import mock
import pytest
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from fsspec.compression import compr
from tlz import partition_all, valmap
import dask
import dask.dataframe as dd
from dask.base import compute_as_if_collection
from dask.bytes.core import read_bytes
from dask.bytes.utils import compress
from dask.core import flatten
from dask.dataframe._compat import tm
from dask.dataframe.io.csv import (
_infer_block_size,
auto_blocksize,
block_mask,
pandas_read_text,
text_blocks_to_pandas,
)
from dask.dataframe.utils import assert_eq, has_known_categories
from dask.utils import filetext, filetexts, tmpdir, tmpfile
# List of available compression format for test_read_csv_compression
compression_fmts = [fmt for fmt in compr] + [None]
def normalize_text(s):
return "\n".join(map(str.strip, s.strip().split("\n")))
def parse_filename(path):
return os.path.split(path)[1]
csv_text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
Alice,200
Frank,-200
Bob,600
Alice,400
Frank,200
Alice,300
Edith,600
""".strip()
tsv_text = csv_text.replace(",", "\t")
tsv_text2 = """
name amount
Alice 100
Bob -200
Charlie 300
Dennis 400
Edith -500
Frank 600
Alice 200
Frank -200
Bob 600
Alice 400
Frank 200
Alice 300
Edith 600
""".strip()
timeseries = """
Date,Open,High,Low,Close,Volume,Adj Close
2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005
2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004
2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993
2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996
2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003
2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005
2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995
2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999
2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993
""".strip()
csv_files = {
"2014-01-01.csv": (
b"name,amount,id\n" b"Alice,100,1\n" b"Bob,200,2\n" b"Charlie,300,3\n"
),
"2014-01-02.csv": b"name,amount,id\n",
"2014-01-03.csv": (
b"name,amount,id\n" b"Dennis,400,4\n" b"Edith,500,5\n" b"Frank,600,6\n"
),
}
tsv_files = {k: v.replace(b",", b"\t") for (k, v) in csv_files.items()}
fwf_files = {
"2014-01-01.csv": (
b" name amount id\n"
b" Alice 100 1\n"
b" Bob 200 2\n"
b" Charlie 300 3\n"
),
"2014-01-02.csv": b" name amount id\n",
"2014-01-03.csv": (
b" name amount id\n"
b" Dennis 400 4\n"
b" Edith 500 5\n"
b" Frank 600 6\n"
),
}
expected = pd.concat([pd.read_csv(BytesIO(csv_files[k])) for k in sorted(csv_files)])
comment_header = b"""# some header lines
# that may be present
# in a data file
# before any data"""
csv_units_row = b"str, int, int\n"
tsv_units_row = csv_units_row.replace(b",", b"\t")
# Pandas has deprecated read_table
read_table_mark = pytest.mark.filterwarnings("ignore:read_table:FutureWarning")
csv_and_table = pytest.mark.parametrize(
"reader,files",
[
(pd.read_csv, csv_files),
pytest.param(pd.read_table, tsv_files, marks=read_table_mark),
(pd.read_fwf, fwf_files),
],
)
@csv_and_table
def test_pandas_read_text(reader, files):
b = files["2014-01-01.csv"]
df = pandas_read_text(reader, b, b"", {})
assert list(df.columns) == ["name", "amount", "id"]
assert len(df) == 3
assert df.id.sum() == 1 + 2 + 3
@csv_and_table
def test_pandas_read_text_kwargs(reader, files):
b = files["2014-01-01.csv"]
df = pandas_read_text(reader, b, b"", {"usecols": ["name", "id"]})
assert list(df.columns) == ["name", "id"]
@csv_and_table
def test_pandas_read_text_dtype_coercion(reader, files):
b = files["2014-01-01.csv"]
df = pandas_read_text(reader, b, b"", {}, {"amount": "float"})
assert df.amount.dtype == "float"
@csv_and_table
def test_pandas_read_text_with_header(reader, files):
b = files["2014-01-01.csv"]
header, b = b.split(b"\n", 1)
header = header + b"\n"
df = pandas_read_text(reader, b, header, {})
assert list(df.columns) == ["name", "amount", "id"]
assert len(df) == 3
assert df.id.sum() == 1 + 2 + 3
@csv_and_table
def test_text_blocks_to_pandas_simple(reader, files):
blocks = [[files[k]] for k in sorted(files)]
kwargs = {}
head = pandas_read_text(reader, files["2014-01-01.csv"], b"", {})
header = files["2014-01-01.csv"].split(b"\n")[0] + b"\n"
df = text_blocks_to_pandas(reader, blocks, header, head, kwargs)
assert isinstance(df, dd.DataFrame)
assert list(df.columns) == ["name", "amount", "id"]
values = text_blocks_to_pandas(reader, blocks, header, head, kwargs)
assert isinstance(values, dd.DataFrame)
assert hasattr(values, "dask")
assert len(values.dask) == 3
assert_eq(df.amount.sum(), 100 + 200 + 300 + 400 + 500 + 600)
@csv_and_table
def test_text_blocks_to_pandas_kwargs(reader, files):
blocks = [files[k] for k in sorted(files)]
blocks = [[b] for b in blocks]
kwargs = {"usecols": ["name", "id"]}
head = pandas_read_text(reader, files["2014-01-01.csv"], b"", kwargs)
header = files["2014-01-01.csv"].split(b"\n")[0] + b"\n"
df = text_blocks_to_pandas(reader, blocks, header, head, kwargs)
assert list(df.columns) == ["name", "id"]
result = df.compute()
assert (result.columns == df.columns).all()
@csv_and_table
def test_text_blocks_to_pandas_blocked(reader, files):
header = files["2014-01-01.csv"].split(b"\n")[0] + b"\n"
blocks = []
for k in sorted(files):
b = files[k]
lines = b.split(b"\n")
blocks.append([b"\n".join(bs) for bs in partition_all(2, lines)])
df = text_blocks_to_pandas(reader, blocks, header, expected.head(), {})
assert_eq(
df.compute().reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=False,
)
expected2 = expected[["name", "id"]]
df = text_blocks_to_pandas(
reader, blocks, header, expected2.head(), {"usecols": ["name", "id"]}
)
assert_eq(
df.compute().reset_index(drop=True),
expected2.reset_index(drop=True),
check_dtype=False,
)
@pytest.mark.parametrize(
"dd_read,pd_read,files",
[(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],
)
@read_table_mark
def test_skiprows(dd_read, pd_read, files):
files = {name: comment_header + b"\n" + content for name, content in files.items()}
skip = len(comment_header.splitlines())
with filetexts(files, mode="b"):
df = dd_read("2014-01-*.csv", skiprows=skip)
expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])
assert_eq(df, expected_df, check_dtype=False)
@pytest.mark.parametrize(
"dd_read,pd_read,files,units",
[
(dd.read_csv, pd.read_csv, csv_files, csv_units_row),
(dd.read_table, pd.read_table, tsv_files, tsv_units_row),
],
)
@read_table_mark
def test_skiprows_as_list(dd_read, pd_read, files, units):
files = {
name: (comment_header + b"\n" + content.replace(b"\n", b"\n" + units, 1))
for name, content in files.items()
}
skip = [0, 1, 2, 3, 5]
with filetexts(files, mode="b"):
df = dd_read("2014-01-*.csv", skiprows=skip)
expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])
assert_eq(df, expected_df, check_dtype=False)
csv_blocks = [
[b"aa,bb\n1,1.0\n2,2.0", b"10,20\n30,40"],
[b"aa,bb\n1,1.0\n2,2.0", b"10,20\n30,40"],
]
tsv_blocks = [
[b"aa\tbb\n1\t1.0\n2\t2.0", b"10\t20\n30\t40"],
[b"aa\tbb\n1\t1.0\n2\t2.0", b"10\t20\n30\t40"],
]
@pytest.mark.parametrize(
"reader,blocks", [(pd.read_csv, csv_blocks), (pd.read_table, tsv_blocks)]
)
@read_table_mark
def test_enforce_dtypes(reader, blocks):
head = reader(BytesIO(blocks[0][0]), header=0)
header = blocks[0][0].split(b"\n")[0] + b"\n"
dfs = text_blocks_to_pandas(reader, blocks, header, head, {})
dfs = dask.compute(dfs, scheduler="sync")
assert all(df.dtypes.to_dict() == head.dtypes.to_dict() for df in dfs)
@pytest.mark.parametrize(
"reader,blocks", [(pd.read_csv, csv_blocks), (pd.read_table, tsv_blocks)]
)
@read_table_mark
def test_enforce_columns(reader, blocks):
# Replace second header with different column name
blocks = [blocks[0], [blocks[1][0].replace(b"a", b"A"), blocks[1][1]]]
head = reader(BytesIO(blocks[0][0]), header=0)
header = blocks[0][0].split(b"\n")[0] + b"\n"
with pytest.raises(ValueError):
dfs = text_blocks_to_pandas(reader, blocks, header, head, {}, enforce=True)
dask.compute(*dfs, scheduler="sync")
#############################
# read_csv and read_table #
#############################
@pytest.mark.parametrize(
"dd_read,pd_read,text,sep",
[
(dd.read_csv, pd.read_csv, csv_text, ","),
(dd.read_table, pd.read_table, tsv_text, "\t"),
(dd.read_table, pd.read_table, tsv_text2, r"\s+"),
],
)
@read_table_mark
def test_read_csv(dd_read, pd_read, text, sep):
with filetext(text) as fn:
f = dd_read(fn, blocksize=30, lineterminator=os.linesep, sep=sep)
assert list(f.columns) == ["name", "amount"]
# index may be different
result = f.compute(scheduler="sync").reset_index(drop=True)
assert_eq(result, pd_read(fn, sep=sep))
@pytest.mark.parametrize(
"dd_read,pd_read,text,skip",
[
(dd.read_csv, pd.read_csv, csv_text, 7),
(dd.read_table, pd.read_table, tsv_text, [1, 13]),
],
)
@read_table_mark
def test_read_csv_large_skiprows(dd_read, pd_read, text, skip):
names = ["name", "amount"]
with filetext(text) as fn:
actual = dd_read(fn, skiprows=skip, names=names)
assert_eq(actual, pd_read(fn, skiprows=skip, names=names))
@pytest.mark.parametrize(
"dd_read,pd_read,text,skip",
[
(dd.read_csv, pd.read_csv, csv_text, 7),
(dd.read_table, pd.read_table, tsv_text, [1, 12]),
],
)
@read_table_mark
def test_read_csv_skiprows_only_in_first_partition(dd_read, pd_read, text, skip):
names = ["name", "amount"]
with filetext(text) as fn:
with pytest.warns(UserWarning, match="sample=blocksize"):
actual = dd_read(fn, blocksize=200, skiprows=skip, names=names).compute()
assert_eq(actual, pd_read(fn, skiprows=skip, names=names))
with pytest.warns(UserWarning):
# if new sample does not contain all the skiprows, raise error
with pytest.raises(ValueError):
dd_read(fn, blocksize=30, skiprows=skip, names=names)
@pytest.mark.parametrize(
"dd_read,pd_read,files",
[(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],
)
@read_table_mark
def test_read_csv_files(dd_read, pd_read, files):
with filetexts(files, mode="b"):
df = dd_read("2014-01-*.csv")
assert_eq(df, expected, check_dtype=False)
fn = "2014-01-01.csv"
df = dd_read(fn)
expected2 = pd_read(BytesIO(files[fn]))
assert_eq(df, expected2, check_dtype=False)
@pytest.mark.parametrize(
"dd_read,pd_read,files",
[(dd.read_csv, pd.read_csv, csv_files), (dd.read_table, pd.read_table, tsv_files)],
)
@read_table_mark
def test_read_csv_files_list(dd_read, pd_read, files):
with filetexts(files, mode="b"):
subset = sorted(files)[:2] # Just first 2
sol = pd.concat([pd_read(BytesIO(files[k])) for k in subset])
res = dd_read(subset)
assert_eq(res, sol, check_dtype=False)
with pytest.raises(ValueError):
dd_read([])
@pytest.mark.parametrize(
"dd_read,files", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]
)
@read_table_mark
def test_read_csv_include_path_column(dd_read, files):
with filetexts(files, mode="b"):
df = dd_read(
"2014-01-*.csv",
include_path_column=True,
converters={"path": parse_filename},
)
filenames = df.path.compute().unique()
assert "2014-01-01.csv" in filenames
assert "2014-01-02.csv" not in filenames
assert "2014-01-03.csv" in filenames
@pytest.mark.parametrize(
"dd_read,files", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]
)
@read_table_mark
def test_read_csv_include_path_column_as_str(dd_read, files):
with filetexts(files, mode="b"):
df = dd_read(
"2014-01-*.csv",
include_path_column="filename",
converters={"filename": parse_filename},
)
filenames = df.filename.compute().unique()
assert "2014-01-01.csv" in filenames
assert "2014-01-02.csv" not in filenames
assert "2014-01-03.csv" in filenames
@pytest.mark.parametrize(
"dd_read,files", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]
)
@read_table_mark
def test_read_csv_include_path_column_with_duplicate_name(dd_read, files):
with filetexts(files, mode="b"):
with pytest.raises(ValueError):
dd_read("2014-01-*.csv", include_path_column="name")
@pytest.mark.parametrize(
"dd_read,files", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]
)
@read_table_mark
def test_read_csv_include_path_column_is_dtype_category(dd_read, files):
with filetexts(files, mode="b"):
df = dd_read("2014-01-*.csv", include_path_column=True)
assert df.path.dtype == "category"
assert has_known_categories(df.path)
dfs = dd_read("2014-01-*.csv", include_path_column=True)
result = dfs.compute()
assert result.path.dtype == "category"
assert has_known_categories(result.path)
@pytest.mark.parametrize(
"dd_read,files", [(dd.read_csv, csv_files), (dd.read_table, tsv_files)]
)
@read_table_mark
def test_read_csv_include_path_column_with_multiple_partitions_per_file(dd_read, files):
with filetexts(files, mode="b"):
df = dd_read("2014-01-*.csv", blocksize="10B", include_path_column=True)
assert df.npartitions > 3
assert df.path.dtype == "category"
assert has_known_categories(df.path)
dfs = dd_read("2014-01-*.csv", blocksize="10B", include_path_column=True)
result = dfs.compute()
assert result.path.dtype == "category"
assert has_known_categories(result.path)
# After this point, we test just using read_csv, as all functionality
# for both is implemented using the same code.
def test_read_csv_index():
with filetext(csv_text) as fn:
f = dd.read_csv(fn, blocksize=20).set_index("amount")
result = f.compute(scheduler="sync")
assert result.index.name == "amount"
blocks = compute_as_if_collection(
dd.DataFrame, f.dask, f.__dask_keys__(), scheduler="sync"
)
for i, block in enumerate(blocks):
if i < len(f.divisions) - 2:
assert (block.index < f.divisions[i + 1]).all()
if i > 0:
assert (block.index >= f.divisions[i]).all()
expected = pd.read_csv(fn).set_index("amount")
assert_eq(result, expected)
def test_read_csv_skiprows_range():
with filetext(csv_text) as fn:
f = dd.read_csv(fn, skiprows=range(5))
result = f
expected = pd.read_csv(fn, skiprows=range(5))
assert_eq(result, expected)
def test_usecols():
with filetext(timeseries) as fn:
df = dd.read_csv(fn, blocksize=30, usecols=["High", "Low"])
expected = pd.read_csv(fn, usecols=["High", "Low"])
assert (df.compute().values == expected.values).all()
def test_string_blocksize():
with filetext(timeseries) as fn:
a = dd.read_csv(fn, blocksize="30B")
b = dd.read_csv(fn, blocksize="30")
assert a.npartitions == b.npartitions
c = dd.read_csv(fn, blocksize="64MiB")
assert c.npartitions == 1
def test_skipinitialspace():
text = normalize_text(
"""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
"""
)
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, blocksize=20)
assert "amount" in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes():
text = normalize_text(
"""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
"""
)
with filetext(text) as fn:
df = dd.read_csv(fn, blocksize=30)
assert df.amount.compute().dtype == float
def test_consistent_dtypes_2():
text1 = normalize_text(
"""
name,amount
Alice,100
Bob,-200
Charlie,300
"""
)
text2 = normalize_text(
"""
name,amount
1,400
2,-500
Frank,600
"""
)
with filetexts({"foo.1.csv": text1, "foo.2.csv": text2}):
df = dd.read_csv("foo.*.csv", blocksize=25)
assert df.name.dtype == object
assert df.name.compute().dtype == object
def test_categorical_dtypes():
text1 = normalize_text(
"""
fruit,count
apple,10
apple,25
pear,100
orange,15
"""
)
text2 = normalize_text(
"""
fruit,count
apple,200
banana,300
orange,400
banana,10
"""
)
with filetexts({"foo.1.csv": text1, "foo.2.csv": text2}):
df = dd.read_csv("foo.*.csv", dtype={"fruit": "category"}, blocksize=25)
assert df.fruit.dtype == "category"
assert not has_known_categories(df.fruit)
res = df.compute()
assert res.fruit.dtype == "category"
assert sorted(res.fruit.cat.categories) == ["apple", "banana", "orange", "pear"]
def test_categorical_known():
text1 = normalize_text(
"""
A,B
a,a
b,b
a,a
"""
)
text2 = normalize_text(
"""
A,B
a,a
b,b
c,c
"""
)
dtype = pd.api.types.CategoricalDtype(["a", "b", "c"], ordered=False)
with filetexts({"foo.1.csv": text1, "foo.2.csv": text2}):
result = dd.read_csv("foo.*.csv", dtype={"A": "category", "B": "category"})
assert result.A.cat.known is False
assert result.B.cat.known is False
expected = pd.DataFrame(
{
"A": pd.Categorical(
["a", "b", "a", "a", "b", "c"], categories=dtype.categories
),
"B": pd.Categorical(
["a", "b", "a", "a", "b", "c"], categories=dtype.categories
),
},
index=[0, 1, 2, 0, 1, 2],
)
assert_eq(result, expected)
# Specify a dtype
result = dd.read_csv("foo.*.csv", dtype={"A": dtype, "B": "category"})
assert result.A.cat.known is True
assert result.B.cat.known is False
tm.assert_index_equal(result.A.cat.categories, dtype.categories)
assert result.A.cat.ordered is False
assert_eq(result, expected)
# ordered
dtype = pd.api.types.CategoricalDtype(["a", "b", "c"], ordered=True)
result = dd.read_csv("foo.*.csv", dtype={"A": dtype, "B": "category"})
expected["A"] = expected["A"].cat.as_ordered()
assert result.A.cat.known is True
assert result.B.cat.known is False
assert result.A.cat.ordered is True
assert_eq(result, expected)
# Specify "unknown" categories
result = dd.read_csv(
"foo.*.csv", dtype=pd.api.types.CategoricalDtype(ordered=False)
)
assert result.A.cat.known is False
result = dd.read_csv("foo.*.csv", dtype="category")
assert result.A.cat.known is False
@pytest.mark.slow
@pytest.mark.parametrize("compression", ["infer", "gzip"])
def test_compression_multiple_files(compression):
with tmpdir() as tdir:
f = gzip.open(os.path.join(tdir, "a.csv.gz"), "wb")
f.write(csv_text.encode())
f.close()
f = gzip.open(os.path.join(tdir, "b.csv.gz"), "wb")
f.write(csv_text.encode())
f.close()
with pytest.warns(UserWarning):
df = dd.read_csv(os.path.join(tdir, "*.csv.gz"), compression=compression)
assert len(df.compute()) == (len(csv_text.split("\n")) - 1) * 2
def test_empty_csv_file():
with filetext("a,b") as fn:
df = dd.read_csv(fn, header=0)
assert len(df.compute()) == 0
assert list(df.columns) == ["a", "b"]
def test_read_csv_no_sample():
with filetexts(csv_files, mode="b") as fn:
df = dd.read_csv(fn, sample=False)
assert list(df.columns) == ["name", "amount", "id"]
def test_read_csv_sensitive_to_enforce():
with filetexts(csv_files, mode="b"):
a = dd.read_csv("2014-01-*.csv", enforce=True)
b = dd.read_csv("2014-01-*.csv", enforce=False)
assert a._name != b._name
@pytest.mark.parametrize("blocksize", [None, 10])
@pytest.mark.parametrize("fmt", compression_fmts)
def test_read_csv_compression(fmt, blocksize):
if fmt and fmt not in compress:
pytest.skip("compress function not provided for %s" % fmt)
suffix = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}.get(fmt, "")
files2 = valmap(compress[fmt], csv_files) if fmt else csv_files
renamed_files = {k + suffix: v for k, v in files2.items()}
with filetexts(renamed_files, mode="b"):
# This test is using `compression="infer"` (the default) for
# read_csv. The paths must have the appropriate extension.
if fmt and blocksize:
with pytest.warns(UserWarning):
df = dd.read_csv("2014-01-*.csv" + suffix, blocksize=blocksize)
else:
df = dd.read_csv("2014-01-*.csv" + suffix, blocksize=blocksize)
assert_eq(
df.compute(scheduler="sync").reset_index(drop=True),
expected.reset_index(drop=True),
check_dtype=False,
)
@pytest.mark.skip
def test_warn_non_seekable_files():
files2 = valmap(compress["gzip"], csv_files)
with filetexts(files2, mode="b"):
with pytest.warns(UserWarning) as w:
df = dd.read_csv("2014-01-*.csv", compression="gzip")
assert df.npartitions == 3
assert len(w) == 1
msg = str(w[0].message)
assert "gzip" in msg
assert "blocksize=None" in msg
with pytest.warns(None) as w:
df = dd.read_csv("2014-01-*.csv", compression="gzip", blocksize=None)
assert len(w) == 0
with pytest.raises(NotImplementedError):
with pytest.warns(UserWarning): # needed for pytest
df = dd.read_csv("2014-01-*.csv", compression="foo")
def test_windows_line_terminator():
text = "a,b\r\n1,2\r\n2,3\r\n3,4\r\n4,5\r\n5,6\r\n6,7"
with filetext(text) as fn:
df = dd.read_csv(fn, blocksize=5, lineterminator="\r\n")
assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7
assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5 + 6
def test_header_None():
with filetexts({".tmp.1.csv": "1,2", ".tmp.2.csv": "", ".tmp.3.csv": "3,4"}):
df = dd.read_csv(".tmp.*.csv", header=None)
expected = pd.DataFrame({0: [1, 3], 1: [2, 4]})
assert_eq(df.compute().reset_index(drop=True), expected)
def test_auto_blocksize():
assert isinstance(auto_blocksize(3000, 15), int)
assert auto_blocksize(3000, 3) == 100
assert auto_blocksize(5000, 2) == 250
def test__infer_block_size(monkeypatch):
"""
psutil returns a total memory of `None` on some systems
see https://github.com/dask/dask/pull/7601
"""
psutil = pytest.importorskip("psutil")
class MockOutput:
total = None
def mock_virtual_memory():
return MockOutput
monkeypatch.setattr(psutil, "virtual_memory", mock_virtual_memory)
assert _infer_block_size()
def test_auto_blocksize_max64mb():
blocksize = auto_blocksize(1000000000000, 3)
assert blocksize == int(64e6)
assert isinstance(blocksize, int)
def test_auto_blocksize_csv(monkeypatch):
psutil = pytest.importorskip("psutil")
total_memory = psutil.virtual_memory().total
cpu_count = psutil.cpu_count()
mock_read_bytes = mock.Mock(wraps=read_bytes)
monkeypatch.setattr(dask.dataframe.io.csv, "read_bytes", mock_read_bytes)
expected_block_size = auto_blocksize(total_memory, cpu_count)
with filetexts(csv_files, mode="b"):
dd.read_csv("2014-01-01.csv")
assert mock_read_bytes.called
assert mock_read_bytes.call_args[1]["blocksize"] == expected_block_size
def test_head_partial_line_fix():
files = {
".overflow1.csv": (
"a,b\n0,'abcdefghijklmnopqrstuvwxyz'\n1,'abcdefghijklmnopqrstuvwxyz'"
),
".overflow2.csv": "a,b\n111111,-11111\n222222,-22222\n333333,-33333\n",
}
with filetexts(files):
        # 64 byte file, 52 characters is mid-quote; this should not cause an exception in the head-handling code.
dd.read_csv(".overflow1.csv", sample=52)
        # 35 characters cuts off before the second number on the last line
# Should sample to end of line, otherwise pandas will infer `b` to be
# a float dtype
df = dd.read_csv(".overflow2.csv", sample=35)
assert (df.dtypes == "i8").all()
def test_read_csv_raises_on_no_files():
fn = ".not.a.real.file.csv"
try:
dd.read_csv(fn)
assert False
except (OSError, IOError) as e:
assert fn in str(e)
def test_read_csv_has_deterministic_name():
with filetext(csv_text) as fn:
a = dd.read_csv(fn)
b = dd.read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
assert isinstance(a._name, str)
c = dd.read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
with filetexts({"_foo.1.csv": csv_text, "_foo.2.csv": csv_text}):
a = dd.read_csv("_foo.*.csv")
b = dd.read_csv("_foo.*.csv")
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
def test_read_csv_has_different_names_based_on_blocksize():
with filetext(csv_text) as fn:
a = dd.read_csv(fn, blocksize="10kB")
b = dd.read_csv(fn, blocksize="20kB")
assert a._name != b._name
def test_csv_with_integer_names():
with filetext("alice,1\nbob,2") as fn:
df = dd.read_csv(fn, header=None)
assert list(df.columns) == [0, 1]
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(csv_text) as fn:
sleep(1)
a = dd.read_csv(fn)
sleep(1)
with open(fn, "a") as f:
f.write("\nGeorge,700")
os.fsync(f)
b = dd.read_csv(fn)
assert sorted(a.dask, key=str) != sorted(b.dask, key=str)
def test_late_dtypes():
text = "numbers,names,more_numbers,integers,dates\n"
for i in range(1000):
text += "1,,2,3,2017-10-31 00:00:00\n"
text += "1.5,bar,2.5,3,4998-01-01 00:00:00\n"
date_msg = (
"\n"
"\n"
"-------------------------------------------------------------\n"
"\n"
"The following columns also failed to properly parse as dates:\n"
"\n"
"- dates\n"
"\n"
"This is usually due to an invalid value in that column. To\n"
"diagnose and fix it's recommended to drop these columns from the\n"
"`parse_dates` keyword, and manually convert them to dates later\n"
"using `dd.to_datetime`."
)
with filetext(text) as fn:
sol = pd.read_csv(fn)
msg = (
"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n"
"\n"
"+--------------+---------+----------+\n"
"| Column | Found | Expected |\n"
"+--------------+---------+----------+\n"
"| more_numbers | float64 | int64 |\n"
"| names | object | float64 |\n"
"| numbers | float64 | int64 |\n"
"+--------------+---------+----------+\n"
"\n"
"- names\n"
" ValueError(.*)\n"
"\n"
"Usually this is due to dask's dtype inference failing, and\n"
"*may* be fixed by specifying dtypes manually by adding:\n"
"\n"
"dtype={'more_numbers': 'float64',\n"
" 'names': 'object',\n"
" 'numbers': 'float64'}\n"
"\n"
"to the call to `read_csv`/`read_table`."
)
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50, parse_dates=["dates"]).compute(scheduler="sync")
assert e.match(msg + date_msg)
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50).compute(scheduler="sync")
assert e.match(msg)
msg = (
"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n"
"\n"
"+--------------+---------+----------+\n"
"| Column | Found | Expected |\n"
"+--------------+---------+----------+\n"
"| more_numbers | float64 | int64 |\n"
"| numbers | float64 | int64 |\n"
"+--------------+---------+----------+\n"
"\n"
"Usually this is due to dask's dtype inference failing, and\n"
"*may* be fixed by specifying dtypes manually by adding:\n"
"\n"
"dtype={'more_numbers': 'float64',\n"
" 'numbers': 'float64'}\n"
"\n"
"to the call to `read_csv`/`read_table`.\n"
"\n"
"Alternatively, provide `assume_missing=True` to interpret\n"
"all unspecified integer columns as floats."
)
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50, dtype={"names": "O"}).compute(scheduler="sync")
assert str(e.value) == msg
with pytest.raises(ValueError) as e:
dd.read_csv(
fn, sample=50, parse_dates=["dates"], dtype={"names": "O"}
).compute(scheduler="sync")
assert str(e.value) == msg + date_msg
msg = (
"Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n"
"\n"
"The following columns failed to properly parse as dates:\n"
"\n"
"- dates\n"
"\n"
"This is usually due to an invalid value in that column. To\n"
"diagnose and fix it's recommended to drop these columns from the\n"
"`parse_dates` keyword, and manually convert them to dates later\n"
"using `dd.to_datetime`."
)
with pytest.raises(ValueError) as e:
dd.read_csv(
fn,
sample=50,
parse_dates=["dates"],
dtype={"more_numbers": float, "names": object, "numbers": float},
).compute(scheduler="sync")
assert str(e.value) == msg
# Specifying dtypes works
res = dd.read_csv(
fn,
sample=50,
dtype={"more_numbers": float, "names": object, "numbers": float},
)
assert_eq(res, sol)
def test_assume_missing():
text = "numbers,names,more_numbers,integers\n"
for i in range(1000):
text += "1,foo,2,3\n"
text += "1.5,bar,2.5,3\n"
with filetext(text) as fn:
sol = pd.read_csv(fn)
# assume_missing affects all columns
res = dd.read_csv(fn, sample=50, assume_missing=True)
assert_eq(res, sol.astype({"integers": float}))
# assume_missing doesn't override specified dtypes
res = dd.read_csv(
fn, sample=50, assume_missing=True, dtype={"integers": "int64"}
)
assert_eq(res, sol)
# assume_missing works with dtype=None
res = dd.read_csv(fn, sample=50, assume_missing=True, dtype=None)
assert_eq(res, sol.astype({"integers": float}))
text = "numbers,integers\n"
for i in range(1000):
text += "1,2\n"
text += "1.5,2\n"
with filetext(text) as fn:
sol = pd.read_csv(fn)
        # assume_missing is ignored when all dtypes are specified
df = dd.read_csv(fn, sample=30, dtype="int64", assume_missing=True)
assert df.numbers.dtype == "int64"
def test_index_col():
with filetext(csv_text) as fn:
try:
dd.read_csv(fn, blocksize=30, index_col="name")
assert False
except ValueError as e:
assert "set_index" in str(e)
def test_read_csv_with_datetime_index_partitions_one():
with filetext(timeseries) as fn:
df = pd.read_csv(
fn, index_col=0, header=0, usecols=[0, 4], parse_dates=["Date"]
)
        # blocksize explicitly set to a single chunk
ddf = dd.read_csv(
fn, header=0, usecols=[0, 4], parse_dates=["Date"], blocksize=10000000
).set_index("Date")
assert_eq(df, ddf)
# because fn is so small, by default, this will only be one chunk
ddf = dd.read_csv(fn, header=0, usecols=[0, 4], parse_dates=["Date"]).set_index(
"Date"
)
assert_eq(df, ddf)
def test_read_csv_with_datetime_index_partitions_n():
with filetext(timeseries) as fn:
df = pd.read_csv(
fn, index_col=0, header=0, usecols=[0, 4], parse_dates=["Date"]
)
# because fn is so small, by default, set chunksize small
ddf = dd.read_csv(
fn, header=0, usecols=[0, 4], parse_dates=["Date"], blocksize=400
).set_index("Date")
assert_eq(df, ddf)
xfail_pandas_100 = pytest.mark.xfail(
dd._compat.PANDAS_GT_100, reason="https://github.com/dask/dask/issues/5787"
)
@pytest.mark.parametrize(
"encoding",
[
pytest.param("utf-16", marks=xfail_pandas_100),
pytest.param("utf-16-le", marks=xfail_pandas_100),
"utf-16-be",
],
)
def test_encoding_gh601(encoding):
ar = pd.Series(range(0, 100))
br = ar % 7
cr = br * 3.3
dr = br / 1.9836
test_df = pd.DataFrame({"a": ar, "b": br, "c": cr, "d": dr})
with tmpfile(".csv") as fn:
test_df.to_csv(fn, encoding=encoding, index=False)
a = pd.read_csv(fn, encoding=encoding)
d = dd.read_csv(fn, encoding=encoding, blocksize=1000)
d = d.compute()
d.index = range(len(d.index))
assert_eq(d, a)
def test_read_csv_header_issue_823():
text = """a b c-d\n1 2 3\n4 5 6""".replace(" ", "\t")
with filetext(text) as fn:
df = dd.read_csv(fn, sep="\t")
assert_eq(df, pd.read_csv(fn, sep="\t"))
df = dd.read_csv(fn, delimiter="\t")
assert_eq(df, pd.read_csv(fn, delimiter="\t"))
def test_none_usecols():
with filetext(csv_text) as fn:
df = dd.read_csv(fn, usecols=None)
assert_eq(df, pd.read_csv(fn, usecols=None))
def test_parse_dates_multi_column():
pdmc_text = normalize_text(
"""
ID,date,time
10,2003-11-04,180036
11,2003-11-05,125640
12,2003-11-01,2519
13,2003-10-22,142559
14,2003-10-24,163113
15,2003-10-20,170133
16,2003-11-11,160448
17,2003-11-03,171759
18,2003-11-07,190928
19,2003-10-21,84623
20,2003-10-25,192207
21,2003-11-13,180156
22,2003-11-15,131037
"""
)
with filetext(pdmc_text) as fn:
ddf = dd.read_csv(fn, parse_dates=[["date", "time"]])
df = pd.read_csv(fn, parse_dates=[["date", "time"]])
assert (df.columns == ddf.columns).all()
assert len(df) == len(ddf)
def test_read_csv_sep():
sep_text = normalize_text(
"""
name###amount
alice###100
bob###200
charlie###300"""
)
with filetext(sep_text) as fn:
ddf = dd.read_csv(fn, sep="###", engine="python")
df = pd.read_csv(fn, sep="###", engine="python")
assert (df.columns == ddf.columns).all()
assert len(df) == len(ddf)
def test_read_csv_slash_r():
data = b"0,my\n1,data\n" * 1000 + b"2,foo\rbar"
with filetext(data, mode="wb") as fn:
dd.read_csv(
fn,
header=None,
sep=",",
lineterminator="\n",
names=["a", "b"],
blocksize=200,
).compute(scheduler="sync")
def test_read_csv_singleton_dtype():
data = b"a,b\n1,2\n3,4\n5,6"
with filetext(data, mode="wb") as fn:
assert_eq(pd.read_csv(fn, dtype=float), dd.read_csv(fn, dtype=float))
def test_robust_column_mismatch():
files = csv_files.copy()
k = sorted(files)[-1]
files[k] = files[k].replace(b"name", b"Name")
with filetexts(files, mode="b"):
ddf = dd.read_csv("2014-01-*.csv")
df = pd.read_csv("2014-01-01.csv")
assert (df.columns == ddf.columns).all()
assert_eq(ddf, ddf)
def test_error_if_sample_is_too_small():
text = "AAAAA,BBBBB,CCCCC,DDDDD,EEEEE\n1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15"
with filetext(text) as fn:
# Sample size stops mid header row
sample = 20
with pytest.raises(ValueError):
dd.read_csv(fn, sample=sample)
# Saying no header means this is fine
assert_eq(
dd.read_csv(fn, sample=sample, header=None), pd.read_csv(fn, header=None)
)
skiptext = "# skip\n# these\n# lines\n"
text = skiptext + text
with filetext(text) as fn:
# Sample size stops mid header row
sample = 20 + len(skiptext)
with pytest.raises(ValueError):
dd.read_csv(fn, sample=sample, skiprows=3)
# Saying no header means this is fine
assert_eq(
dd.read_csv(fn, sample=sample, header=None, skiprows=3),
pd.read_csv(fn, header=None, skiprows=3),
)
def test_read_csv_names_not_none():
text = (
"Alice,100\n"
"Bob,-200\n"
"Charlie,300\n"
"Dennis,400\n"
"Edith,-500\n"
"Frank,600\n"
)
names = ["name", "amount"]
with filetext(text) as fn:
ddf = dd.read_csv(fn, names=names, blocksize=16)
df = pd.read_csv(fn, names=names)
assert_eq(df, ddf, check_index=False)
############
# to_csv #
############
def test_to_csv():
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpdir() as dn:
a.to_csv(dn, index=False)
result = dd.read_csv(os.path.join(dn, "*")).compute().reset_index(drop=True)
assert_eq(result, df)
with tmpdir() as dn:
r = a.to_csv(dn, index=False, compute=False)
dask.compute(*r, scheduler="sync")
result = dd.read_csv(os.path.join(dn, "*")).compute().reset_index(drop=True)
assert_eq(result, df)
with tmpdir() as dn:
fn = os.path.join(dn, "data_*.csv")
a.to_csv(fn, index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df)
def test_to_csv_multiple_files_cornercases():
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
with pytest.raises(ValueError):
fn = os.path.join(dn, "data_*_*.csv")
a.to_csv(fn)
df16 = pd.DataFrame(
{
"x": [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
],
"y": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
}
)
a = dd.from_pandas(df16, 16)
with tmpdir() as dn:
fn = os.path.join(dn, "data_*.csv")
a.to_csv(fn, index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df16)
# test handling existing files when links are optimized out
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
a.to_csv(dn, index=False)
fn = os.path.join(dn, "data_*.csv")
a.to_csv(fn, mode="w", index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df)
# test handling existing files when links are optimized out
a = dd.from_pandas(df16, 16)
with tmpdir() as dn:
a.to_csv(dn, index=False)
fn = os.path.join(dn, "data_*.csv")
a.to_csv(fn, mode="w", index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df16)
def test_to_single_csv():
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpdir() as dn:
fn = os.path.join(dn, "test.csv")
a.to_csv(fn, index=False, single_file=True)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df)
with tmpdir() as dn:
fn = os.path.join(dn, "test.csv")
r = a.to_csv(fn, index=False, compute=False, single_file=True)
dask.compute(r, scheduler="sync")
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df)
def test_to_single_csv_with_name_function():
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
a = dd.from_pandas(df, 1)
with tmpdir() as dn:
fn = os.path.join(dn, "test.csv")
with pytest.raises(
ValueError,
match="name_function is not supported under the single file mode",
):
a.to_csv(fn, name_function=lambda x: x, index=False, single_file=True)
def test_to_single_csv_with_header_first_partition_only():
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
a = dd.from_pandas(df, 1)
with tmpdir() as dn:
fn = os.path.join(dn, "test.csv")
with pytest.raises(
ValueError,
match="header_first_partition_only cannot be False in the single file mode.",
):
a.to_csv(
fn, index=False, header_first_partition_only=False, single_file=True
)
def test_to_single_csv_gzip():
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpdir() as dn:
fn = os.path.join(dn, "test.csv.gz")
a.to_csv(fn, index=False, compression="gzip", single_file=True)
result = pd.read_csv(fn, compression="gzip").reset_index(drop=True)
assert_eq(result, df)
@pytest.mark.xfail(reason="to_csv does not support compression")
def test_to_csv_gzip():
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]
)
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile("csv") as fn:
a.to_csv(fn, compression="gzip")
result = pd.read_csv(fn, index_col=0, compression="gzip")
tm.assert_frame_equal(result, df)
def test_to_csv_nodir():
# See #6062 https://github.com/intake/filesystem_spec/pull/271 and
df0 = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]
)
df = dd.from_pandas(df0, npartitions=2)
with tmpdir() as dir:
dir0 = os.path.join(str(dir), "createme")
df.to_csv(dir0)
assert "createme" in os.listdir(dir)
assert os.listdir(dir0)
result = dd.read_csv(os.path.join(dir0, "*")).compute()
assert (result.x.values == df0.x.values).all()
def test_to_csv_simple():
df0 = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]}, index=[1.0, 2.0, 3.0, 4.0]
)
df = dd.from_pandas(df0, npartitions=2)
with tmpdir() as dir:
dir = str(dir)
df.to_csv(dir)
assert os.listdir(dir)
result = dd.read_csv(os.path.join(dir, "*")).compute()
assert (result.x.values == df0.x.values).all()
def test_to_csv_series():
df0 = pd.Series(["a", "b", "c", "d"], index=[1.0, 2.0, 3.0, 4.0])
df = dd.from_pandas(df0, npartitions=2)
with tmpdir() as dir:
dir = str(dir)
df.to_csv(dir, header=False)
assert os.listdir(dir)
result = dd.read_csv(os.path.join(dir, "*"), header=None, names=["x"]).compute()
assert (result.x == df0).all()
def test_to_csv_with_get():
from dask.multiprocessing import get as mp_get
flag = [False]
def my_get(*args, **kwargs):
flag[0] = True
return mp_get(*args, **kwargs)
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
with tmpdir() as dn:
ddf.to_csv(dn, index=False, compute_kwargs={"scheduler": my_get})
assert flag[0]
result = dd.read_csv(os.path.join(dn, "*"))
assert_eq(result, df, check_index=False)
def test_to_csv_warns_using_scheduler_argument():
from dask.multiprocessing import get as mp_get
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
def my_get(*args, **kwargs):
return mp_get(*args, **kwargs)
with tmpdir() as dn:
with pytest.warns(FutureWarning):
ddf.to_csv(dn, index=False, scheduler=my_get)
def test_to_csv_errors_using_multiple_scheduler_args():
from dask.multiprocessing import get as mp_get
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
def my_get(*args, **kwargs):
return mp_get(*args, **kwargs)
with tmpdir() as dn:
        with pytest.raises(ValueError), pytest.warns(FutureWarning):
ddf.to_csv(
dn, index=False, scheduler=my_get, compute_kwargs={"scheduler": my_get}
)
def test_to_csv_keeps_all_non_scheduler_compute_kwargs():
from dask.multiprocessing import get as mp_get
def my_get(*args, **kwargs):
assert kwargs["test_kwargs_passed"] == "foobar"
return mp_get(*args, **kwargs)
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
with tmpdir() as dn:
ddf.to_csv(
dn,
index=False,
compute_kwargs={"scheduler": my_get, "test_kwargs_passed": "foobar"},
)
def test_to_csv_paths():
df = pd.DataFrame({"A": range(10)})
ddf = dd.from_pandas(df, npartitions=2)
paths = ddf.to_csv("foo*.csv")
assert paths[0].endswith("foo0.csv")
assert paths[1].endswith("foo1.csv")
os.remove("foo0.csv")
os.remove("foo1.csv")
@pytest.mark.parametrize("header, expected", [(False, ""), (True, "x,y\n")])
def test_to_csv_header_empty_dataframe(header, expected):
dfe = pd.DataFrame({"x": [], "y": []})
ddfe = dd.from_pandas(dfe, npartitions=1)
with tmpdir() as dn:
ddfe.to_csv(os.path.join(dn, "fooe*.csv"), index=False, header=header)
assert not os.path.exists(os.path.join(dn, "fooe1.csv"))
filename = os.path.join(dn, "fooe0.csv")
with open(filename, "r") as fp:
line = fp.readline()
assert line == expected
os.remove(filename)
@pytest.mark.parametrize(
"header,header_first_partition_only,expected_first,expected_next",
[
(False, False, "a,1\n", "d,4\n"),
(True, False, "x,y\n", "x,y\n"),
(False, True, "a,1\n", "d,4\n"),
(True, True, "x,y\n", "d,4\n"),
(["aa", "bb"], False, "aa,bb\n", "aa,bb\n"),
(["aa", "bb"], True, "aa,bb\n", "d,4\n"),
],
)
def test_to_csv_header(
header, header_first_partition_only, expected_first, expected_next
):
partition_count = 2
df = pd.DataFrame({"x": ["a", "b", "c", "d", "e", "f"], "y": [1, 2, 3, 4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=partition_count)
with tmpdir() as dn:
# Test NO header case
# (header=False, header_first_chunk_only not passed)
ddf.to_csv(
os.path.join(dn, "fooa*.csv"),
index=False,
header=header,
header_first_partition_only=header_first_partition_only,
)
filename = os.path.join(dn, "fooa0.csv")
with open(filename, "r") as fp:
line = fp.readline()
assert line == expected_first
os.remove(filename)
filename = os.path.join(dn, "fooa1.csv")
with open(filename, "r") as fp:
line = fp.readline()
assert line == expected_next
os.remove(filename)
def test_to_csv_line_ending():
df = pd.DataFrame({"x": [0]})
ddf = dd.from_pandas(df, npartitions=1)
expected = {b"0\r\n", b"0\n"} # either/or
# For comparison...
# unexpected = {b'0\r\r\n'}
# This test addresses GH4809, and checks that only (at most) one
# '\r' character is written per line when writing to csv.
# In case it's correct (on UNIX) to have no '\r' at all, this test
# considers either '\r\n' or '\n' as appropriate line endings,
# but not '\r\r\n'.
with tmpdir() as dn:
ddf.to_csv(os.path.join(dn, "foo*.csv"), header=False, index=False)
filename = os.path.join(dn, "foo0.csv")
with open(filename, "rb") as f:
raw = f.read()
assert raw in expected
@pytest.mark.parametrize(
"block_lists",
[
[[1, 2], [3], [4, 5, 6]],
[],
[[], [], [1], [], [1]],
[list(range(i)) for i in range(10)],
],
)
def test_block_mask(block_lists):
mask = list(block_mask(block_lists))
assert len(mask) == len(list(flatten(block_lists)))
def test_reading_empty_csv_files_with_path():
with tmpdir() as tdir:
for k, content in enumerate(["0, 1, 2", "", "6, 7, 8"]):
with open(os.path.join(tdir, str(k) + ".csv"), "w") as file:
file.write(content)
result = dd.read_csv(
os.path.join(tdir, "*.csv"),
include_path_column=True,
converters={"path": parse_filename},
names=["A", "B", "C"],
).compute()
df = pd.DataFrame(
{
"A": [0, 6],
"B": [1, 7],
"C": [2, 8],
"path": ["0.csv", "2.csv"],
}
)
df["path"] = df["path"].astype("category")
assert_eq(result, df, check_index=False)
def test_read_csv_groupby_get_group(tmpdir):
# https://github.com/dask/dask/issues/7005
path = os.path.join(str(tmpdir), "test.csv")
df1 = pd.DataFrame([{"foo": 10, "bar": 4}])
df1.to_csv(path, index=False)
ddf1 = dd.read_csv(path)
ddfs = ddf1.groupby("foo")
assert_eq(df1, ddfs.get_group(10).compute())
| 31.234262 | 106 | 0.586484 |
0796ab2d599a8af8c406546a4a3881a9c5d9bbc3 | 45 | py | Python | tests/__init__.py | wasilukm/hoymiles_modbus | 00fbb9a31a8dcc8d3d8714214d8774dfea7745a6 | [
"MIT"
]
| null | null | null | tests/__init__.py | wasilukm/hoymiles_modbus | 00fbb9a31a8dcc8d3d8714214d8774dfea7745a6 | [
"MIT"
]
| 3 | 2022-03-12T08:12:47.000Z | 2022-03-15T20:39:01.000Z | tests/__init__.py | wasilukm/hoymiles_modbus | 00fbb9a31a8dcc8d3d8714214d8774dfea7745a6 | [
"MIT"
]
| 1 | 2022-03-11T11:56:21.000Z | 2022-03-11T11:56:21.000Z | """Unit test package for hoymiles_modbus."""
| 22.5 | 44 | 0.733333 |
29fcf7d8a215204e9a1f422ad12a694afbe74e78 | 15,540 | py | Python | clif/testing/python/extend_methods_test.py | timgates42/clif | b865c88beff70b31068889926d1184d5ddc0b9eb | [
"Apache-2.0"
]
| null | null | null | clif/testing/python/extend_methods_test.py | timgates42/clif | b865c88beff70b31068889926d1184d5ddc0b9eb | [
"Apache-2.0"
]
| null | null | null | clif/testing/python/extend_methods_test.py | timgates42/clif | b865c88beff70b31068889926d1184d5ddc0b9eb | [
"Apache-2.0"
]
| null | null | null | # Lint-as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import six
from clif.python import type_customization
from clif.testing.python import extend_methods
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.python import extend_methods_pybind11
except ImportError:
extend_methods_pybind11 = None
# pylint: enable=g-import-not-at-top
# extend_methods_pybind11 = None
HAVE_PB11 = extend_methods_pybind11 is not None
class DummyForExtend:
"""Used only if pybind11 is not available."""
def DirectlyAssigned(self, prefix):
return prefix.upper() + self.__class__.__name__
# Direct assignment for unit test, NOT recommended for general use.
extend_methods.ConcreteHolder.DirectlyAssigned = DirectlyAssigned
extend_methods.VirtualBaseHolder.DirectlyAssigned = DirectlyAssigned
if HAVE_PB11:
extend_methods_pybind11.ConcreteHolder.DirectlyAssigned = DirectlyAssigned
extend_methods_pybind11.VirtualBaseHolder.DirectlyAssigned = DirectlyAssigned
# RECOMMENDED for general use.
for concrete_holder in (extend_methods.ConcreteHolder,
extend_methods_pybind11.ConcreteHolder
if HAVE_PB11 else DummyForExtend):
@type_customization.extend(concrete_holder)
class _(object): # The object base is needed for Python 2 only.
# This works in Python 3 but not in Python 2:
# """Docstring."""
if not six.PY2: # Workaround.
__doc__ = """Added to ConcreteHolder."""
data = 'Data added to ConcreteHolder.'
def AnInstanceMethod(self, prefix):
return ':'.join((prefix.upper(), 'self', self.__class__.__name__))
@classmethod
def AClassMethod(cls, prefix):
return ':'.join((prefix.lower(), 'cls', cls.__name__))
@staticmethod
def AStaticMethod(prefix):
return ':'.join((prefix.capitalize(), 'static'))
@property
def a_property(self):
return ':'.join(('getter', str(self.Get()), self.__class__.__name__))
@a_property.setter
def a_property(self, value):
self.Set(value + 5)
for virtual_base_holder in (extend_methods.VirtualBaseHolder,
extend_methods_pybind11.VirtualBaseHolder
if HAVE_PB11 else DummyForExtend):
@type_customization.extend(virtual_base_holder)
class _(object): # The object base is needed for Python 2 only.
# This works in Python 3 but not in Python 2:
# """Docstring."""
# An alternative workaround is below.
data = 'Data added to VirtualBaseHolder.'
def AnInstanceMethod(self, prefix):
return ':'.join((prefix.capitalize(), 'self', self.__class__.__name__))
@classmethod
def AClassMethod(cls, prefix):
return ':'.join((prefix.upper(), 'cls', cls.__name__))
@staticmethod
def AStaticMethod(prefix):
return ':'.join((prefix.lower(), 'static'))
@property
def a_property(self):
return ':'.join(('Getter', str(self.Get()), self.__class__.__name__))
@a_property.setter
def a_property(self, value):
self.Set(value + 7)
if not six.PY2:
@type_customization.extend(virtual_base_holder)
class _(object):
"""Added to VirtualBaseHolder."""
@type_customization.extend(extend_methods.ConcreteHolder)
class NamedFromClass(object):
def NfcInstanceMethod(self, prefix):
return ':'.join(
(''.join(reversed(prefix)), 'self', self.__class__.__name__))
@classmethod
def NfcClassMethod(cls, prefix):
return ':'.join((''.join(reversed(prefix)), 'cls', cls.__name__))
@staticmethod
def NfcStaticMethod(prefix):
return ':'.join((''.join(reversed(prefix)), 'static'))
@property
def nfc_property(self):
return ':'.join(('GETTER', str(self.Get()), self.__class__.__name__))
@nfc_property.setter
def nfc_property(self, value):
self.Set(value + 11)
@type_customization.extend(
extend_methods_pybind11.ConcreteHolder if HAVE_PB11 else DummyForExtend)
class NamedFromClassPybind11(object):
def NfcInstanceMethod(self, prefix):
return ':'.join(
(''.join(reversed(prefix)), 'self', self.__class__.__name__))
@classmethod
def NfcClassMethod(cls, prefix):
return ':'.join((''.join(reversed(prefix)), 'cls', cls.__name__))
@staticmethod
def NfcStaticMethod(prefix):
return ':'.join((''.join(reversed(prefix)), 'static'))
@property
def nfc_property(self):
return ':'.join(('GETTER', str(self.Get()), self.__class__.__name__))
@nfc_property.setter
def nfc_property(self, value):
self.Set(value + 11)
class NfcInstanceMethodOverride(object):
def NfcInstanceMethod(self, prefix):
return ':'.join(
(''.join(reversed(prefix)).upper(), 'self', self.__class__.__name__))
# Re-use with override.
for virtual_base_holder in (extend_methods.VirtualBaseHolder,
extend_methods_pybind11.VirtualBaseHolder
if HAVE_PB11 else DummyForExtend):
@type_customization.extend(virtual_base_holder)
class _(NfcInstanceMethodOverride, NamedFromClass):
pass
# Keeping the failure tests below at module scope (not inside test methods)
# because this is more representative of the actual (invalid) use cases.
try:
@type_customization.extend(extend_methods.ConcreteHolder)
class _:
pass
except TypeError as e:
_from_class_old_style_failure_test_error = str(e)
else:
_from_class_old_style_failure_test_error = None
class _BaseOldStyleFailureTestBase: # Python 2 old-style.
pass
try:
@type_customization.extend(extend_methods.ConcreteHolder)
class _(_BaseOldStyleFailureTestBase, object):
pass
except TypeError as e:
_base_old_style_failure_test_error = str(e)
else:
_base_old_style_failure_test_error = None
class _BaseBaseFailureTestBase(object):
pass
class _BaseBaseFailureTestDerived(_BaseBaseFailureTestBase):
pass
try:
@type_customization.extend(extend_methods.ConcreteHolder)
class _(_BaseBaseFailureTestDerived):
pass
except TypeError as e:
_base_base_failure_test_error = str(e)
else:
_base_base_failure_test_error = None
try:
@type_customization.extend(extend_methods.ConcreteHolder)
class _(tuple): # Any type exercising rejection of __getattribute__.
pass
except TypeError as e:
_tuple_as_base_failure_test_error = str(e)
else:
_tuple_as_base_failure_test_error = None
if HAVE_PB11:
try:
@type_customization.extend(extend_methods_pybind11.ConcreteHolder)
class _:
pass
except TypeError as e:
_from_class_old_style_failure_test_error = str(e)
else:
_from_class_old_style_failure_test_error = None
try:
@type_customization.extend(extend_methods_pybind11.ConcreteHolder)
class _(_BaseOldStyleFailureTestBase, object):
pass
except TypeError as e:
_base_old_style_failure_test_error = str(e)
else:
_base_old_style_failure_test_error = None
try:
@type_customization.extend(extend_methods_pybind11.ConcreteHolder)
class _(_BaseBaseFailureTestDerived):
pass
except TypeError as e:
_base_base_failure_test_error = str(e)
else:
_base_base_failure_test_error = None
try:
@type_customization.extend(extend_methods_pybind11.ConcreteHolder)
class _(tuple): # Any type exercising rejection of __getattribute__.
pass
except TypeError as e:
_tuple_as_base_failure_test_error = str(e)
else:
_tuple_as_base_failure_test_error = None
@parameterized.named_parameters([
np for np in zip(('c_api', 'pybind11'), (extend_methods,
extend_methods_pybind11))
if np[1] is not None
])
class ExtendMethodsTest(absltest.TestCase):
def testConcreteDirectAssignment(self, wrapper_lib):
ch = wrapper_lib.ConcreteHolder()
s = ch.DirectlyAssigned('Red:')
self.assertEqual(s, 'RED:ConcreteHolder')
def testVirtualDirectAssignment(self, wrapper_lib):
vdh = wrapper_lib.VirtualDerivedHolder()
s = vdh.DirectlyAssigned('Blue:')
self.assertEqual(s, 'BLUE:VirtualDerivedHolder')
def testConcreteExtend(self, wrapper_lib):
if wrapper_lib is extend_methods:
expected_doc = ('CLIF wrapper for ::clif_testing::ConcreteHolder'
if six.PY2 else 'Added to ConcreteHolder.')
self.assertEqual(wrapper_lib.ConcreteHolder.__doc__, expected_doc)
if not six.PY2:
self.assertEqual(wrapper_lib.ConcreteHolder.data,
'Data added to ConcreteHolder.')
ch = wrapper_lib.ConcreteHolder()
s = ch.AnInstanceMethod('Green')
self.assertEqual(s, 'GREEN:self:ConcreteHolder')
s = ch.AClassMethod('Yellow')
self.assertEqual(s, 'yellow:cls:ConcreteHolder')
s = wrapper_lib.ConcreteHolder.AClassMethod('Gray')
self.assertEqual(s, 'gray:cls:ConcreteHolder')
s = ch.AStaticMethod('magenta')
self.assertEqual(s, 'Magenta:static')
s = wrapper_lib.ConcreteHolder.AStaticMethod('silver')
self.assertEqual(s, 'Silver:static')
s = ch.a_property
self.assertEqual(s, 'getter:0:ConcreteHolder')
ch.a_property = 13
s = ch.a_property
self.assertEqual(s, 'getter:18:ConcreteHolder')
def testVirtualExtend(self, wrapper_lib):
if wrapper_lib is extend_methods:
expected_doc = ('CLIF wrapper for ::clif_testing::VirtualBaseHolder'
if six.PY2 else 'Added to VirtualBaseHolder.')
self.assertEqual(wrapper_lib.VirtualBaseHolder.__doc__, expected_doc)
self.assertEqual(wrapper_lib.VirtualDerivedHolder.__doc__,
'CLIF wrapper for ::clif_testing::VirtualDerivedHolder')
self.assertEqual(wrapper_lib.VirtualBaseHolder.data,
'Data added to VirtualBaseHolder.')
self.assertEqual(wrapper_lib.VirtualDerivedHolder.data,
'Data added to VirtualBaseHolder.')
vdh = wrapper_lib.VirtualDerivedHolder()
s = vdh.AnInstanceMethod('orange')
self.assertEqual(s, 'Orange:self:VirtualDerivedHolder')
s = vdh.AClassMethod('Purple')
self.assertEqual(s, 'PURPLE:cls:VirtualDerivedHolder')
s = wrapper_lib.VirtualBaseHolder.AClassMethod('Cyan')
self.assertEqual(s, 'CYAN:cls:VirtualBaseHolder')
s = wrapper_lib.VirtualDerivedHolder.AClassMethod('Mint')
self.assertEqual(s, 'MINT:cls:VirtualDerivedHolder')
s = vdh.AStaticMethod('Black')
self.assertEqual(s, 'black:static')
s = wrapper_lib.VirtualBaseHolder.AStaticMethod('Gold')
self.assertEqual(s, 'gold:static')
s = wrapper_lib.VirtualDerivedHolder.AStaticMethod('Platinum')
self.assertEqual(s, 'platinum:static')
s = vdh.a_property
self.assertEqual(s, 'Getter:0:VirtualDerivedHolder')
vdh.a_property = 29
s = vdh.a_property
self.assertEqual(s, 'Getter:36:VirtualDerivedHolder')
def testNamedFromClass(self, wrapper_lib):
if wrapper_lib is extend_methods_pybind11:
nfc = NamedFromClassPybind11()
pb11_suffix = 'Pybind11'
else:
nfc = NamedFromClass()
pb11_suffix = ''
ch = wrapper_lib.ConcreteHolder()
vdh = wrapper_lib.VirtualDerivedHolder()
s = nfc.NfcInstanceMethod('eeffoc')
self.assertEqual(s, 'coffee:self:NamedFromClass' + pb11_suffix)
s = ch.NfcInstanceMethod('eeffoC')
self.assertEqual(s, 'Coffee:self:ConcreteHolder')
s = vdh.NfcInstanceMethod('eeffoc')
self.assertEqual(s, 'COFFEE:self:VirtualDerivedHolder')
s = nfc.NfcClassMethod('ynobe')
self.assertEqual(s, 'ebony:cls:NamedFromClass' + pb11_suffix)
s = NamedFromClass.NfcClassMethod('yrovI')
self.assertEqual(s, 'Ivory:cls:NamedFromClass')
s = ch.NfcClassMethod('ynobE')
self.assertEqual(s, 'Ebony:cls:ConcreteHolder')
s = wrapper_lib.ConcreteHolder.NfcClassMethod('yrovi')
self.assertEqual(s, 'ivory:cls:ConcreteHolder')
s = vdh.NfcClassMethod('YNOBE')
self.assertEqual(s, 'EBONY:cls:VirtualDerivedHolder')
s = wrapper_lib.VirtualDerivedHolder.NfcClassMethod('YROVI')
self.assertEqual(s, 'IVORY:cls:VirtualDerivedHolder')
s = NamedFromClass.NfcStaticMethod('doow')
self.assertEqual(s, 'wood:static')
s = wrapper_lib.ConcreteHolder.NfcStaticMethod('dooW')
self.assertEqual(s, 'Wood:static')
s = wrapper_lib.VirtualDerivedHolder.NfcStaticMethod('DOOW')
self.assertEqual(s, 'WOOD:static')
with self.assertRaises(AttributeError) as ctx:
nfc.nfc_property # pylint: disable=pointless-statement
self.assertEqual(
str(ctx.exception),
"'NamedFromClass" + pb11_suffix + "' object has no attribute 'Get'")
with self.assertRaises(AttributeError) as ctx:
nfc.nfc_property = 0
self.assertEqual(
str(ctx.exception),
"'NamedFromClass" + pb11_suffix + "' object has no attribute 'Set'")
s = ch.nfc_property
self.assertEqual(s, 'GETTER:0:ConcreteHolder')
ch.nfc_property = 59
s = ch.nfc_property
self.assertEqual(s, 'GETTER:70:ConcreteHolder')
s = vdh.nfc_property
self.assertEqual(s, 'GETTER:0:VirtualDerivedHolder')
vdh.nfc_property = 71
s = vdh.nfc_property
self.assertEqual(s, 'GETTER:82:VirtualDerivedHolder')
def testFromClassOldStyleFailureTestError(self, wrapper_lib):
self.assertIsNotNone(wrapper_lib)
if six.PY2:
self.assertEqual(
_from_class_old_style_failure_test_error,
'extend from_class must be a new-style class.')
else:
self.assertIsNone(_from_class_old_style_failure_test_error)
def testBaseOldStyleFailureTestError(self, wrapper_lib):
self.assertIsNotNone(wrapper_lib)
if six.PY2:
self.assertIn(
'extend base must be a new-style class ',
_base_old_style_failure_test_error)
self.assertIn(
' is not).',
_base_old_style_failure_test_error)
self.assertIn(
'BaseOldStyleFailureTestBase',
_base_old_style_failure_test_error)
else:
self.assertIsNone(_base_old_style_failure_test_error)
def testBaseBaseFailureTestError(self, wrapper_lib):
self.assertIsNotNone(wrapper_lib)
self.assertIsNotNone(_base_base_failure_test_error)
self.assertIn('extend from_class base ', _base_base_failure_test_error)
self.assertIn(
' must not have bases themselves ', _base_base_failure_test_error)
self.assertIn('BaseBaseFailureTestDerived', _base_base_failure_test_error)
self.assertIn('BaseBaseFailureTestBase', _base_base_failure_test_error)
def testTupleAsBaseFailureTestError(self, wrapper_lib):
self.assertIsNotNone(wrapper_lib)
self.assertIn(
'extend base must not have a __getattribute__ attribute (base ',
_tuple_as_base_failure_test_error)
self.assertIn(' does).', _tuple_as_base_failure_test_error)
self.assertIn('tuple', _tuple_as_base_failure_test_error)
if __name__ == '__main__':
absltest.main()
| 33.134328 | 79 | 0.72027 |
7569ba0b0c8f33b5404cafb7e2d90cb439630a01 | 1,353 | py | Python | 05_draw_bbox.py | worldbank/ml4dev | d91f1b2a08067da31364dee60f07274d66929fa5 | [
"MIT"
]
| 49 | 2017-03-06T21:34:03.000Z | 2022-01-16T05:10:12.000Z | 05_draw_bbox.py | HKCaesar/ml4dev | d91f1b2a08067da31364dee60f07274d66929fa5 | [
"MIT"
]
| 3 | 2016-12-21T20:39:15.000Z | 2017-08-18T06:30:30.000Z | 05_draw_bbox.py | HKCaesar/ml4dev | d91f1b2a08067da31364dee60f07274d66929fa5 | [
"MIT"
]
| 16 | 2017-06-20T15:14:36.000Z | 2021-04-19T00:52:26.000Z | '''
We're gonna use the bbox info from OSM to draw a box around the pitch, we
will use this box to better train our ML algo.
'''
from utils.geo import get_rectangle
from utils.geo import WAYS_DATA_FILENAME
import cv2
import json
import os
# We need the elements
# print 'Loading %s...' % WAYS_DATA_FILENAME
# with open(WAYS_DATA_FILENAME, 'r') as f:
# ways_data = json.load(f)
image_files = [f for f in os.listdir('satellite') if f.endswith('.png')]
print len(image_files)
for image_file in image_files:
print 'Processing %s...' % image_file
# The ID is between the last underscore and the extension dot
# For example: pitch_volleyball_268478401.png -> 268478401
# way_id = image_file[image_file.rfind('_') + 1:image_file.find('.')]
# bounds = ways_data[way_id]['bounds']
# Add a rectangle with the feature
# x, y, w, h = get_rectangle(bounds=bounds)
# img = cv2.imread(os.path.join('satellite', image_file))
# cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# cv2.imwrite(os.path.join('satellite/rectangle', image_file), img)
# To show the image
# cv2.imshow('img',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Generate a grayscale version
img_gray = cv2.imread(os.path.join('satellite', image_file), 0)
cv2.imwrite(os.path.join('satellite/gray', image_file), img_gray)
| 33 | 73 | 0.68884 |
28a5072c2dc154a12ddf556f4abb8c0dccacf337 | 662 | py | Python | packages/sqlmap-master/plugins/dbms/h2/filesystem.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
]
| null | null | null | packages/sqlmap-master/plugins/dbms/h2/filesystem.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
]
| null | null | null | packages/sqlmap-master/plugins/dbms/h2/filesystem.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def readFile(self, remoteFile):
errMsg = "on H2 it is not possible to read files"
raise SqlmapUnsupportedFeatureException(errMsg)
def writeFile(self, localFile, remoteFile, fileType=None, forceCheck=False):
errMsg = "on H2 it is not possible to write files"
raise SqlmapUnsupportedFeatureException(errMsg)
| 34.842105 | 80 | 0.756798 |
c432900101e388047353c502028714f97adcebf3 | 142 | py | Python | ch08/08_26.py | leeseedong/book-cryptocurrency | 58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c | [
"Apache-2.0"
]
| 121 | 2019-03-23T13:53:06.000Z | 2022-03-28T15:15:03.000Z | ch08/08_26.py | leeseedong/book-cryptocurrency | 58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c | [
"Apache-2.0"
]
| 3 | 2021-04-14T14:31:26.000Z | 2021-05-09T13:46:14.000Z | ch08/08_26.py | leeseedong/book-cryptocurrency | 58c0bb3f5a80f8cc73ba47c4839be3bd33c9d67c | [
"Apache-2.0"
]
| 114 | 2019-03-21T13:43:03.000Z | 2022-03-31T18:42:11.000Z | import ccxt
binance = ccxt.binance()
orderbook = binance.fetch_order_book('ETH/BTC')
for ask in orderbook['asks']:
print(ask[0], ask[1])
| 20.285714 | 47 | 0.704225 |
f32dea13737b0d207891264af6b1e263f67cbdca | 3,076 | py | Python | Section_04_code/PyQT4/Lib/site-packages/PyQt4/uic/pyuic.py | PacktPublishing/Python-Machine-Learning-Solutions-V- | 8bb80a43a7c64032c25c1023faaa29bbfbd39d45 | [
"MIT"
]
| 1 | 2022-03-16T02:10:30.000Z | 2022-03-16T02:10:30.000Z | Section_04_code/PyQT4/Lib/site-packages/PyQt4/uic/pyuic.py | wensincai/Python-Machine-Learning-Solutions-V- | 130c9881757fa90bbb124d48ddd0c6c1136fa20c | [
"MIT"
]
| null | null | null | Section_04_code/PyQT4/Lib/site-packages/PyQt4/uic/pyuic.py | wensincai/Python-Machine-Learning-Solutions-V- | 130c9881757fa90bbb124d48ddd0c6c1136fa20c | [
"MIT"
]
| 2 | 2019-05-28T11:58:59.000Z | 2020-09-23T17:21:19.000Z | #############################################################################
##
## Copyright (c) 2013 Riverbank Computing Limited <[email protected]>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
import optparse
from PyQt4 import QtCore
from PyQt4.uic.driver import Driver
Version = "Python User Interface Compiler %s for Qt version %s" % (QtCore.PYQT_VERSION_STR, QtCore.QT_VERSION_STR)
if sys.hexversion >= 0x03000000:
from PyQt4.uic.port_v3.invoke import invoke
else:
from PyQt4.uic.port_v2.invoke import invoke
parser = optparse.OptionParser(usage="pyuic4 [options] <ui-file>",
version=Version)
parser.add_option("-p", "--preview", dest="preview", action="store_true",
default=False,
help="show a preview of the UI instead of generating code")
parser.add_option("-o", "--output", dest="output", default="-", metavar="FILE",
help="write generated code to FILE instead of stdout")
parser.add_option("-x", "--execute", dest="execute", action="store_true",
default=False,
help="generate extra code to test and display the class")
parser.add_option("-d", "--debug", dest="debug", action="store_true",
default=False, help="show debug output")
parser.add_option("-i", "--indent", dest="indent", action="store", type="int",
default=4, metavar="N",
help="set indent width to N spaces, tab if N is 0 (default: 4)")
parser.add_option("-w", "--pyqt3-wrapper", dest="pyqt3_wrapper",
action="store_true", default=False,
help="generate a PyQt v3 style wrapper")
g = optparse.OptionGroup(parser, title="Code generation options")
g.add_option("--from-imports", dest="from_imports", action="store_true",
default=False, help="generate imports relative to '.'")
parser.add_option_group(g)
opts, args = parser.parse_args()
if len(args) != 1:
sys.stderr.write("Error: one input ui-file must be specified\n")
sys.exit(1)
sys.exit(invoke(Driver(opts, args[0])))
| 41.013333 | 114 | 0.679454 |
21c3af041ed5642a15fef9056abd9380ed27f77a | 33,912 | py | Python | view-recipes/parse-recipes-p2.py | kbrohkahn/kevin.broh-kahn.com | e6b6427bd745cdf011adb8cf67a7395472f25c20 | [
"Apache-2.0"
]
| null | null | null | view-recipes/parse-recipes-p2.py | kbrohkahn/kevin.broh-kahn.com | e6b6427bd745cdf011adb8cf67a7395472f25c20 | [
"Apache-2.0"
]
| null | null | null | view-recipes/parse-recipes-p2.py | kbrohkahn/kevin.broh-kahn.com | e6b6427bd745cdf011adb8cf67a7395472f25c20 | [
"Apache-2.0"
]
| null | null | null | # encoding=utf8
#!/usr/bin/env python
import urllib2
import json
import re
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize
from socket import error as SocketError
#
# checks whether the first argument is the same word as a plural string, checking plurals
#
def equalCheckingPlurals(string, pluralString):
# only check plurals if first 3 letters match
if string[0] != pluralString[0]:
return None
if len(string) > 1 and len(pluralString) > 1 and string[1] != pluralString[1]:
return None
if len(string) > 2 and len(pluralString) > 2 and string[2] != pluralString[2]:
return None
# check all possible plurals of string
if string == pluralString or string + "s" == pluralString or string + "es" == pluralString or string[:-1] + "ies" == pluralString or string[:-1] + "ves" == pluralString:
return pluralString
return None
#
# checks whether the first argument matches a string in a list of plurals, checking plurals
#
def inCheckingPlurals(string, pluralList):
for pluralString in pluralList:
if equalCheckingPlurals(string, pluralString):
return pluralString
return None
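#
# Illustrative sketch (added for clarity, not part of the original script): expected
# behaviour of the two plural helpers above. The sample words are assumptions chosen
# only for demonstration, and this helper is never called by the scraper.
#
def _pluralHelpersExample():
	assert equalCheckingPlurals("tomato", "tomatoes") == "tomatoes"
	assert equalCheckingPlurals("berry", "berries") == "berries"
	assert inCheckingPlurals("carrot", ["onions", "carrots"]) == "carrots"
	assert inCheckingPlurals("basil", ["onions", "carrots"]) is None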
# arrays for labeling ingredients (categorized for the purpose of cooking, to tomato is veg, not fruit)
dairyIngredients = ['buttermilk', 'cottage', 'cream', 'creamer', 'creamy', 'creme', 'ghee', 'half-and-half',
'milk', 'yogurt']
cheeses = ['bocconcini', 'mozzarella', 'gouda', 'swiss', 'brie']
meats = ['bacon', 'beefs', 'burgers', 'chorizo', 'dogs', 'frankfurters', 'giblets', 'ham', 'lambs', 'livers',
'meatballs', 'meatloaves', 'meats', 'mignon', 'mincemeat', 'pepperonis', "pig's", 'porks',
'prosciutto', 'ribs', 'roasts', 'sausages', 'sirloin', 'tripe', 'veal', 'venison', 'kielbasas',
'liverwurst', 'wieners', 'cotechino', 'linguica', 'pastrami', 'squirrels', 'sauerbraten',
'picadillo', 'carcass', 'brains', 'mortadella', 'rounds', 'sweetbread', 'toad', 'tinga',
'embutido', 'hash', 'broil', 'brisket', 'franks', 'pigs', 'rouladen', 'chops', 'scrapple',
'barbeque', 'spareribs']
poultry = ['bologna', 'bratwursts', 'chickens', 'ducks', 'goose', 'hens', 'pollo', 'salami', 'turkey',
'pheasant', 'quail', 'turducken', 'drumettes', 'wings', 'roosters']
fish = ['albacores', 'bass', 'catfish', 'cods', 'fish', 'flounder', 'grouper', 'haddock', 'halibut', 'mahi',
'monkfish', 'salmon', 'shark', 'snapper', 'sole', 'swordfishes', 'trouts', 'tunas', 'bluefish',
'bonito', 'rockfish', 'mackerel', 'naruto', 'drum', 'marlin', 'tilapia', 'carp', 'kingfish',
'mullets', 'whitefish', 'kippers', 'torsk', 'saltfish']
seafoods = ['anchovies', 'calamaris', 'clams', 'crabs', 'crabmeat', 'crawfish', 'lobsters', 'mussels',
'oysters', 'prawns', 'scallops', 'seafood', 'shrimps', 'squids', 'snails', 'shellfish', 'caviar']
mainProteins = ['beans', 'chickpeas', 'nuts', 'seeds', 'tofu', 'whey', 'buckwheat', 'protein', 'soybeans',
'soy', 'tempeh', 'lentils', 'masoor', 'gluten', 'pine', 'falafel', 'portobello']
fruits = ['apples', 'apricots', 'bananas', 'blackberries', 'blueberries', 'cantaloupe', 'cherries', 'citrons',
'citrus', 'coconuts', 'cranberries', 'currants', 'elderberries', 'figs', 'fruitcakes', 'fruits',
'gooseberries', 'grapefruit', 'grapes', 'guava', 'honeydew', 'huckleberries', 'kiwis','kumquats',
'lemonade', 'lemons', 'limes', 'mangoes', 'marrons', 'mincemeat', 'mulberries', 'nectarines', 'oranges',
'papayas', 'peaches', 'pears', 'persimmon', 'persimmons', 'pineapples', 'plums', 'prunes', 'raisins',
'raspberries', 'slushies', 'smoothies', 'sorrel', 'strawberries', 'tangerines', 'watermelons', 'yuzu',
'lingonberries', 'plantains', 'juniper', 'lingonberries', 'pomegranates', 'serviceberries',
'zinfandel', 'lychees', 'carambola', 'uvas']
vegetables = ['artichokes', 'arugula', 'asparagus', 'avocados', 'bamboo', 'beets', 'broccoli', 'cabbage',
'calzones', 'carrots', 'cauliflower', 'celery', 'chilis', 'chives', 'choy', 'cilantro', 'coleslaw',
'coriander', 'cucumber', 'cucumbers', 'dates', 'eggplant', 'eggplants', 'endive', 'escarole',
'galangal', 'haystacks', 'jicama', 'kale', 'kohlrabi', 'kucai', 'leeks', 'lettuce',
'mushrooms', 'okra', 'olives', 'onions', 'parsley', 'parsnips', 'peas', 'peppers', 'pickles',
'pizzas', 'potatoes', 'pumpkins', 'radishes', 'rutabagas', 'salad', 'sauerkraut', 'shallots', 'slaws',
'spinach', 'sprouts', 'squash', 'tamarind', 'taros', 'tomatillo', 'tomatillos', 'tomatoes', 'turnips',
'vegetable', 'vegetables', 'veggies', 'watercress', 'yams', 'zucchinis', 'chervil', 'daikon', 'iceberg',
'nopales', 'pimentos', 'radicchio', 'karengo', 'nori', 'succotash', 'truffle', 'chard', 'fries', 'leaves',
'browns', 'romain', 'palm', 'sorghum', 'aloo', 'haricots', 'caprese', 'salata', 'shiitake']
sugars = ['Jell-O®', 'butterscotch', 'candied', 'candy', 'caramels', 'frosting', 'fructose', 'gingersnaps',
'glaces', 'glaze', 'glycerin', 'glycerol', 'gumdrops', 'gummi', 'honey', 'icing', 'jellybeans',
'ladyfingers', 'licorice', 'macaroons', 'maple', 'marrons glaces', 'marshmallows', 'marzipan',
'molasses', 'pastries', 'pectin', 'peppermints', 'pie', 'piping', 'puddings', 'puff', 'sourball',
'sprinkles', 'sucanat', 'sugar', 'sweetener', 'syrup', 'tarts', 'toffee', 'twinkies', 'colaciones'
'sherbet', "hershey®'s", 'candies', "confectioners'", 'fudge', 'taffy', 'pink', 'sherbet']
sauces = ['alfredo', 'applesauce', 'chutney', 'cannoli', 'dips', 'guacamole', 'hummus', 'paste', 'spreads',
'tahini', 'tzatziki', 'denjang', 'salsa', 'sauce', 'tapenade', 'coating', 'teriyaki',
'aioli', 'checca', 'amatriciana', 'ragu', 'marinara']
condiments = ['dressing', 'jam', 'ketchup', 'marinade', 'marjoram', 'mayonnaise', 'mirin', 'mustard',
'pesto', 'relish', 'shoyu', 'tamari', 'vinaigrette', 'gochujang']
soups = ['broth', 'chowder', 'dashi', 'soup', 'stew', 'jambalaya', 'gumbo', 'gazpacho', 'goulash', 'pho',
'slumgullion', 'cioppino', 'minestrone']
nuts = ['almonds', 'butternuts', 'candlenuts', 'cashews', 'chestnuts', 'hazelnuts', 'macadamia', 'nuts',
'peanuts', 'pecans', 'pistachios', 'walnuts', 'nuts']
alcoholicIngredients = ['anisette', 'beer', 'bitters', 'bourbon', 'brandy', 'cacao', 'chambord', 'champagne',
'cognac', 'eggnog', 'kirsch', 'kirschwasser', 'liqueur', 'rum', 'schnapps', 'sherry', 'ale',
'spritz', 'tequila', 'vermouth', 'vodka', 'whiskey', 'wine', 'campari', 'alcohol', 'absinthe',
'cachaca', 'liquor', 'cointreau', 'curacao', 'sake', 'sec', 'calvados', 'galliano', 'lillet',
'margaritas', 'coladas', 'negroni', 'mojitos', 'mimosas', 'bahama', 'slammer', 'sauvignon', 'chablis',
'martinis', 'tequinis', 'spritzs', 'cosmopolitan', 'hurricanes', 'sangria', 'sex', "shaggy's", 'nipples',
'stoli']
spices = ['allspice', 'anise', 'arrowroot', 'basil', 'bay', 'capers', 'caraway', 'cardamom', 'cassava',
'cayenne', 'chocolate', 'cilantro', 'cinnamon', 'cloves', 'cocoa', 'coriander', 'cumin', 'dill',
'fennel', 'flax', 'garlic', 'ginger', 'herbs', 'kalonji', 'mace', 'masala', 'miso', 'monosodium',
'nutmeg', 'oregano', 'paprika', 'pepper', 'peppercorns', 'pimento', 'poppy', 'poppyseed',
'powder','rhubarb', 'rosemary', 'saffron', 'sage', 'salt', 'savory', 'seasoning', 'sesame', 'spices',
'sunflower', 'tarragon', 'thyme', 'turmeric', 'vanilla', 'watercress', 'spearmint', 'comfort']
spicy = ['angelica', 'dijon', 'horseradish', 'jerk', 'wasabi', 'spicy']
hotPeppers = ['jalapenos', 'pepperoncinis', 'chiles']
grains = ['bagels', 'baguettes', 'barley', 'biscuits', 'bran', 'bread', 'buns', 'cereal', 'corn', 'cornbread',
'cornstarch', 'couscous', 'crackers', 'croutons', 'crusts', 'dough', 'granola', 'hominy', 'kasha',
'masa', 'matzo', 'millet', 'muffins', 'oats', 'pitas', 'popcorn', 'pretzels', 'quinoa', 'rice', 'rolls',
'shortbread', 'sourdough', 'stuffing', 'tapioca', 'toasts', 'tortillas', 'wheat', 'kaiser', 'cornmeal',
'breadcrumbs', 'graham', 'bulgur', 'farina', 'oatmeal', 'croissants', 'polenta', 'grits', 'pumpernickel',
'sago', 'seitan', 'grains', 'taters', 'risotto', 'shells', 'amarettini', 'mochi', 'cornflakes', 'pilaf',
'puppies']
pastas = ['farfalle', 'fettuccine', 'lasagnas', 'linguine', 'mac', 'macaroni', 'manicotti', 'noodles', 'pasta',
'farfel', 'vermicelli', 'tagliatelle', 'cannelloni', 'penne']
wrappedMeals = ['burritos', 'calzones', 'dumplings', 'empanadas', 'fajitas', 'hero', 'pie', 'pinwheels', 'pizzas',
'quesadillas', 'sandwiches', 'tacos', 'tourtiere', 'wontons', 'hoagie', 'pierogies', 'rarebit',
'joes', 'enchiladas', 'pierogi', 'bierrocks', 'torta', 'reuben', 'wraps', 'piroshki', 'tamales',
'bruschetta', 'antipasto', 'hamburger', 'muffuletta', 'blanket', 'runzas', 'samosas', 'sambousas',
'chalupas', 'spanakopita', 'submarine']
pastaDishes = ['casseroles', 'curry', 'lasagna', 'marzetti', 'mostaccioli', 'spaghetti', 'stroganoff', 'ziti',
'pastini', 'pastitsio', 'fideo', 'spaghettini', 'moussaka', 'tortellinis', 'tallerine', 'talerine',
'scampi', 'ravioli', 'pad', 'gnocchi', 'spaetzle', 'stromboli']
vegetableDishes = ['tabbouleh', 'kabobs', 'suey', 'frittatas', 'quiches', 'raita', 'shieldzini', 'stir',
'sukiyaki']
drinks = ['beverage', 'cider', 'coffee', 'dew™', 'drink', 'eggnog', 'epazote', 'espresso', 'gin', 'juices',
'lemonade', 'limeade', 'milk', 'rosewater', 'soda', 'tea', 'wassail', 'punch', 'shake', 'shirley',
'americano']
cookingLiquids = ['oil', 'vinegar', 'water', 'snow', 'ice']
bakingIngredients = ['ammonia', 'baking', 'eggs', 'flour', 'margarine', 'yeast', 'bisquick®']
cookingFats = ['butter', 'gelatin', 'gravy', 'lard', 'lecithin', 'ovalette', 'shortening', 'xanthan', 'suet']
extras = ['carnations', 'coloring', 'dust', 'flowers', 'lilies', 'spray', 'toppings', 'drippings', 'powdered',
'gold']
fasteners = ['sticks', 'skewers', 'toothpicks']
adhesives = ['glue']
containers = ['jars']
flavorings = ['extract', 'flavorings', 'mint', 'pandan', 'hickory', 'flavored', 'mesquite', 'wood',
'hardwood']
mixtures = ['food', 'mixes']
# words with succeeding noun ("milk" or "cake")
nonDairyMilks = ['almond', 'soy', 'coconut']
cakeTypes = ['pound', 'sponge', 'white', 'yellow', 'bunny', "'scratch'"]
#
# returns a list of labels that match word(s) in list of ingredient/recipe words
#
def getLabelsFromArray(words):
labels = set()
for word in words:
if inCheckingPlurals(word, dairyIngredients):
labels.add("dairy")
labels.add("fat and vitamins")
continue
if ("cheese" == word and "cream" not in words) or word in cheeses:
labels.add("cheese")
labels.add("dairy")
continue
if inCheckingPlurals(word, meats):
labels.add("meat")
continue
if inCheckingPlurals(word, poultry):
labels.add("poultry")
continue
if inCheckingPlurals(word, fish):
labels.add("fish")
continue
if inCheckingPlurals(word, seafoods):
labels.add("seafood")
continue
if inCheckingPlurals(word, mainProteins):
labels.add("main protein")
continue
if inCheckingPlurals(word, fruits):
labels.add("fruit")
continue
if inCheckingPlurals(word, vegetables):
labels.add("vegetable")
continue
if inCheckingPlurals(word, spices):
labels.add("spice or herb")
continue
if inCheckingPlurals(word, sauces):
labels.add("sauce")
continue
if inCheckingPlurals(word, condiments):
labels.add("condiment")
continue
if inCheckingPlurals(word, soups):
labels.add("soup")
continue
if inCheckingPlurals(word, alcoholicIngredients):
labels.add("alcoholic")
continue
if inCheckingPlurals(word, spicy):
labels.add("spicy")
continue
if inCheckingPlurals(word, hotPeppers):
labels.add("vegetable")
labels.add("spicy")
continue
if inCheckingPlurals(word, nuts):
labels.add("nut")
continue
if inCheckingPlurals(word, cookingLiquids):
labels.add("cooking liquid")
continue
if inCheckingPlurals(word, cookingFats):
labels.add("cooking fat")
continue
if inCheckingPlurals(word, bakingIngredients):
labels.add("baking ingredient")
continue
if inCheckingPlurals(word, sugars):
labels.add("sugar")
continue
if inCheckingPlurals(word, grains):
labels.add("grain")
continue
if inCheckingPlurals(word, pastas):
labels.add("pasta")
continue
if inCheckingPlurals(word, drinks):
labels.add("drink")
continue
if inCheckingPlurals(word, wrappedMeals):
labels.add("wrapped meal")
continue
if inCheckingPlurals(word, pastaDishes):
labels.add("pasta dish")
continue
if inCheckingPlurals(word, vegetableDishes):
labels.add("vegetable dish")
continue
if inCheckingPlurals(word, extras):
labels.add("recipe extra")
continue
if inCheckingPlurals(word, flavorings):
labels.add("flavoring")
continue
if inCheckingPlurals(word, mixtures):
labels.add("mixture")
continue
if inCheckingPlurals(word, fasteners):
labels.add("fastener")
continue
if inCheckingPlurals(word, adhesives):
labels.add("adhesive")
continue
if inCheckingPlurals(word, containers):
labels.add("container")
continue
# check for non dairy milks
if "milk" in words:
index = words.index("milk")
if index > 0 and words[index - 1] in nonDairyMilks:
labels.remove("dairy")
# check if "cake" actually is a type of cake
if "cake" in words:
index = words.index("cake")
if index > 0 and words[index - 1] in cakeTypes:
labels.add("sugar")
elif "cakes" in words:
index = words.index("cakes")
if index > 0 and words[index - 1] in cakeTypes:
labels.add("sugar")
# check if "non dairy" in parsed ingredient
if "dairy" in words and "dairy" in labels:
index = words.index("dairy")
if index > 0 and words[index - 1] == "non":
labels.remove("dairy")
# add "greens" but not "green" as vegetable
if "greens" in words:
labels.add("vegetable")
# add "steak" as meat only if not used with fish (ie "salmon steak")
if ("steak" in words or "steaks" in words) and "fish" not in labels:
labels.add("meat")
# chili either a pepper or soup
if "chili" in words:
index = words.index("chili")
if index+1 < len(words) and words[index+1] == "pepper":
labels.add("vegetable")
labels.add("spicy")
else:
labels.add("soup")
# check for unsweetened sugars
if "unsweetened" in words and "sugar" in labels:
labels.remove("sugar")
# check for unflavored flavorings
if "unflavored" in words and "flavoring" in labels:
labels.remove("flavoring")
return list(labels)
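#
# Illustrative sketch (added, not part of the original script): a hypothetical call
# showing the kind of output getLabelsFromArray is expected to produce. The sample
# words are assumptions; the exact labels depend on the word lists above.
#
def _ingredientLabelExample():
	# ["shredded", "cheddar", "cheese"] -> labels include "cheese" and "dairy" (order not guaranteed)
	return getLabelsFromArray(["shredded", "cheddar", "cheese"])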
# arrays for labeling recipes
breakfasts = ['crepes', 'pancakes', 'waffles', 'eggs', 'beignets', 'doughnuts', 'muffins', 'crepes', 'stroopwaffels',
'brunch', 'omelets']
desserts = ['cookies', 'cakes', 'brownies', 'pies', 'cobblers', 'mousses', 'puffs', 'biscottis', 'wafers', 'splits',
'scones', 'cupcakes', 'puddings', 'snowballs', 'candys', 'cheesecakes', 'wafers', 'macaroons', 'fruitcakes',
'gingerbreads', 'pastries', 'fudges', 'tarts', 'tarte', 'crinkles', 'chews', 'bars', 'squares', 'twists', 'snaps',
'brittles', 'thumbprints', 'babka', 'dessert', 'twinkies', 'cannolis', 'genoise', 'stollen', 'panettone',
'tiramisu', 'tuppakaka', 'vasilopita', 'zeppoli', 'sachertorte', 'spudnuts', 'botercake', 'kolaches', 'eclairs',
'ponczki', 'popovers', 'pulla', 'injera', 'dulce', 'bibingka', 'fastnachts', 'springerle', 'spritsar', 'spruffoli',
'snickerdoodles', 'santa\'s', 'sandtarts', 'sandbakelser', 'rugelach', 'rocky', 'pralines', 'pfeffernusse',
'pavlova', 'meringue', 'melting', 'meltaways', 'listy', 'lebkuchen', 'koulourakia', 'hamantashen', 'fudgies',
'florentines', 'gods', 'bark', 'buckeyes', 'torte', 'ladyfingers', 'baumkuchen', 'kipferl', 'kake', 'mocha',
'strufoli', 'stracciatella', 'rosettes', 'pepparkakor', 'sopapillas', 'kolacky', 'kolaczki', 'velvet', 'yums',
'vaselopita', 'necklaces', 'tres', 'timbales', 'wandies', 'lizzies', 'kringles', 'meringues', 'gateau', 'flan',
'baklava', 'trifle', 'dollies', 'krumkake', 'locks', 'lamingtons', 'napoleons', 'pasties', 'penuche', 'peppernuts',
'delights', 'prusurates', 'savoiardi', 'scotcharoos', 'sandies', 'sfinge', 'sfingi', 'rainbows', 'spitzbuben',
'sponges', 'spumetti', 'streusel', 'sufganiot', 'sufganiyot', 'crumbcake', 'bliss', 'malasadas']
breads = ['bagels', 'bannock', 'biscuits', 'breads', 'brioche', 'buns', 'challahs', 'chow', 'ciabattas', 'cornbread',
'crisps', 'croissants', 'doughs', 'focaccia', 'fougassetoast', 'gingerbreads', 'hoska', 'johnnycakes',
'kaiserbaguettes', 'kiflicrusts', 'kourabiedes', 'lefse', 'loafs', 'loaves', 'naan', 'oatmeal', 'paella',
'pan', 'paximade', 'pizzelles', 'pumpernickel', 'rolls', 'shells', 'shortbread', 'sourdoughs', 'stuffings',
'taralli', 'tortillas']
def getRecipeLabels(parsedRecipe):
labels = set(getLabelsFromArray(parsedRecipe))
for string in parsedRecipe:
if inCheckingPlurals(string, breakfasts):
labels.add("breakfast")
continue
if inCheckingPlurals(string, desserts):
labels.add("dessert")
continue
if inCheckingPlurals(string, breads):
labels.add("bread")
continue
# don't use "grain" as "label" if recipe label has "bread"
if "bread" in labels and "grain" in labels:
labels.remove("grain")
if "alcoholic" in labels:
# if recipe title includes alcohol but no other defining words, it's a drink
if len(labels) == 1:
labels.add("drink")
# if recipe title includes "non-alcoholic", it's not an alcoholic recipe
if "non-alcoholic" in parsedRecipe:
labels.remove("alcoholic")
if "vegetarian" in parsedRecipe:
if "meat" in labels:
labels.remove("meat")
if "seafood" in labels:
labels.remove("seafood")
if "fish" in labels:
labels.remove("fish")
if "poultry" in labels:
labels.remove("poultry")
return list(labels)
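#
# Illustrative sketch (added, not part of the original script): a hypothetical recipe
# title, already lower-cased and split the way the main loop below does it, and the
# labels it is expected to produce. Never called; for documentation only.
#
def _recipeLabelExample():
	# "Grilled Chicken Tacos" -> ["grilled", "chicken", "tacos"] -> labels include "poultry" and "wrapped meal"
	return getRecipeLabels(["grilled", "chicken", "tacos"])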
# list of measurement units for parsing ingredient
measurementUnits = ['teaspoons','tablespoons','cups','containers','packets','bags','quarts','pounds','cans','bottles',
'pints','packages','ounces','jars','heads','gallons','drops','envelopes','bars','boxes','pinches',
'dashes','bunches','recipes','layers','slices','links','bulbs','stalks','squares','sprigs',
'fillets','pieces','legs','thighs','cubes','granules','strips','trays','leaves','loaves','halves']
#
# transform amount to cups based on amount and original unit
#
def transformToCups(amount, unit):
	if unit == "cups":
		return amount
	elif unit == "gallons":
		# 16 cups per gallon
		return amount * 16
	elif unit == "quarts":
		# 4 cups per quart
		return amount * 4
	elif unit == "pints":
		# 2 cups per pint
		return amount * 2
	elif unit == "ounces":
		# 8 fluid ounces per cup
		return amount / 8
	elif unit == "tablespoons":
		# 16 tablespoons per cup
		return amount / 16
	elif unit == "teaspoons":
		# 48 teaspoons per cup
		return amount / 48
	else:
		return amount
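# Illustrative conversions (added for clarity), assuming US customary measures
# (16 cups per gallon, 4 per quart, 2 per pint; 8 fluid ounces, 16 tablespoons or
# 48 teaspoons per cup):
#   transformToCups(1, "quarts")      -> 4 cups
#   transformToCups(2, "pints")       -> 4 cups
#   transformToCups(8, "tablespoons") -> 0.5 cups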
# strings indicating ingredient as optional (currently don't use optional boolean for anything)
# optionalStrings = ['optional', 'to taste', 'as needed', 'if desired']
# list of adjectives and participles used to describe ingredients
descriptions = ['baked', 'beaten', 'blanched', 'boiled', 'boiling', 'boned', 'breaded', 'brewed', 'broken', 'chilled',
'chopped', 'cleaned', 'coarse', 'cold', 'cooked', 'cool', 'cooled', 'cored', 'creamed', 'crisp', 'crumbled',
'crushed', 'cubed', 'cut', 'deboned', 'deseeded', 'diced', 'dissolved', 'divided', 'drained', 'dried', 'dry',
'fine', 'firm', 'fluid', 'fresh', 'frozen', 'grated', 'grilled', 'ground', 'halved', 'hard', 'hardened',
'heated', 'heavy', 'juiced', 'julienned', 'jumbo', 'large', 'lean', 'light', 'lukewarm', 'marinated',
'mashed', 'medium', 'melted', 'minced', 'near', 'opened', 'optional', 'packed', 'peeled', 'pitted', 'popped',
'pounded', 'prepared', 'pressed', 'pureed', 'quartered', 'refrigerated', 'rinsed', 'ripe', 'roasted',
'roasted', 'rolled', 'rough', 'scalded', 'scrubbed', 'seasoned', 'seeded', 'segmented', 'separated',
'shredded', 'sifted', 'skinless', 'sliced', 'slight', 'slivered', 'small', 'soaked', 'soft', 'softened',
'split', 'squeezed', 'stemmed', 'stewed', 'stiff', 'strained', 'strong', 'thawed', 'thick', 'thin', 'tied',
'toasted', 'torn', 'trimmed', 'wrapped', 'vained', 'warm', 'washed', 'weak', 'zested', 'wedged',
'skinned', 'gutted', 'browned', 'patted', 'raw', 'flaked', 'deveined', 'shelled', 'shucked', 'crumbs',
'halves', 'squares', 'zest', 'peel', 'uncooked', 'butterflied', 'unwrapped', 'unbaked', 'warmed']
# list of adverbs used before or after description
precedingAdverbs = ['well', 'very', 'super']
succeedingAdverbs = ['diagonally', 'lengthwise', 'overnight']
# list of prepositions used after ingredient name
prepositions = ['as', 'such', 'for', 'with', 'without', 'if', 'about', 'e.g.', 'in', 'into', 'at', 'until']
# only used as <something> removed, <something> reserved, <x> inches, <x> old, <some> temperature
descriptionsWithPredecessor = ['removed', 'discarded', 'reserved', 'included', 'inch', 'inches', 'old', 'temperature', 'up']
# descriptions that can be removed from ingredient, i.e. candied pineapple chunks
unnecessaryDescriptions = ['chunks', 'pieces', 'rings', 'spears']
# list of prefixes and suffixes that should be hyphenated
hypenatedPrefixes = ['non', 'reduced', 'semi', 'low']
hypenatedSuffixes = ['coated', 'free', 'flavored']
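# Illustrative sketch (added, not part of the original script): the parsing loop in
# the main section below is expected to break a raw ingredient line such as
#   "2 1/2 cups finely chopped walnuts, toasted"
# into roughly
#   amount -> 2.5, unit -> "cups", descriptions -> ["finely chopped", "toasted"]
# with "walnuts" left over as the ingredient name. The exact split depends on the
# measurementUnits, descriptions and preposition lists above.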
#
# main function
#
jsonFile = open("recipes.json", "w")
jsonFile.truncate()
outputFile = open("output.txt", "w")
outputFile.truncate()
parenthesesRegex = re.compile(r"\([^()]*\)")
# load list of all ingredients
allIngredientsFile = open("allIngredients.txt", "r")
allIngredients = allIngredientsFile.read().split("\n")
allIngredientsFile.close()
while "" in allIngredients:
allIngredients.remove("")
unlabeledIngredients = set()
unlabeledRecipes = set()
# recipes start at id~6660 and end at id=~27000
for recipeId in range(6660, 27000):
soup = None
try:
url = "http://allrecipes.com/recipe/{}".format(recipeId)
		# urllib2's response object is not a context manager in Python 2,
		# so read it and close it explicitly instead of using "with"
		response = urllib2.urlopen(url, timeout=1)
		soup = BeautifulSoup(response.read(), "html.parser")
		response.close()
except AttributeError as e:
outputFile.write("{0}: AttributeError".format(recipeId))
except urllib2.HTTPError as e:
outputFile.write("{0}: No recipe".format(recipeId))
outputFile.write(e.reason)
except urllib2.URLError as e:
outputFile.write("{0}: URL ERROR".format(recipeId))
outputFile.write(e.reason)
except SocketError as e:
outputFile.write("{0}: SOCKET ERROR".format(recipeId))
if soup:
titleSpan = soup.find("h1", class_="recipe-summary__h1")
servingSpan = soup.find("span", class_="servings-count")
calorieSpan = soup.find("span", class_="calorie-count")
directionObjects = soup.find_all("span", class_="recipe-directions__list--item")
ingredientObjects = soup.find_all("span", class_="recipe-ingred_txt")
footnotesSection = soup.find("section", class_="recipe-footnotes")
#
# get title
#
title = titleSpan.text
title = title.replace("Linguini", "Linguine")
title = title.replace("Genoese", "Genoise")
#
# get labels
#
parsedTitle = title.lower().replace("(", "").replace(")", "").replace("-", " ").split(" ");
while "" in parsedTitle:
parsedTitle.remove("")
allLabels = getRecipeLabels(parsedTitle)
if len(allLabels) == 0:
unlabeledRecipes.add(title)
#
# get ingredients
#
count = len(ingredientObjects) - 3 # 2 spans with "Add all" and 1 empty
ingredients = []
for i in range(0, count):
ingredientString = ingredientObjects[i].text
# check if not ingredient, but separator
# ie "For Bread:"
if ingredientString.find("For ") == 0 or " " not in ingredientString or (":" in ingredientString and "eg:" not in ingredientString):
continue
ingredient = {}
ingredient["descriptions"] = []
# move parentheses to description
while True:
parentheses = parenthesesRegex.search(ingredientString)
if not parentheses:
break
searchString = parentheses.group()
ingredientString = ingredientString.replace(searchString, "")
ingredient["descriptions"].append(searchString[1:-1])
# remove "," and "-" then split ingredient into words
ingredientString = ingredientString.replace(","," and ")
ingredientString = ingredientString.replace("-"," ")
parsedIngredient = ingredientString.split(" ")
# remove "", caused by extra spaces
while "" in parsedIngredient:
parsedIngredient.remove("")
# move prepositions to description
for index in range(0, len(parsedIngredient)):
if parsedIngredient[index] in prepositions:
if (index + 1 < len(parsedIngredient) and parsedIngredient[index + 1] == "use") or (index > 0 and parsedIngredient[index - 1] == "bone" and parsedIngredient[index] == "in"):
continue
parsedPrepositionalPhrase = parsedIngredient[index:]
ingredient["descriptions"].append(" ".join(parsedPrepositionalPhrase))
parsedIngredient = parsedIngredient[:index]
break
#
# get ingredient amount
#
ingredient["amount"] = 0
while len(parsedIngredient) > 0:
# check if current word is number of inches, not amount
if len(parsedIngredient) > 1 and parsedIngredient[1] == "inch":
break
# get first word
# if first word is digit or fraction, eval
# "x" not multiplier, "%" used as modulo
try:
ingredient["amount"] += eval(parsedIngredient[0])
del parsedIngredient[0]
except (SyntaxError, NameError, TypeError):
break
#
# get ingredient unit
#
# check words for unit
unitString = ""
for i in range(0, len(parsedIngredient)):
pluralUnit = inCheckingPlurals(parsedIngredient[i], measurementUnits)
if pluralUnit:
unitString = pluralUnit
del parsedIngredient[i]
if i < len(parsedIngredient) and parsedIngredient[i] == "+":
while "+" in parsedIngredient:
index = parsedIngredient.index("+")
del parsedIngredient[index]
ingredient["amount"] += transformToCups(eval(parsedIngredient[index]), parsedIngredient[index+1])
del parsedIngredient[index]
del parsedIngredient[index]
break
# check for "cake" as unit, but only if "yeast" somewhere in ingredient
if "yeast" in parsedIngredient:
for word in parsedIngredient:
if equalCheckingPlurals(word, "cakes"):
unitString = "cakes"
parsedIngredient.remove(word)
break
# check if first word in array is "or", then ingredient has 2 possible units
if parsedIngredient[0] == "or":
pluralUnit = inCheckingPlurals(parsedIngredient[1], measurementUnits)
if pluralUnit:
unitString += " " + parsedIngredient[0] + " " + pluralUnit
parsedIngredient = parsedIngredient[2:]
# delete "of" at first index, ie "1 cup of milk" -> "1 cup milk"
if parsedIngredient[0] == "of":
del parsedIngredient[0]
ingredient["unit"] = unitString
#
# get ingredient descriptions
#
# remove useless words
for word in parsedIngredient:
if word in unnecessaryDescriptions:
parsedIngredient.remove(word)
index = 0
while index < len(parsedIngredient):
descriptionString = ""
word = parsedIngredient[index]
# search through descriptions (adjectives)
if word in descriptions:
descriptionString = word
# check previous word
if index > 0:
previousWord = parsedIngredient[index - 1]
if previousWord in precedingAdverbs or previousWord[-2:] == "ly":
descriptionString = previousWord + " " + word
parsedIngredient.remove(previousWord)
# check next word
elif index + 1 < len(parsedIngredient):
nextWord = parsedIngredient[index + 1]
if nextWord in succeedingAdverbs or nextWord[-2:] == "ly":
descriptionString = word + " " + nextWord
parsedIngredient.remove(nextWord)
# word not in descriptions, check if description with predecessor
elif word in descriptionsWithPredecessor and index > 0:
descriptionString = parsedIngredient[index - 1] + " " + word
del parsedIngredient[index - 1]
# either add description string to descriptions or check next word
if descriptionString == "":
index+=1
else:
ingredient["descriptions"].append(descriptionString)
parsedIngredient.remove(word)
# remove "and"
while "and" in parsedIngredient:
parsedIngredient.remove("and")
# remove "style"
while "style" in parsedIngredient:
parsedIngredient.remove("style")
# remove "or" if last word
if parsedIngredient[-1] == "or":
del parsedIngredient[-1]
# replace hyphenated prefixes and suffixes
for word in parsedIngredient:
for hypenatedSuffix in hypenatedSuffixes:
if hypenatedSuffix in word:
word=word.replace(hypenatedSuffix, "-" + hypenatedSuffix)
for hypenatedPrefix in hypenatedPrefixes:
if word.find(hypenatedPrefix) == 0:
word=word.replace(hypenatedPrefix, hypenatedPrefix + "-")
# move various nouns to description
if "powder" in parsedIngredient and ("coffee" in parsedIngredient or "espresso" in parsedIngredient or "tea" in parsedIngredient):
parsedIngredient.remove("powder")
ingredient["descriptions"].append("unbrewed")
#
# get ingredient
#
ingredientString = " ".join(parsedIngredient)
# remove "*", add footnote to description
if "*" in ingredientString:
ingredient["descriptions"].append("* see footnote")
ingredientString = ingredientString.replace("*", "")
# standardize "-" styling
ingredientString = ingredientString.replace("- ", "-")
ingredientString = ingredientString.replace(" -", "-")
ingredientString = ingredientString.replace("Jell O", "Jell-O")
ingredientString = ingredientString.replace("half half", "half-and-half")
# remove unnecessary punctuation
ingredientString = ingredientString.replace(".", "")
ingredientString = ingredientString.replace(";", "")
# fix spelling errors
ingredientString = ingredientString.replace("linguini", "linguine")
ingredientString = ingredientString.replace("filets", "fillets")
ingredientString = ingredientString.replace("chile", "chili")
ingredientString = ingredientString.replace("chiles", "chilis")
ingredientString = ingredientString.replace("chilies", "chilis")
ingredientString = ingredientString.replace("won ton", "wonton")
ingredientString = ingredientString.replace("liquer", "liqueur")
ingredientString = ingredientString.replace("confectioners ", "confectioners' ")
ingredientString = ingredientString.replace("creme de cacao", "chocolate liquer")
ingredientString = ingredientString.replace("pepperjack", "Pepper Jack")
ingredientString = ingredientString.replace("Pepper jack", "Pepper Jack")
# standardize ingredient styling
ingredientString = ingredientString.replace("dressing mix", "dressing")
ingredientString = ingredientString.replace("salad dressing", "dressing")
ingredientString = ingredientString.replace("bourbon whiskey", "bourbon")
ingredientString = ingredientString.replace("pudding mix", "pudding")
if ingredientString == "":
outputFile.write("Bad ingredient string: {0}".format(ingredientObjects[i].text))
ingredientString = ingredientObjects[i].text
pluralString = inCheckingPlurals(ingredientString, allIngredients)
if pluralString:
ingredientString = pluralString
else:
allIngredients.append(ingredientString)
ingredient["ingredient"] = ingredientString
#
# get ingredient labels
#
ingredientString = ingredientString.replace("-flavored", "")
ingredientString = ingredientString.lower()
ingredient["labels"] = getLabelsFromArray(ingredientString.split(" "))
if len(ingredient["labels"]) == 0:
unlabeledIngredients.add(ingredient["ingredient"])
ingredients.append(ingredient)
#
# get directions
#
# get number of spans and concatenate all contents to string
count = len(directionObjects) - 1 # 1 empty span at end
directionsString = directionObjects[0].text
for i in range(1, count):
directionsString += " " + directionObjects[i].text
# use nltk to split direction string into sentences
directionsArray = sent_tokenize(directionsString)
directions = []
for i in range(0, len(directionsArray)):
direction = {}
direction["step"] = i
direction["direction"] = directionsArray[i]
directions.append(direction)
#
# get footnotes
#
footnotes = []
if footnotesSection:
for footnote in footnotesSection.find_all("li"):
footnotes.append(footnote.text)
#
# get servings
#
servings = servingSpan.contents[0].text if servingSpan is not None else None
if servings and servings.isdigit():
servings = eval(servings)
else:
servings = 0
#
# get calories
#
calories = calorieSpan.contents[0].text if calorieSpan is not None else None
if calories and calories.isdigit():
calories = eval(calories)
else:
calories = 0
# write ingredient to JSON file
jsonFile.write(json.dumps({"id": recipeId,
"name": title,
"ingredients": ingredients,
"directions": directions,
"footnotes": footnotes,
"labels": allLabels,
"servings": servings,
"calories": calories}))
jsonFile.write("\n")
print(recipeId)
# write data to files every 10 recipes
if recipeId % 10 == 0:
unlabeledRecipeFile = open("unlabeledRecipes.txt", "w")
unlabeledRecipeFile.truncate()
for string in sorted(unlabeledRecipes):
unlabeledRecipeFile.write("{0}\n".format(string))
unlabeledRecipeFile.close()
unlabeledIngredientsFile = open("unlabeledIngredients.txt", "w")
unlabeledIngredientsFile.truncate()
for string in sorted(unlabeledIngredients):
unlabeledIngredientsFile.write("{0}\n".format(string))
unlabeledIngredientsFile.close()
allIngredientsFile = open("allIngredients.txt", "w")
allIngredientsFile.truncate()
for string in sorted(allIngredients):
allIngredientsFile.write("{0}\n".format(string))
allIngredientsFile.close()
print(recipeId)
jsonFile.close()
outputFile.close()
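# Illustrative sketch (not part of the original scraper): the "get ingredient
# amount" loop above eval()s leading numeric words ("1", "1/2", ...) one at a
# time and stops at the first word that is not a bare number. A simplified,
# hypothetical standalone version of that idea, without the "inch" special case;
# nothing above calls it.
def _sketch_parse_amount(words):
    amount = 0
    while words:
        try:
            amount += eval(words[0])
            del words[0]
        except (SyntaxError, NameError, TypeError):
            break
    return amount, words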
| 40.085106 | 178 | 0.673744 |
b549aca55475496f51e360d4ae4c2b14de615b08 | 5,250 | py | Python | Cthulhu.py | Mr-Xcoder/Cthulhu | 9847bab9a31a542479d86148151262d620e1a51a | [
"MIT"
]
| 6 | 2017-07-16T18:26:11.000Z | 2021-08-12T04:34:31.000Z | Cthulhu.py | Mr-Xcoder/Cthulhu | 9847bab9a31a542479d86148151262d620e1a51a | [
"MIT"
]
| 3 | 2017-07-16T19:31:44.000Z | 2017-08-18T15:36:12.000Z | Cthulhu.py | Mr-Xcoder/Cthulhu | 9847bab9a31a542479d86148151262d620e1a51a | [
"MIT"
]
| null | null | null | import sympy
import math
import sys
pause = 0
stack = []
STDIN = []
register_1 = []
register_2 = []
digits = "0123456789"
if "--input" in sys.argv[1:]:
STDIN = open("input.txt", "r").read().split("\n")
else:
STDIN = sys.stdin.read().split("\n")
class Command:
global stack
arity: int = 0
func: callable = None
is_void: bool = False
neutral_elements: list = []
def __init__(self, arity: int, func: callable, is_void: bool = False, neutral_elements: list = []):
self.arity = arity
self.func = func
self.is_void = is_void
self.neutral_elements = neutral_elements
def call(self):
argument_list = []
for i in range(self.arity):
try:
argument_list = [stack.pop()] + argument_list
except IndexError:
argument_list.append(self.neutral_elements.pop(0) if self.neutral_elements else 0)
if self.is_void:
self.func(*argument_list)
else:
stack.append(self.func(*argument_list))
def primefac(x: int) -> list:
result = []
for factor, exponent in sympy.ntheory.factor_.factorint(int(x)).items():
result.extend([factor] * exponent)
return result
def wrap_stack():
global stack
stack = [stack]
def evaluate(x: object) -> object:
try:
return eval(x)
except (ValueError, SyntaxError):
return x
commands = {
# Bifunctions
'+': Command(2, (lambda x, y: x + y), neutral_elements=[0, 0]),
'-': Command(2, (lambda x, y: x - y), neutral_elements=[0, 0]),
'/': Command(2, (lambda x, y: x / y), neutral_elements=[1, 1]),
'*': Command(2, (lambda x, y: x * y), neutral_elements=[1, 1]),
'ˆ': Command(2, (lambda x, y: x ** y), neutral_elements=[1, 1]),
'%': Command(2, (lambda x, y: x % y), neutral_elements=[1, 1]),
':': Command(2, (lambda x, y: x // y), neutral_elements=[1, 1]),
'|': Command(2, (lambda x, y: x | y), neutral_elements=[1, 1]),
'X': Command(2, (lambda x, y: x ^ y), neutral_elements=[1, 1]),
'&': Command(2, (lambda x, y: x & y), neutral_elements=[1, 1]),
'>': Command(2, (lambda x, y: int(x > y)), neutral_elements=[2, 1]),
'<': Command(2, (lambda x, y: int(x < y)), neutral_elements=[0, 1]),
'≥': Command(2, (lambda x, y: int(x >= y)), neutral_elements=[1, 1]),
'≤': Command(2, (lambda x, y: int(x <= y)), neutral_elements=[1, 1]),
'=': Command(2, (lambda x, y: int(x == y)), neutral_elements=[1, 1]),
'≠': Command(2, (lambda x, y: int(x != y)), neutral_elements=[1, 1]),
# Unifunctions
',': Command(1, (lambda x: [stack.pop(-1 if x >= 0 else 0) for _ in range(abs(x))
if stack][::-1 if x >= 0 else 1])),
'_': Command(1, (lambda x: -x if isinstance(x, int) or isinstance(x, float) else x[::-1])),
'p': Command(1, (lambda x: sympy.primetest.isprime(int(x)) if x >= 0 else primefac(abs(x))), neutral_elements=[1]),
'd': Command(1, sympy.ntheory.factor_.divisors, neutral_elements=[1]),
'I': Command(1, (lambda x: evaluate(STDIN[x]) if len(STDIN) > x else
[1024, sympy.pi, math.e, "aeiou", "bcdfghjklmnpqrstvwxyz"][x % 5])),
'l': Command(1, str.lower, neutral_elements=["abcdefghijklmnopqrstuvwxyz"]),
'u': Command(1, str.upper, neutral_elements=["ABCDEFGHIJKLMNOPQRSTUVWXYZ"]),
'e': Command(1, (lambda x: 10 ** x), neutral_elements=[3]),
'±': Command(1, (lambda x: (x > 0) - (x < 0)), neutral_elements=[0]),
'!': Command(1, (lambda x: 1-x)),
# Invariant functions
'i': Command(0, (lambda x: evaluate(STDIN[0]) if STDIN else 100)),
'r': Command(0, (lambda x: STDIN[0] if STDIN else 2 ** 31 - 1)),
# Stack manipulators
's': Command(2, (lambda x, y: stack.extend([y, x])), is_void=True, neutral_elements=[0, 0]),
'S': Command(3, (lambda x, y, z: stack.extend([z, y, x])), is_void=True, neutral_elements=[0, 0, 0]),
'D': Command(1, (lambda x: stack.extend([x, x])), is_void=True, neutral_elements=[0]),
'T': Command(1, (lambda x: stack.extend([x, x, x])), is_void=True, neutral_elements=[0]),
'W': Command(0, wrap_stack, is_void=True)
}
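# Illustrative sketch (not part of the original interpreter): when the stack is
# empty, Command.call() pads missing arguments from neutral_elements, so '*'
# falls back to 1 * 1. Note that call() pops from the command's neutral_elements
# list in place, so later padded calls fall back to 0. This helper is only an
# illustration and is never invoked at import time.
def _demo_neutral_padding():
    commands['*'].call()  # on an empty stack: uses the neutral elements 1 and 1
    return stack.pop()    # -> 1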
def run(program):
global pause
global stack
global STDIN
global register_1
global register_2
for index, command in enumerate(program):
# Handling Multi-Byte commands
if pause:
pause -= 1
continue
# Handling strings
if command == '"' and (program[index - 1] != '\\' if index != 0 else True):
if (program[:index].count('"') - program[:index].count('\\"')) % 2 == 0:
stack.append("")
elif (program[:index].count('"') - program[:index].count('\\"')) % 2:
stack[-1] += command
# Handling number literals, which are special functions
elif command in digits:
# Multi-digit integers
if index != 0 and program[index - 1] in digits:
stack[-1] = stack[-1] * 10 + int(command)
# Pushing digits
else:
stack.append(int(command))
else:
if command in commands.keys():
commands[command].call()
return stack[-1]
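# Illustrative usage sketch (not part of the original file): assuming the digit
# handling above, a space separates two single-digit pushes, so in "2 3+" the
# '+' command pops 2 and 3 and leaves 5 on the stack. Guarded so it only runs
# when the module is executed directly (the module already reads stdin at
# import time unless --input is passed).
if __name__ == '__main__':
    print(run("2 3+"))  # '2' pushes 2, ' ' is a no-op, '3' pushes 3, '+' -> 5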
| 35.714286 | 119 | 0.563429 |
728daf07077b630f32f3fcf4550e08f4371c4da7 | 11,577 | py | Python | fanficfare/adapters/adapter_fanfictionjunkiesde.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | [
"Apache-2.0"
]
| 1 | 2019-06-13T11:20:33.000Z | 2019-06-13T11:20:33.000Z | fanficfare/adapters/adapter_fanfictionjunkiesde.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | [
"Apache-2.0"
]
| null | null | null | fanficfare/adapters/adapter_fanfictionjunkiesde.py | davidferguson/FanFicUpload | dcc3010b9c35c6d0e479cfc2aa07d951d280d9b2 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2015 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
import time
import logging
logger = logging.getLogger(__name__)
import re
import urllib2
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
from base_adapter import BaseSiteAdapter, makeDate
# By virtue of being recent and requiring both is_adult and user/pass,
# adapter_fanficcastletvnet.py is the best choice for learning to
# write adapters--especially for sites that use the eFiction system.
# Most sites that have ".../viewstory.php?sid=123" in the story URL
# are eFiction.
# For non-eFiction sites, it can be considerably more complex, but
# this is still a good starting point.
# In general an 'adapter' needs to do these five things:
# - 'Register' correctly with the downloader
# - Site Login (if needed)
# - 'Are you adult?' check (if needed--some do one, some the other, some both)
# - Grab the chapter list
# - Grab the story meta-data (some (non-eFiction) adapters have to get it from the author page)
# - Grab the chapter texts
# Search for XXX comments--that's where things are most likely to need changing.
# This function is called by the downloader in all adapter_*.py files
# in this dir to register the adapter class. So it needs to be
# updated to reflect the class below it. That, plus getSiteDomain()
# take care of 'Registering'.
def getClass():
return FanfictionJunkiesDeAdapter # XXX
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class FanfictionJunkiesDeAdapter(BaseSiteAdapter): # XXX
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.decode = ["Windows-1252",
"utf8"] # 1252 is a superset of iso-8859-1.
# Most sites that claim to be
# iso-8859-1 (and some that claim to be
# utf8) are really windows-1252.
self.username = "NoneGiven" # if left empty, site doesn't return any message at all.
self.password = ""
self.is_adult=False
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
# XXX Most sites don't have the /fanfic part. Replace all to remove it usually.
self._setURL('http://' + self.getSiteDomain() + '/efiction/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','ffjde') # XXX
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%d/%m/%y" # XXX
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'fanfiction-junkies.de' # XXX
@classmethod
def getSiteExampleURLs(cls):
return "http://"+cls.getSiteDomain()+"/efiction/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return re.escape("http://"+self.getSiteDomain()+"/efiction/viewstory.php?sid=")+r"\d+$"
## Login seems to be reasonably standard across eFiction sites.
def needToLoginCheck(self, data):
if 'Registered Users Only' in data \
or 'There is no such account on our website' in data \
or "That password doesn't match the one in our database" in data:
return True
else:
return False
def performLogin(self, url):
params = {}
if self.password:
params['penname'] = self.username
params['password'] = self.password
else:
params['penname'] = self.getConfig("username")
params['password'] = self.getConfig("password")
params['cookiecheck'] = '1'
params['submit'] = 'Submit'
loginUrl = 'http://' + self.getSiteDomain() + '/efiction/user.php?action=login'
logger.debug("Will now login to URL (%s) as (%s)" % (loginUrl,
params['penname']))
d = self._fetchUrl(loginUrl, params)
if "Member Account" not in d : #Member Account
logger.info("Failed to login to URL %s as %s" % (loginUrl,
params['penname']))
raise exceptions.FailedToLogin(url,params['penname'])
return False
else:
return True
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
if self.is_adult or self.getConfig("is_adult"):
# Weirdly, different sites use different warning numbers.
# If the title search below fails, there's a good chance
# you need a different number. print data at that point
# and see what the 'click here to continue' url says.
addurl = "&ageconsent=ok&warning=1" # XXX
else:
addurl=""
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
url = self.url+'&index=1'+addurl
logger.debug("URL: "+url)
try:
data = self._fetchUrl(url)
except urllib2.HTTPError, e:
if e.code == 404:
raise exceptions.StoryDoesNotExist(self.url)
else:
raise e
if self.needToLoginCheck(data):
# need to log in for this one.
self.performLogin(url)
data = self._fetchUrl(url)
# The actual text that is used to announce you need to be an
# adult varies from site to site. Again, print data before
# the title search to troubleshoot.
if "For adults only " in data: # XXX
raise exceptions.AdultCheckRequired(self.url)
if "Access denied. This story has not been validated by the adminstrators of this site." in data:
raise exceptions.AccessDenied(self.getSiteDomain() +" says: Access denied. This story has not been validated by the adminstrators of this site.")
# use BeautifulSoup HTML parser to make everything easier to find.
soup = self.make_soup(data)
# print data
# Now go hunting for all the meta data and the chapter list.
pagetitle = soup.find('h4')
## Title
a = pagetitle.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',a.string)
# Find authorid and URL from... author url.
a = pagetitle.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','http://'+self.host+'/efiction/'+a['href'])
self.story.setMetadata('author',a.string)
# Reviews
reviewdata = soup.find('div', {'id' : 'sort'})
a = reviewdata.findAll('a', href=re.compile(r'reviews.php\?type=ST&(amp;)?item='+self.story.getMetadata('storyId')+"$"))[1] # second one.
self.story.setMetadata('reviews',stripHTML(a))
# Find the chapters:
for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"&chapter=\d+$")):
# just in case there's tags, like <i> in chapter titles.
self.chapterUrls.append((stripHTML(chapter),'http://'+self.host+'/efiction/'+chapter['href']+addurl))
self.story.setMetadata('numChapters',len(self.chapterUrls))
# eFiction sites don't help us out a lot with their meta data
# formating, so it's a little ugly.
# utility method
def defaultGetattr(d,k):
try:
return d[k]
except:
return ""
# <span class="label">Rated:</span> NC-17<br /> etc
list = soup.find('div', {'class':'listbox'})
labels = list.findAll('b')
for labelspan in labels:
value = labelspan.nextSibling
label = labelspan.string
if 'Zusammenfassung' in label:
self.setDescription(url,value)
if 'Eingestuft' in label:
self.story.setMetadata('rating', value)
if u'Wörter' in label:
self.story.setMetadata('numWords', value)
if 'Kategorie' in label:
cats = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=categories'))
for cat in cats:
self.story.addToList('category',cat.string)
if 'Charaktere' in label:
chars = labelspan.parent.findAll('a',href=re.compile(r'browse.php\?type=characters'))
for char in chars:
self.story.addToList('characters',char.string)
if 'Abgeschlossen' in label:
if 'Yes' in value:
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if u'Veröffentlicht' in label:
self.story.setMetadata('datePublished', makeDate(stripHTML(value), self.dateformat))
if 'Aktualisiert' in label:
# there's a stray [ at the end.
#value = value[0:-1]
self.story.setMetadata('dateUpdated', makeDate(stripHTML(value), self.dateformat))
try:
# Find Series name from series URL.
a = soup.find('a', href=re.compile(r"viewseries.php\?seriesid=\d+"))
series_name = a.string
series_url = 'http://'+self.host+'/efiction/'+a['href']
# use BeautifulSoup HTML parser to make everything easier to find.
seriessoup = self.make_soup(self._fetchUrl(series_url))
storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
i=1
for a in storyas:
if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
self.setSeries(series_name, i)
self.story.setMetadata('seriesUrl',series_url)
break
i+=1
except:
# I find it hard to care if the series parsing fails
pass
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
soup = self.make_soup(self._fetchUrl(url))
div = soup.find('div', {'id' : 'story'})
if None == div:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
return self.utf8FromSoup(url,div)
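# Illustrative sketch (not part of FanFicFare): a minimal self-check that the
# URL pattern built in getSiteURLPattern() accepts the documented example URL.
# The helper name and this standalone use of the already-imported `re` module
# are assumptions for illustration only; nothing in the adapter calls it.
def _example_site_url_pattern_check():
    pattern = re.escape(
        "http://fanfiction-junkies.de/efiction/viewstory.php?sid=") + r"\d+$"
    example = "http://fanfiction-junkies.de/efiction/viewstory.php?sid=1234"
    return bool(re.match(pattern, example))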
| 39.783505 | 157 | 0.609398 |
d2624bfee5cfba25486d5d6c459131c5c6121c28 | 567 | py | Python | test_setup/urls.py | raratiru/django-letsagree | 3b11e55b44eb421789155b491a244985acc9fe26 | [
"BSD-3-Clause"
]
| 12 | 2019-04-11T11:41:06.000Z | 2021-09-28T15:29:48.000Z | test_setup/urls.py | raratiru/django-letsagree | 3b11e55b44eb421789155b491a244985acc9fe26 | [
"BSD-3-Clause"
]
| 16 | 2019-04-14T20:55:58.000Z | 2021-03-12T15:42:02.000Z | test_setup/urls.py | raratiru/django-letsagree | 3b11e55b44eb421789155b491a244985acc9fe26 | [
"BSD-3-Clause"
]
| 3 | 2019-05-12T19:51:32.000Z | 2020-10-19T13:52:09.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# File Name : test_setup.urls.py
#
# Creation Date : Tue 09 Apr 2019 03:25:45 AM EEST (03:25)
#
# Last Modified : Tue 09 Apr 2019 03:27:15 AM EEST (03:27)
#
# ==============================================================================
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
path("letsagree/", include("letsagree.urls")),
]
| 24.652174 | 80 | 0.447972 |
01d713246d778e6d3f978801015e3f9ee90eeac9 | 1,597 | py | Python | plant_vs_zoomie_game_normal02.py | ChengzhuLi/plantwarzombie | d9cb018d04df241846c49dbeabd52df688de5e5f | [
"MIT"
]
| 4 | 2020-08-09T00:25:13.000Z | 2021-05-08T09:42:45.000Z | plant_vs_zoomie_game_normal02.py | ChengzhuLi/plantwarzombie | d9cb018d04df241846c49dbeabd52df688de5e5f | [
"MIT"
]
| null | null | null | plant_vs_zoomie_game_normal02.py | ChengzhuLi/plantwarzombie | d9cb018d04df241846c49dbeabd52df688de5e5f | [
"MIT"
]
| 2 | 2021-01-16T10:18:17.000Z | 2021-04-15T12:01:04.000Z | import pygame
import os
from Peashooter import Peashooter
from SunFlower import SunFlower
from WallNut import WallNut
from Sun import Sun
pygame.init()
backgd_size = (1200, 600)
screen = pygame.display.set_mode(backgd_size)
pygame.display.set_caption('plant_vs_zoomie')
bg_img_path = 'material/images/background1.jpg'
bg_img_obj = pygame.image.load(bg_img_path).convert_alpha()
sunbank_img_path = 'material/images/SunBack.png'
sunbank_img_obj = pygame.image.load(sunbank_img_path).convert_alpha()
text = '900'
sun_font = pygame.font.SysFont('arial',25)
sun_num_surface = sun_font.render(text,True,(0,0,0))
peashooter = Peashooter()
sunflower = SunFlower()
wallnut = WallNut()
sunList = []
clock = pygame.time.Clock()
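# Illustrative helper (not part of the original game): clock.tick(20) caps the
# loop in main() below at roughly 20 frames per second, so the "index % 40 == 0"
# check fires about every 2 seconds. The helper name is an assumption; nothing
# in the game calls it.
def _frames_for_seconds(seconds, fps=20):
    return int(seconds * fps)  # e.g. _frames_for_seconds(2) == 40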
def main():
running = True
index = 0
while running:
if index >= 130:
index = 0
clock.tick(20)
# spawn a sun every 2 seconds
if index % 40 == 0:
sun = Sun(sunflower.rect)
sunList.append(sun)
screen.blit(bg_img_obj,(0,0))
screen.blit(sunbank_img_obj,(250,0))
screen.blit(sun_num_surface,(300,5))
screen.blit(peashooter.images[index%13],peashooter.rect)
screen.blit(sunflower.images[index % 13], sunflower.rect)
screen.blit(wallnut.images[index % 13], wallnut.rect)
for sun in sunList:
screen.blit(sun.images[index % 17], sun.rect)
index+=1
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.display.update()
if __name__ == '__main__':
main()
| 23.144928 | 69 | 0.657483 |
ea92e4ed2236e531804c563cb71058ac319dfdc4 | 4,899 | py | Python | grr/server/grr_response_server/authorization/client_approval_auth.py | JiYonG-Lee/grr | 57fef67080ac6b8fd3de3ba0adfca064d34b7689 | [
"Apache-2.0"
]
| 1 | 2020-06-25T14:25:51.000Z | 2020-06-25T14:25:51.000Z | grr/server/grr_response_server/authorization/client_approval_auth.py | JiYonG-Lee/grr | 57fef67080ac6b8fd3de3ba0adfca064d34b7689 | [
"Apache-2.0"
]
| 3 | 2021-05-11T20:18:38.000Z | 2022-03-02T09:33:56.000Z | grr/server/grr_response_server/authorization/client_approval_auth.py | JiYonG-Lee/grr | 57fef67080ac6b8fd3de3ba0adfca064d34b7689 | [
"Apache-2.0"
]
| 1 | 2020-06-25T14:25:54.000Z | 2020-06-25T14:25:54.000Z | #!/usr/bin/env python
# Lint as: python3
"""Client label approvals authorization manager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
from grr_response_core import config
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import acls_pb2
from grr_response_server import access_control
from grr_response_server.authorization import auth_manager
class Error(Exception):
"""Base class for user manager exception."""
class ErrorInvalidClientApprovalAuthorization(Error):
"""Used when an invalid ClientApprovalAuthorization is defined."""
class ErrorInvalidApprovers(Error):
"""Raised when approvers.yaml is invalid."""
class ErrorInvalidApprovalSpec(Error):
"""Raised when approval spec in approvers.yaml is invalid."""
class ClientApprovalAuthorization(rdf_structs.RDFProtoStruct):
"""Authorization to approve clients with a particular label."""
protobuf = acls_pb2.ClientApprovalAuthorization
@property
def label(self):
label = self.Get("label")
if not label:
raise ErrorInvalidClientApprovalAuthorization(
"label string cannot be empty")
return label
@label.setter
def label(self, value):
if not isinstance(value, str) or not value:
raise ErrorInvalidClientApprovalAuthorization(
"label must be a non-empty string")
self.Set("label", value)
@property
def users(self):
return self.Get("users")
@users.setter
def users(self, value):
if not isinstance(value, list):
raise ErrorInvalidClientApprovalAuthorization("users must be a list")
self.Set("users", value)
@property
def groups(self):
return self.Get("groups")
@groups.setter
def groups(self, value):
if not isinstance(value, list):
raise ErrorInvalidClientApprovalAuthorization("groups must be a list")
self.Set("groups", value)
@property
def key(self):
return self.Get("label")
class ClientApprovalAuthorizationManager(auth_manager.AuthorizationManager):
"""Manage client label approvers from approvers.yaml."""
def Initialize(self):
self.LoadApprovals()
def IsActive(self):
"""Does this manager have any rules loaded?"""
return bool(self.reader.auth_objects)
def LoadApprovals(self, yaml_data=None):
self.reader = auth_manager.AuthorizationReader()
# Clear out any previous approvals
if yaml_data:
self.reader.CreateAuthorizations(yaml_data, ClientApprovalAuthorization)
else:
config_filepath = config.CONFIG["ACL.approvers_config_file"]
with io.open(config_filepath, mode="r", encoding="utf-8") as fh:
self.reader.CreateAuthorizations(fh.read(), ClientApprovalAuthorization)
for approval_spec in self.reader.GetAllAuthorizationObjects():
for group in approval_spec.groups:
self.AuthorizeGroup(group, approval_spec.label)
for user in approval_spec.users:
self.AuthorizeUser(user, approval_spec.label)
def CheckApproversForLabel(self, token, client_id, requester, approvers,
label):
"""Checks if requester and approvers have approval privileges for labels.
Checks against list of approvers for each label defined in approvers.yaml to
determine if the list of approvers is sufficient.
Args:
token: user token
client_id: Client ID of the client
requester: username string of person requesting approval.
approvers: list of username strings that have approved this client.
label: label strings to check approval privs for.
Returns:
True if access is allowed, raises otherwise.
"""
auth = self.reader.GetAuthorizationForSubject(label)
if not auth:
# This label isn't listed in approvers.yaml
return True
if auth.requester_must_be_authorized:
if not self.CheckPermissions(requester, label):
raise access_control.UnauthorizedAccess(
"User %s not in %s or groups:%s for %s" %
(requester, auth.users, auth.groups, label),
subject=client_id,
requested_access=token.requested_access)
approved_count = 0
for approver in approvers:
if self.CheckPermissions(approver, label) and approver != requester:
approved_count += 1
if approved_count < auth.num_approvers_required:
raise access_control.UnauthorizedAccess(
"Found %s approvers for %s, needed %s" %
(approved_count, label, auth.num_approvers_required),
subject=client_id,
requested_access=token.requested_access)
return True
CLIENT_APPROVAL_AUTH_MGR = None
@utils.RunOnce
def InitializeClientApprovalAuthorizationManagerOnce():
global CLIENT_APPROVAL_AUTH_MGR
CLIENT_APPROVAL_AUTH_MGR = ClientApprovalAuthorizationManager()
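# Illustrative sketch (not part of GRR): the property setters above validate the
# fields that approvers.yaml supplies for each label. The concrete values here
# are assumptions for illustration only; this helper is never called.
def _example_client_approval_authorization():
  auth = ClientApprovalAuthorization()
  auth.label = 'sensitive_clients'      # must be a non-empty string
  auth.users = ['alice', 'bob']         # must be a list
  auth.groups = ['incident-response']   # must be a list
  return auth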
| 30.811321 | 80 | 0.729333 |
c4cfca965c8f1079088ef3ceeae11898de7fefa0 | 1,633 | py | Python | CSV2Paper/base_window.py | jcedmiston/CSV2PAPER | b57533f49bd2585615356e14973c227a7c59be96 | [
"MIT"
]
| 1 | 2021-12-23T00:58:32.000Z | 2021-12-23T00:58:32.000Z | CSV2Paper/base_window.py | jcedmiston/CSV2Paper | b57533f49bd2585615356e14973c227a7c59be96 | [
"MIT"
]
| null | null | null | CSV2Paper/base_window.py | jcedmiston/CSV2Paper | b57533f49bd2585615356e14973c227a7c59be96 | [
"MIT"
]
| null | null | null | from os.path import join
from files import __location__
from user_settings import UserSettings
class BaseWindow:
def __init__(self, user_settings: UserSettings):
self.user_settings = user_settings
self.window_bg = None
self.widget_bg = None
self.fg = None
self.insert_bg = None
self.disabled_bg = 'gray80'
self.select_bg = None
self.folder_icon_file = None
self.up_arrow_icon_file = None
self.down_arrow_icon_file = None
self.set_colors()
def set_colors(self):
if self.user_settings.dark_mode_enabled:
self.window_bg = 'gray15'
self.widget_bg = 'gray35'
self.fg = 'white'
self.insert_bg = 'white'
self.disabled_bg = 'gray20'
self.select_bg = 'gray30'
self.folder_icon_file = join(__location__, 'resources', 'folder_open', '2x', 'sharp_folder_open_white_48dp.png')
self.up_arrow_icon_file = join(__location__, 'resources', 'cheveron_up', '2x', 'sharp_chevron_up_white_48dp.png')
self.down_arrow_icon_file = join(__location__, 'resources', 'cheveron_down', '2x', 'sharp_chevron_down_white_48dp.png')
else:
self.window_bg = 'SystemButtonFace'
self.widget_bg = 'SystemWindow'
self.fg = 'SystemWindowText'
self.insert_bg = 'SystemWindowText'
self.disabled_bg = 'gray80'
self.select_bg = 'SystemWindow'
self.folder_icon_file = join(__location__, 'resources', 'folder_open', '2x', 'sharp_folder_open_black_48dp.png')
self.up_arrow_icon_file = join(__location__, 'resources', 'cheveron_up', '2x', 'sharp_chevron_up_black_48dp.png')
self.down_arrow_icon_file = join(__location__, 'resources', 'cheveron_down', '2x', 'sharp_chevron_down_black_48dp.png')
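# Illustrative sketch (not part of CSV2Paper): BaseWindow only reads
# `dark_mode_enabled` from the settings object, so a stand-in namespace is
# enough to exercise set_colors(). SimpleNamespace here is an assumption in
# place of the real UserSettings; guarded so it never runs on import.
if __name__ == '__main__':
    from types import SimpleNamespace
    window = BaseWindow(SimpleNamespace(dark_mode_enabled=True))
    print(window.window_bg)  # 'gray15' for the dark palette above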
| 37.976744 | 122 | 0.748928 |
45da506627fe573e4b27013fe366a130fe8bef07 | 87,974 | py | Python | src/python/tests/appengine/handlers/cron/cleanup_test.py | mukundv-chrome/clusterfuzz | 5e232f9d8e55bd7b11e706cad04963c20bac1d0e | [
"Apache-2.0"
]
| null | null | null | src/python/tests/appengine/handlers/cron/cleanup_test.py | mukundv-chrome/clusterfuzz | 5e232f9d8e55bd7b11e706cad04963c20bac1d0e | [
"Apache-2.0"
]
| 1 | 2019-06-07T21:29:28.000Z | 2019-06-07T21:29:28.000Z | src/python/tests/appengine/handlers/cron/cleanup_test.py | mukundv-chrome/clusterfuzz | 5e232f9d8e55bd7b11e706cad04963c20bac1d0e | [
"Apache-2.0"
]
| null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cleanup task."""
# pylint: disable=protected-access
from builtins import range
import datetime
import unittest
from datastore import data_types
from handlers.cron import cleanup
from libs.issue_management import issue_tracker_policy
from tests.test_libs import appengine_test_utils
from tests.test_libs import helpers
from tests.test_libs import test_utils
ISSUE_IGNORE_LABEL = 'ClusterFuzz-Ignore'
ISSUE_INVALID_FUZZER_LABEL = 'ClusterFuzz-Invalid-Fuzzer'
ISSUE_MISTRIAGED_LABEL = 'ClusterFuzz-Wrong'
ISSUE_NEEDS_FEEDBACK_LABEL = 'Needs-Feedback'
ISSUE_VERIFIED_LABEL = 'ClusterFuzz-Verified'
ISSUE_FUZZ_BLOCKER_LABEL = 'Fuzz-Blocker'
@test_utils.with_cloud_emulators('datastore')
class GetPredatorResultItemTest(unittest.TestCase):
"""Tests for the get_predator_result_item helper function."""
def test_with_components(self):
"""Ensure that we return the components for test cases which have them."""
result_with_component = {'result': {'suspected_components': ['A', 'B>C']}}
testcase = test_utils.create_generic_testcase()
testcase.set_metadata('predator_result', result_with_component)
actual = cleanup._get_predator_result_item(testcase, 'suspected_components')
self.assertListEqual(actual, ['A', 'B>C'])
def test_no_components(self):
"""Ensure that we handle cases with a result, but no components field."""
result_no_component = {'result': {}}
testcase = test_utils.create_generic_testcase()
testcase.set_metadata('predator_result', result_no_component)
actual = cleanup._get_predator_result_item(testcase, 'suspected_components')
self.assertIsNone(actual)
def test_no_result(self):
"""Ensure that we handle cases without a predator result."""
testcase = test_utils.create_generic_testcase()
testcase.delete_metadata('predator_result')
actual = cleanup._get_predator_result_item(
testcase, 'suspected_components', default=[])
self.assertListEqual(actual, [])
@test_utils.with_cloud_emulators('datastore')
class CleanupTest(unittest.TestCase):
"""Tests for various cleanup functions."""
def setUp(self):
helpers.patch(self, [
'base.utils.utcnow',
'handlers.cron.cleanup.get_crash_occurrence_platforms',
])
self.mock.utcnow.return_value = test_utils.CURRENT_TIME
self.issue = appengine_test_utils.create_generic_issue()
self.policy = issue_tracker_policy.get('test-project')
def test_mark_duplicate_testcase_as_closed_with_no_issue_1(self):
"""Ensure that a regular bug older than 7 days does not get closed."""
testcase = test_utils.create_generic_testcase(
created_days_ago=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE + 1)
testcase.status = 'Processed'
testcase.put()
cleanup.mark_duplicate_testcase_as_closed_with_no_issue(testcase=testcase)
self.assertTrue(testcase.open)
def test_mark_duplicate_testcase_as_closed_with_no_issue_2(self):
"""Ensure that a duplicate bug older than 7 days, with an associated
issue does not get closed."""
testcase = test_utils.create_generic_testcase(
created_days_ago=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE + 1)
testcase.bug_information = str(self.issue.id)
testcase.status = 'Duplicate'
testcase.put()
cleanup.mark_duplicate_testcase_as_closed_with_no_issue(testcase=testcase)
self.assertTrue(testcase.open)
def test_mark_duplicate_testcase_as_closed_with_no_issue_3(self):
"""Ensure that a duplicate bug older than 7 days, with no associated
issue does get closed."""
testcase = test_utils.create_generic_testcase(
created_days_ago=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE + 1)
testcase.bug_information = ''
testcase.status = 'Duplicate'
testcase.put()
cleanup.mark_duplicate_testcase_as_closed_with_no_issue(testcase=testcase)
self.assertFalse(testcase.open)
def test_mark_duplicate_testcase_as_closed_with_no_issue_4(self):
"""Ensure that a duplicate bug 7 days old does not get closed."""
testcase = test_utils.create_generic_testcase(
created_days_ago=data_types.DUPLICATE_TESTCASE_NO_BUG_DEADLINE)
testcase.bug_information = ''
testcase.status = 'Duplicate'
testcase.put()
cleanup.mark_duplicate_testcase_as_closed_with_no_issue(testcase=testcase)
self.assertTrue(testcase.open)
def test_delete_unreproducible_testcase_with_no_issue_1(self):
"""Ensure that a reproducible bug with no crash in last 7 days, and with an
associated issue does not get deleted."""
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.delete_unreproducible_testcase_with_no_issue(testcase=testcase)
self.assertTrue(test_utils.entity_exists(testcase))
def test_delete_unreproducible_testcase_with_no_issue_2(self):
"""Ensure that an unreproducible bug with no crash in last 7 days, with an
associated issue does not get deleted."""
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.delete_unreproducible_testcase_with_no_issue(testcase=testcase)
self.assertTrue(test_utils.entity_exists(testcase))
def test_delete_unreproducible_testcase_with_no_issue_3(self):
"""Ensure that an unreproducible bug with no crash in last 7 days, and with
no associated issue does get deleted."""
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = ''
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.delete_unreproducible_testcase_with_no_issue(testcase=testcase)
self.assertFalse(test_utils.entity_exists(testcase))
def test_delete_unreproducible_testcase_with_no_issue_4(self):
"""Ensure that an unreproducible bug with crash in last 7 days does not get
deleted."""
self.mock.get_crash_occurrence_platforms.return_value = ['Linux']
testcase = test_utils.create_generic_testcase()
testcase.one_time_crasher_flag = True
testcase.bug_information = ''
testcase.put()
cleanup.delete_unreproducible_testcase_with_no_issue(testcase=testcase)
self.assertTrue(test_utils.entity_exists(testcase))
def test_delete_unreproducible_testcase_with_no_issue_5(self):
"""Ensure that an unreproducible bug created in last 7 days and with crash
seen in last 7 days does not get deleted."""
self.mock.get_crash_occurrence_platforms.return_value = ['Linux']
testcase = test_utils.create_generic_testcase(
created_days_ago=data_types.UNREPRODUCIBLE_TESTCASE_NO_BUG_DEADLINE - 1)
testcase.one_time_crasher_flag = True
testcase.bug_information = ''
testcase.put()
cleanup.delete_unreproducible_testcase_with_no_issue(testcase=testcase)
self.assertTrue(test_utils.entity_exists(testcase))
def test_mark_issue_as_closed_if_testcase_is_fixed_1(self):
"""Ensure that we don't close issue if associated testcase is open and
reproducible."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.open = True
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotEqual(self.issue.status, 'Verified')
self.assertEqual('', self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_2(self):
"""Ensure that we don't close issue if associated testcase is open and
unreproducible."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.open = True
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotEqual(self.issue.status, 'Verified')
self.assertEqual('', self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_3(self):
"""Ensure that we close issue if associated testcase is unreproducible, but
is explicitly marked as closed."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.open = False
testcase.fixed = 'Yes'
testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertEqual(self.issue.status, 'Verified')
self.assertIn('ClusterFuzz testcase 1 is verified as fixed.',
self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_4(self):
"""Ensure that we close issue if associated testcase is closed and
reproducible, and the similar open testcase is unreproducible."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.fixed = '1:2'
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
similar_testcase = test_utils.create_generic_testcase()
similar_testcase.bug_information = str(self.issue.id)
similar_testcase.one_time_crasher_flag = True
similar_testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertEqual(self.issue.status, 'Verified')
self.assertIn(
'ClusterFuzz testcase 1 is verified as fixed in '
'https://test-clusterfuzz.appspot.com/revisions'
'?job=test_content_shell_drt&range=1:2',
self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_5(self):
"""Ensure that we don't close issue if associated testcase is closed and
reproducible, but a similar testcase is still open."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.fixed = '1:2'
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
similar_testcase = test_utils.create_generic_testcase()
similar_testcase.bug_information = str(self.issue.id)
similar_testcase.one_time_crasher_flag = False
similar_testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotEqual(self.issue.status, 'Verified')
self.assertEqual('', self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_6(self):
"""Ensure that we close issue if all associated testcases are closed and
reproducible."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.fixed = '1:2'
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
similar_testcase = test_utils.create_generic_testcase()
similar_testcase.bug_information = str(self.issue.id)
similar_testcase.open = False
similar_testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertEqual(self.issue.status, 'Verified')
self.assertIn(
'ClusterFuzz testcase 1 is verified as fixed in '
'https://test-clusterfuzz.appspot.com/revisions'
'?job=test_content_shell_drt&range=1:2',
self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_7(self):
"""Ensure that we close issue if issue is marked fixed and all associated
testcases are closed and reproducible."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.fixed = '1:2'
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
similar_testcase = test_utils.create_generic_testcase()
similar_testcase.bug_information = str(self.issue.id)
similar_testcase.open = False
similar_testcase.put()
self.issue._monorail_issue.open = False
self.issue.status = 'Fixed'
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertEqual(self.issue.status, 'Verified')
self.assertIn(
'ClusterFuzz testcase 1 is verified as fixed in '
'https://test-clusterfuzz.appspot.com/revisions'
'?job=test_content_shell_drt&range=1:2',
self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_8(self):
"""Ensure that we don't close issue when we already did the issue
verification once."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.fixed = '1:2'
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
self.issue.status = 'Assigned'
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_VERIFIED_LABEL])
]
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotEqual(self.issue.status, 'Verified')
self.assertEqual('', self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_9(self):
"""Ensure that we don't close issue if a developer has labeled the last
verification as incorrect."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.fixed = '1:2'
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
self.issue.status = 'Assigned'
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_MISTRIAGED_LABEL])
]
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotEqual(self.issue.status, 'Verified')
self.assertEqual('', self.issue._monorail_issue.comment)
def test_mark_issue_as_closed_if_testcase_is_fixed_10(self):
"""Ensure that we don't close issue when this is unreproducible upload."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.status = 'Unreproducible (trunk)'
testcase.fixed = ''
testcase.open = False
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.mark_issue_as_closed_if_testcase_is_fixed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotEqual(self.issue.status, 'Verified')
self.assertEqual('', self.issue._monorail_issue.comment)
def test_mark_testcase_as_closed_if_issue_is_closed_1(self):
"""Test that we don't do anything if testcase is already closed."""
testcase = test_utils.create_generic_testcase()
testcase.open = False
testcase.put()
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertFalse(testcase.open)
def test_mark_testcase_as_closed_if_issue_is_closed_2(self):
"""Test that we don't do anything if we are unable to get issue object from
issue tracker."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=None)
self.assertTrue(testcase.open)
def test_mark_testcase_as_closed_if_issue_is_closed_3(self):
"""Test that we don't do anything if there is no associated issue i.e.
bug_information is not set."""
testcase = test_utils.create_generic_testcase()
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_testcase_as_closed_if_issue_is_closed_4(self):
"""Test that we don't do anything if issue is still open."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = True
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_testcase_as_closed_if_issue_is_closed_5(self):
"""Test that we don't do anything if there is a ignore label on issue."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE + 1))
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_IGNORE_LABEL])
]
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_testcase_as_closed_if_issue_is_closed_6(self):
"""Test that we don't close testcase if issue is closed <= 2 weeks and
does not have ignore label."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE))
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_testcase_as_closed_if_issue_is_closed_7(self):
"""Test that we close testcase if issue is closed longer than 2 weeks and
does not have ignore label."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.CLOSE_TESTCASE_WITH_CLOSED_BUG_DEADLINE + 1))
cleanup.mark_testcase_as_closed_if_issue_is_closed(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertFalse(testcase.open)
def test_mark_testcase_as_closed_if_job_is_invalid_1(self):
"""Test that we don't close testcase if we have a valid job type."""
testcase = test_utils.create_generic_testcase()
jobs = [testcase.job_type]
cleanup.mark_testcase_as_closed_if_job_is_invalid(
testcase=testcase, jobs=jobs)
self.assertTrue(testcase.open)
def test_mark_testcase_as_closed_if_job_is_invalid_2(self):
"""Test that we close testcase if we don't have a job type."""
testcase = test_utils.create_generic_testcase()
jobs = []
cleanup.mark_testcase_as_closed_if_job_is_invalid(
testcase=testcase, jobs=jobs)
self.assertFalse(testcase.open)
def test_mark_unreproducible_testcase_as_fixed_if_issue_is_closed_1(self):
"""Ensure that a reproducible testcase with no associated issue is not
marked as Fixed."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = ''
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
testcase=testcase, issue=None)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_as_fixed_if_issue_is_closed_2(self):
"""Ensure that a reproducible testcase with associated issue in open state
is not marked as Fixed."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_as_fixed_if_issue_is_closed_3(self):
"""Ensure that a reproducible testcase with associated issue in closed state
is not marked as Fixed."""
self.issue._monorail_issue.open = False
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_as_fixed_if_issue_is_closed_4(self):
"""Ensure that an unreproducible testcase with associated issue in open
state is not marked as Fixed."""
self.issue._monorail_issue.open = True
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_as_fixed_if_issue_is_closed_5(self):
"""Ensure that an unreproducible testcase with associated issue in closed
state is marked as Fixed."""
self.issue._monorail_issue.open = False
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_as_fixed_if_issue_is_closed(
testcase=testcase, issue=self.issue)
self.assertFalse(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_1(
self):
"""Ensure that a reproducible testcase with no associated issue is not
closed."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = ''
testcase.one_time_crasher_flag = False
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=None)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_2(
self):
"""Ensure that an unreproducible testcase with no associated issue is not
closed."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = ''
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=None)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_3(
self):
"""Ensure that an unreproducible testcase with a closed issue is not
closed."""
self.issue._monorail_issue.open = False
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_4(
self):
"""Ensure that an unreproducible testcase with an open issue and with crash
still seen in crash stats is not closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = ['Linux']
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_5(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
seen in crash stats, but with other open reproducible testcase is not
closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
similar_testcase = test_utils.create_generic_testcase()
similar_testcase.bug_information = str(self.issue.id)
similar_testcase.one_time_crasher_flag = False
similar_testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_6(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
    seen in crash stats, but with a mistriaged issue label is not closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_MISTRIAGED_LABEL])
]
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_7(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
seen in crash stats, and status as Unreproducible does not lead to closing
of issue."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.status = 'Unreproducible'
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
self.assertEqual('Assigned', self.issue.status)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_8(
self):
"""Ensure that an unreproducible testcase with an open issue, created within
    the deadline and crash seen in crash stats is not closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = ['Linux']
testcase = test_utils.create_generic_testcase(
created_days_ago=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE -
1)
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
self.assertEqual('Assigned', self.issue.status)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_9(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
seen in crash stats, is closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertFalse(testcase.open)
self.assertEqual('WontFix', self.issue.status)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_10(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
seen in crash stats, but with an uploader email is not closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.uploader_email = '[email protected]'
testcase.put()
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
self.assertEqual('Assigned', self.issue.status)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_11(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
    seen in crash stats, but reproduced yesterday as part of the progression task
is not closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
testcase.set_metadata('last_tested_crash_time', test_utils.CURRENT_TIME)
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertTrue(testcase.open)
self.assertEqual('Assigned', self.issue.status)
def test_mark_unreproducible_testcase_and_issue_as_closed_after_deadline_12(
self):
"""Ensure that an unreproducible testcase with an open issue, with crash not
seen in crash stats and progression task is closed."""
self.issue = appengine_test_utils.create_generic_issue()
self.mock.get_crash_occurrence_platforms.return_value = []
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.one_time_crasher_flag = True
testcase.put()
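    # Pretend the last successful reproduction (progression task) happened well
    # before the unreproducible-testcase deadline, so closing is expected.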
testcase.set_metadata(
'last_tested_crash_time',
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.UNREPRODUCIBLE_TESTCASE_WITH_BUG_DEADLINE + 1))
cleanup.mark_unreproducible_testcase_and_issue_as_closed_after_deadline(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertFalse(testcase.open)
self.assertEqual('WontFix', self.issue.status)
def test_notify_closed_issue_if_testcase_is_open_1(self):
"""Test that we don't do anything if testcase is already closed."""
testcase = test_utils.create_generic_testcase()
testcase.open = False
testcase.put()
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_2(self):
"""Test that we don't do anything if testcase has status unreproducible
(upload didn't reproduce)."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.status = 'Unreproducible'
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE + 1))
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_3(self):
"""Test that we don't do anything if we are unable to get issue object from
issue tracker."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=None)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_4(self):
"""Test that we don't do anything if there is no associated issue i.e.
bug_information is not set."""
testcase = test_utils.create_generic_testcase()
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_5(self):
"""Test that we don't do anything if issue is still open."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = True
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_6(self):
"""Test that we don't do anything if we have not exceeded the notification
deadline."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE))
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_7(self):
"""Test that we don't do anything if there is an ignore label already on
the issue."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE + 1))
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_IGNORE_LABEL])
]
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_8(self):
"""Test that we don't do anything if there is an needs feedback label
already on the issue."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE + 1))
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_NEEDS_FEEDBACK_LABEL])
]
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertNotIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
def test_notify_closed_issue_if_testcase_is_open_9(self):
"""Test that we add notification if we are past the notification deadline
and we have not added a needs feedback already."""
testcase = test_utils.create_generic_testcase()
testcase.bug_information = str(self.issue.id)
testcase.put()
self.issue._monorail_issue.open = False
self.issue._monorail_issue.closed = (
test_utils.CURRENT_TIME - datetime.timedelta(
days=data_types.NOTIFY_CLOSED_BUG_WITH_OPEN_TESTCASE_DEADLINE + 1))
cleanup.notify_closed_issue_if_testcase_is_open(
policy=self.policy, testcase=testcase, issue=self.issue)
self.assertIn(ISSUE_NEEDS_FEEDBACK_LABEL, self.issue.labels)
@test_utils.with_cloud_emulators('datastore')
class UpdateOsLabelsTest(unittest.TestCase):
"""Test updateOsLabels."""
def setUp(self):
helpers.patch(self, [
'metrics.crash_stats.get',
])
self.policy = issue_tracker_policy.get('test-project')
def test_no_issue(self):
"""Test no issue."""
testcase = data_types.Testcase(
crash_type='type',
crash_state='state',
security_flag=True,
project_name='project')
testcase.put()
cleanup.update_os_labels(self.policy, testcase, None)
def test_labels_added(self):
"""Test os label added from crash stats."""
testcase = data_types.Testcase(
crash_type='type',
crash_state='state',
security_flag=True,
project_name='project')
testcase.put()
history = data_types.BuildCrashStatsJobHistory(end_time_in_hours=10000)
history.put()
rows = [{
'groups': [{
'name': 'windows'
}, {
'name': 'linux'
}, {
'name': 'mac'
}, {
'name': 'android:test'
}, {
'name': 'android:test2'
}]
}]
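    # Crash stats groups are raw platform names; update_os_labels is expected to
    # map them to OS-* labels, collapsing the android:* variants into a single
    # OS-Android entry (checked below).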
self.mock.get.return_value = (1, rows)
issue = appengine_test_utils.create_generic_issue()
issue._monorail_issue.labels = []
cleanup.update_os_labels(self.policy, testcase, issue)
self.assertEqual({'OS-Windows', 'OS-Linux', 'OS-Mac', 'OS-Android'},
set(issue.labels))
self.mock.get.assert_called_once_with(
end=10000,
block='day',
days=1,
group_by='platform',
where_clause=('crash_type = "type" AND crash_state = "state" AND '
'security_flag = true AND project = "project"'),
group_having_clause='',
sort_by='total_count',
offset=0,
limit=1)
def test_labels_not_added(self):
"""Test os labels not added from crash stats."""
testcase = data_types.Testcase(
crash_type='type',
crash_state='state',
security_flag=True,
project_name='project')
testcase.put()
history = data_types.BuildCrashStatsJobHistory(end_time_in_hours=10000)
history.put()
rows = [{
'groups': [{
'name': 'windows'
}, {
'name': 'linux'
}, {
'name': 'mac'
}, {
'name': 'android:test'
}, {
'name': 'android:test2'
}]
}]
self.mock.get.return_value = (1, rows)
issue = appengine_test_utils.create_generic_issue()
issue._monorail_issue.labels = []
comment = appengine_test_utils.create_generic_issue_comment(
labels=['OS-Mac', 'OS-Android'])
issue._monorail_issue.comments.append(comment)
issue.labels.add('OS-Windows')
cleanup.update_os_labels(self.policy, testcase, issue)
self.assertEqual({'OS-Windows', 'OS-Linux'}, set(issue.labels))
@test_utils.with_cloud_emulators('datastore')
class GetJobsAndPlatformsForTopCrashesTest(unittest.TestCase):
"""Test get_jobs_and_platforms_for_top_crashes."""
def setUp(self):
data_types.Job(
name='job1',
platform='LINUX',
environment_string=('EXPERIMENTAL = True\n')).put()
data_types.Job(
name='job2',
platform='MAC',
environment_string=('CUSTOM_BINARY = True\n')).put()
data_types.Job(
name='job3',
platform='WINDOWS',
environment_string=(
'SYSTEM_BINARY_DIR = C:\\Program Files\\Internet Explorer\\\n'
)).put()
data_types.Job(
name='job4',
platform='ANDROID',
environment_string=('EXCLUDE_FROM_TOP_CRASHES = True\n')).put()
data_types.Job(name='job5', platform='LINUX', environment_string=('')).put()
data_types.Job(name='job6', platform='MAC', environment_string=('')).put()
def test(self):
actual_jobs, actual_platforms = (
cleanup.get_jobs_and_platforms_for_top_crashes())
expected_jobs = {'job5', 'job6'}
expected_platforms = {'LINUX', 'MAC'}
self.assertEqual(expected_jobs, actual_jobs)
self.assertEqual(expected_platforms, actual_platforms)
@test_utils.with_cloud_emulators('datastore')
class GetTopCrashesForAllProjectsAndPlatforms(unittest.TestCase):
"""Test get_top_crashes_for_all_projects_and_platforms."""
def setUp(self):
helpers.patch(self, [
'metrics.crash_stats.get',
'metrics.crash_stats.get_last_successful_hour',
])
self.top_crashes_rows = [
{
'crashState': 'state1',
'crashType': 'type1',
'isSecurity': True,
'totalCount': 350
},
{
'crashState': 'state2',
'crashType': 'type2',
'isSecurity': False,
'totalCount': 450
},
{
'crashState': 'state3',
'crashType': 'type3',
'isSecurity': False,
'totalCount': 250
},
]
self.mock.get.return_value = (1, self.top_crashes_rows)
self.mock.get_last_successful_hour.return_value = 10000
data_types.Job(name='job', platform='LINUX', environment_string=('')).put()
test_utils.create_generic_testcase()
def test(self):
"""Test."""
expected_top_crashes_map = {
u'project': {
'LINUX': [{
'crashState': 'state1',
'crashType': 'type1',
'isSecurity': True,
'totalCount': 350
}, {
'crashState': 'state2',
'crashType': 'type2',
'isSecurity': False,
'totalCount': 450
}]
}
}
actual_top_crashes_map = (
cleanup.get_top_crashes_for_all_projects_and_platforms())
self.assertEqual(expected_top_crashes_map, actual_top_crashes_map)
self.mock.get.assert_called_once_with(
end=10000,
block='day',
days=7,
group_by='platform',
where_clause=(
'crash_type NOT IN UNNEST('
'["Hang", "Out-of-memory", "Stack-overflow", "Timeout"]) AND '
'crash_state NOT IN UNNEST(["NULL"]) AND '
'job_type IN UNNEST(["job"]) AND '
'platform LIKE "linux%" AND '
'project = "project" AND '
'crash_state NOT LIKE "%Zygote%" AND '
'crash_state NOT LIKE "%__printf_chk%" AND '
'crash_state NOT LIKE "%gtk_%" AND '
'crash_state NOT LIKE "%sandbox::%"'),
group_having_clause='',
sort_by='total_count',
offset=0,
limit=5)
@test_utils.with_cloud_emulators('datastore')
class UpdateTopCrashLabelsTest(unittest.TestCase):
"""Test update_fuzz_blocker_label."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'base.utils.is_oss_fuzz',
'base.utils.is_chromium',
'chrome.build_info.get_release_milestone',
])
self.mock.get_release_milestone.return_value = 63
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
self.mock.is_chromium.return_value = True
self.mock.is_oss_fuzz.return_value = False
self.policy = issue_tracker_policy.get('test-project')
def test_no_top_crashes(self):
"""Test no label is added if there are no top crashes."""
top_crashes_by_project_and_platform_map = {u'project': {'LINUX': []}}
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertNotIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertNotIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertNotIn('M-63', self.issue.labels)
self.assertEqual('', self.issue._monorail_issue.comment)
def test_top_crashes_no_match(self):
"""Test no label is added if there are no matching top crashes."""
top_crashes_by_project_and_platform_map = {
u'project': {
'LINUX': [{
'crashState': 'state1',
'crashType': 'type1',
'isSecurity': True,
'totalCount': 500
}]
}
}
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertNotIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertNotIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertNotIn('M-63', self.issue.labels)
self.assertEqual('', self.issue._monorail_issue.comment)
def test_top_crashes_with_testcase_closed(self):
"""Test label is not added if testcase is closed."""
self.testcase.open = False
self.testcase.put()
top_crashes_by_project_and_platform_map = {
u'project': {
'LINUX': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 350
}]
}
}
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertNotIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertNotIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertNotIn('M-63', self.issue.labels)
self.assertEqual('', self.issue._monorail_issue.comment)
def test_top_crashes_match_single_platform(self):
"""Test label is added if there is a matching top crash."""
top_crashes_by_project_and_platform_map = {
u'project': {
'LINUX': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 350
}]
}
}
self.issue.labels.add('M-62')
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertIn('M-63', self.issue.labels)
self.assertNotIn('M-62', self.issue.labels)
self.assertEqual(
'This crash occurs very frequently on linux platform and is likely '
'preventing the fuzzer fuzzer1 from making much progress. '
'Fixing this will allow more bugs to be found.'
'\n\nMarking this bug as a blocker for next Beta release.'
'\n\nIf this is incorrect, please add ClusterFuzz-Wrong label and '
'remove the ReleaseBlock-Beta label.',
self.issue._monorail_issue.comment)
def test_top_crashes_match_single_platform_oss_fuzz(self):
"""Test label is added if there is a matching top crash for external
project."""
self.mock.is_oss_fuzz.return_value = True
self.testcase.set_metadata('fuzzer_binary_name', 'fuzz_target1')
top_crashes_by_project_and_platform_map = {
u'project': {
'LINUX': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 350
}]
}
}
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertNotIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertNotIn('M-63', self.issue.labels)
self.assertEqual(
'This crash occurs very frequently on linux platform and is likely '
'preventing the fuzzer fuzz_target1 from making much progress. '
'Fixing this will allow more bugs to be found.'
'\n\nIf this is incorrect, please file a bug on '
'https://github.com/google/oss-fuzz/issues/new',
self.issue._monorail_issue.comment)
def test_top_crashes_match_multiple_platforms(self):
"""Test label is added if there is a matching top crash."""
top_crashes_by_project_and_platform_map = {
u'project': {
'LINUX': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 500
}],
'MAC': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 600
}],
'WINDOWS': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 700
}]
}
}
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertIn('M-63', self.issue.labels)
self.assertEqual(
'This crash occurs very frequently on linux, mac and windows platforms '
'and is likely preventing the fuzzer fuzzer1 from making much '
'progress. Fixing this will allow more bugs to be found.'
'\n\nMarking this bug as a blocker for next Beta release.'
'\n\nIf this is incorrect, please add ClusterFuzz-Wrong label and '
'remove the ReleaseBlock-Beta label.',
self.issue._monorail_issue.comment)
def test_top_crashes_match_and_label_removed(self):
"""Test label is not added if it was added before and removed."""
top_crashes_by_project_and_platform_map = {
u'project': {
'LINUX': [{
'crashState': self.testcase.crash_state,
'crashType': self.testcase.crash_type,
'isSecurity': self.testcase.security_flag,
'totalCount': 500
}]
}
}
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_FUZZ_BLOCKER_LABEL])
]
cleanup.update_fuzz_blocker_label(self.policy, self.testcase, self.issue,
top_crashes_by_project_and_platform_map)
self.assertNotIn(ISSUE_FUZZ_BLOCKER_LABEL, self.issue.labels)
self.assertNotIn(data_types.CHROMIUM_ISSUE_RELEASEBLOCK_BETA_LABEL,
self.issue.labels)
self.assertNotIn('M-63', self.issue.labels)
self.assertEqual('', self.issue._monorail_issue.comment)
@test_utils.with_cloud_emulators('datastore')
class UpdateComponentsTest(unittest.TestCase):
"""Tests for update_component_labels."""
def setUp(self):
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
def test_components_added(self):
"""Ensure that we add components when applicable."""
self.testcase.set_metadata(
'predator_result', {'result': {
'suspected_components': ['A', 'B>C']
}})
cleanup.update_component_labels(self.testcase, self.issue)
self.assertIn('A', self.issue.components)
self.assertIn('B>C', self.issue.components)
self.assertIn('Test-Predator-Auto-Components', self.issue.labels)
def test_components_not_reapplied(self):
"""Ensure that we don't re-add components once applied."""
self.testcase.set_metadata(
'predator_result', {'result': {
'suspected_components': ['A', 'B>C']
}})
comment = appengine_test_utils.create_generic_issue_comment(
labels=['Test-Predator-Auto-Components'])
self.issue._monorail_issue.comments.append(comment)
cleanup.update_component_labels(self.testcase, self.issue)
self.assertNotIn('A', self.issue.components)
self.assertNotIn('B>C', self.issue.components)
self.assertNotIn('Test-Predator-Auto-Components', self.issue.labels)
def test_no_label_added_for_no_components(self):
"""Ensure that we don't add label when there is no component in result."""
self.testcase.set_metadata('predator_result', {})
self.issue.components.add('A')
cleanup.update_component_labels(self.testcase, self.issue)
self.assertIn('A', self.issue.components)
self.assertNotIn('Test-Predator-Auto-Components', self.issue.labels)
def test_no_label_added_for_same_components(self):
"""Ensure that we don't add label when there is no component in result."""
self.testcase.set_metadata(
'predator_result', {'result': {
'suspected_components': ['A', 'B>C']
}})
self.issue.components.add('A')
self.issue.components.add('B>C')
self.issue.components.add('D')
cleanup.update_component_labels(self.testcase, self.issue)
self.assertIn('A', self.issue.components)
self.assertIn('B>C', self.issue.components)
self.assertIn('D', self.issue.components)
self.assertNotIn('Test-Predator-Auto-Components', self.issue.labels)
def test_no_label_added_for_more_specific_component(self):
"""Ensure that we don't add label when there is a more specific component
already."""
self.testcase.set_metadata('predator_result',
{'result': {
'suspected_components': ['A']
}})
self.issue.components.add('A>B')
self.issue.components.add('D')
cleanup.update_component_labels(self.testcase, self.issue)
self.assertNotIn('A', self.issue.components)
self.assertIn('A>B', self.issue.components)
self.assertIn('D', self.issue.components)
self.assertNotIn('Test-Predator-Auto-Components', self.issue.labels)
def test_label_added_for_more_specific_component_and_new_component(self):
"""Ensure that we add label when there is a more specific component
    already, but also a new component."""
self.testcase.set_metadata('predator_result',
{'result': {
'suspected_components': ['A', 'E']
}})
self.issue.components.add('A>B')
self.issue.components.add('D')
cleanup.update_component_labels(self.testcase, self.issue)
self.assertNotIn('A', self.issue.components)
self.assertIn('A>B', self.issue.components)
self.assertIn('D', self.issue.components)
self.assertIn('E', self.issue.components)
self.assertIn('Test-Predator-Auto-Components', self.issue.labels)
def test_label_added_for_unrelated_component(self):
"""Ensure that we add label when there is a unrelated component with same
prefix."""
self.testcase.set_metadata('predator_result',
{'result': {
'suspected_components': ['A']
}})
self.issue.components.add('AA>B')
self.issue.components.add('D')
cleanup.update_component_labels(self.testcase, self.issue)
self.assertIn('A', self.issue.components)
self.assertIn('AA>B', self.issue.components)
self.assertIn('D', self.issue.components)
self.assertIn('Test-Predator-Auto-Components', self.issue.labels)
@test_utils.with_cloud_emulators('datastore')
class UpdateIssueCCsFromOwnersFileTest(unittest.TestCase):
"""Tests for update_issue_ccs_from_owners_file."""
def setUp(self):
helpers.patch(self, [
'base.utils.is_oss_fuzz',
])
helpers.patch_environ(self)
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
# We'll generally want to assume we have an unassigned issue.
self.issue.assignee = ''
self.issue._monorail_issue.cc = []
self.issue.status = 'Untriaged'
self.testcase.set_metadata('issue_owners',
'[email protected],[email protected]')
self.mock.is_oss_fuzz.return_value = False
self.policy = issue_tracker_policy.get('test-project')
def test_skipped_issue_closed(self):
"""Test that we don't add ccs to closed issues."""
self.issue.status = 'Fixed'
self.issue._monorail_issue.open = False
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual('', self.issue._monorail_issue.comment)
self.assertItemsEqual([], self.issue.ccs)
self.assertNotIn('ClusterFuzz-Auto-CC', self.issue.labels)
def test_skipped_issue_updated_once(self):
"""Test that we don't add ccs if we added ccs once already."""
comment = appengine_test_utils.create_generic_issue_comment(
labels=['ClusterFuzz-Auto-CC'])
self.assertEqual('', self.issue._monorail_issue.comment)
self.issue._monorail_issue.comments.append(comment)
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertItemsEqual([], self.issue.ccs)
def test_skipped_no_testcase_metadata(self):
"""Test that we don't add ccs if there are no issue_owners key in testcase
metadata."""
self.testcase.delete_metadata('issue_owners')
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual('', self.issue._monorail_issue.comment)
self.assertItemsEqual([], self.issue.ccs)
self.assertNotIn('ClusterFuzz-Auto-CC', self.issue.labels)
def test_skipped_empty_testcase_metadata(self):
"""Test that we don't add ccs if owners list is empty in testcase
metadata."""
self.testcase.set_metadata('issue_owners', '')
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual('', self.issue._monorail_issue.comment)
self.assertItemsEqual([], self.issue.ccs)
self.assertNotIn('ClusterFuzz-Auto-CC', self.issue.labels)
def test_skipped_ccs_already_added_and_metadata_set(self):
"""Test that we don't add ccs if ccs are added already and metadata has
has_issue_ccs_from_owners_file attribute."""
self.testcase.set_metadata('has_issue_ccs_from_owners_file', True)
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual('', self.issue._monorail_issue.comment)
self.assertItemsEqual([], self.issue.ccs)
self.assertNotIn('ClusterFuzz-Auto-CC', self.issue.labels)
  def test_skipped_ccs_already_added_and_metadata_not_set(self):
"""Test that we don't add ccs if ccs are added already."""
self.issue.ccs.add('[email protected]')
self.issue.ccs.add('[email protected]')
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual(
True, self.testcase.get_metadata('has_issue_ccs_from_owners_file'))
self.assertEqual('', self.issue._monorail_issue.comment)
self.assertItemsEqual(['[email protected]', '[email protected]'],
sorted(self.issue.ccs))
self.assertNotIn('ClusterFuzz-Auto-CC', self.issue.labels)
def test_add_ccs_with_some_initial_ones(self):
"""Test that we only add new ccs if some are added already."""
self.issue._monorail_issue.cc = ['[email protected]']
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual(
'Automatically adding ccs based on OWNERS file / target commit history.'
'\n\nIf this is incorrect, please add ClusterFuzz-Wrong label.',
self.issue._monorail_issue.comment)
self.assertItemsEqual(['[email protected]', '[email protected]'],
sorted(self.issue.ccs))
self.assertIn('ClusterFuzz-Auto-CC', self.issue.labels)
def test_add_ccs_without_any_initial_ones(self):
"""Test adding of ccs with none already existing on the issue."""
self.mock.is_oss_fuzz.return_value = True
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual(
'Automatically adding ccs based on OWNERS file / target commit history.'
'\n\nIf this is incorrect, '
'please file a bug on https://github.com/google/oss-fuzz/issues/new.',
self.issue._monorail_issue.comment)
self.assertItemsEqual(['[email protected]', '[email protected]'],
sorted(self.issue.ccs))
self.assertIn('ClusterFuzz-Auto-CC', self.issue.labels)
def test_only_add_five_random_ccs(self):
"""Test that up to 5 random ccs are added. """
issue_owners = ['dev%[email protected]' % idx for idx in range(100)]
self.testcase.set_metadata('issue_owners', ','.join(issue_owners))
helpers.patch(self, ['random.sample'])
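    # Make the mocked random.sample deterministic: it always returns the last
    # `size` owners, so the assertion below can compare against issue_owners[-5:].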
self.mock.sample.side_effect = lambda l, size: l[-size:]
cleanup.update_issue_ccs_from_owners_file(self.policy, self.testcase,
self.issue)
self.assertEqual(
'Automatically adding ccs based on OWNERS file / target commit history.'
'\n\nIf this is incorrect, please add ClusterFuzz-Wrong label.',
self.issue._monorail_issue.comment)
self.assertItemsEqual(issue_owners[-5:], self.issue.ccs)
self.assertIn('ClusterFuzz-Auto-CC', self.issue.labels)
@test_utils.with_cloud_emulators('datastore')
class UpdateIssueLabelsForFlakyTestcaseTest(unittest.TestCase):
"""Tests for update_issue_labels_for_flaky_testcase."""
def setUp(self):
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
self.policy = issue_tracker_policy.get('test-project')
def test_mark_unreproducible_if_reproducible_change(self):
"""Test that we change label on issue if the testcase is now flaky."""
self.issue.labels.add('Reproducible')
self.testcase.one_time_crasher_flag = True
cleanup.update_issue_labels_for_flaky_testcase(self.policy, self.testcase,
self.issue)
self.assertNotIn('Reproducible', self.issue.labels)
self.assertIn('Unreproducible', self.issue.labels)
self.assertEqual(
'ClusterFuzz testcase 1 appears to be flaky, '
'updating reproducibility label.', self.issue._monorail_issue.comment)
def test_skip_if_unreproducible(self):
"""Test that we don't change labels if the testcase is unreproducible and
issue is already marked unreproducible."""
self.issue.labels.add('Unreproducible')
self.testcase.one_time_crasher_flag = True
cleanup.update_issue_labels_for_flaky_testcase(self.policy, self.testcase,
self.issue)
self.assertNotIn('Reproducible', self.issue.labels)
self.assertIn('Unreproducible', self.issue.labels)
self.assertEqual('', self.issue._monorail_issue.comment)
def test_skip_if_reproducible(self):
"""Test that we don't change labels if the testcase is reproducible."""
self.issue.labels.add('Reproducible')
self.testcase.one_time_crasher_flag = False
cleanup.update_issue_labels_for_flaky_testcase(self.policy, self.testcase,
self.issue)
self.assertIn('Reproducible', self.issue.labels)
self.assertNotIn('Unreproducible', self.issue.labels)
self.assertEqual('', self.issue._monorail_issue.comment)
@test_utils.with_cloud_emulators('datastore')
class UpdateIssueOwnerAndCCsFromPredatorResultsTest(unittest.TestCase):
"""Tests for update_issue_owner_and_ccs_from_predator_results."""
def setUp(self):
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
# We'll generally want to assume we have an unassigned issue.
self.issue.assignee = ''
self.issue.status = 'Untriaged'
self.policy = issue_tracker_policy.get('test-project')
# Set the metadata to a generic result that would lead to an update,
# assuming no other conditions are violated.
self.testcase.set_metadata(
'predator_result', {
'result': {
'suspected_cls': [{
'author': '[email protected]',
'description': 'blah',
'url': 'url'
},]
}
})
def test_owner_assigned(self):
"""Ensure that we set the owner when appropriate."""
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '[email protected]')
self.assertEqual(self.issue.status, 'Assigned')
self.assertIn('Test-Predator-Auto-Owner', self.issue.labels)
def test_single_owner_cced_if_specified(self):
"""Ensure that we cc single authors if assignment is disabled."""
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue, only_allow_ccs=True)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Untriaged')
self.assertIn('[email protected]', self.issue.ccs)
self.assertIn('Test-Predator-Auto-CC', self.issue.labels)
def test_closed_not_updated(self):
"""Ensure that we don't set owners for closed issues."""
self.issue.status = 'Fixed'
self.issue._monorail_issue.open = False
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Fixed')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
def test_owner_not_reassigned(self):
"""Ensure that we don't overwrite already assigned owners."""
self.issue.status = 'Assigned'
self.issue.assignee = '[email protected]'
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '[email protected]')
self.assertEqual(self.issue.status, 'Assigned')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
def test_skipped_if_already_updated(self):
"""Ensure that we don't try to update the same issue twice."""
comment = appengine_test_utils.create_generic_issue_comment(
labels=['Test-Predator-Auto-Owner'])
self.issue._monorail_issue.comments.append(comment)
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Untriaged')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
def test_skipped_if_previously_assigned(self):
"""Ensure that we don't assign to someone who was already the owner."""
comment = appengine_test_utils.create_generic_issue_comment()
comment.owner = '[email protected]'
self.issue._monorail_issue.comments.append(comment)
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Untriaged')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
def test_skipped_if_no_cls(self):
"""Ensure that we do nothing if we have no suspected CLs."""
self.testcase.set_metadata('predator_result',
{'result': {
'suspected_cls': []
}})
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Untriaged')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
def test_add_ccs_if_multiple_cls(self):
"""Ensure that we only cc when we have multiple suspected CLs."""
self.testcase.set_metadata(
'predator_result', {
'result': {
'suspected_cls': [
{
'author': '[email protected]',
'description': 'blah',
'url': 'url'
},
{
'author': '[email protected]',
'description': 'halb',
'url': 'lru'
},
]
}
})
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Untriaged')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
self.assertIn('Test-Predator-Auto-CC', self.issue.labels)
self.assertIn('[email protected]', self.issue.ccs)
self.assertIn('[email protected]', self.issue.ccs)
def test_skipped_if_previously_cced_and_metadata_set(self):
"""Ensure that we don't re-cc authors who were cced in the past and have
has_issue_ccs_from_predator_results set in metadata."""
self.testcase.set_metadata('has_issue_ccs_from_predator_results', True)
self.testcase.set_metadata(
'predator_result', {
'result': {
'suspected_cls': [
{
'author': '[email protected]',
'description': 'blah',
'url': 'url'
},
{
'author': '[email protected]',
'description': 'halb',
'url': 'lru'
},
]
}
})
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue, only_allow_ccs=True)
self.assertNotIn('[email protected]', self.issue.ccs)
self.assertNotIn('[email protected]', self.issue.ccs)
self.assertNotIn('Test-Predator-Auto-CC', self.issue.labels)
def test_skipped_if_previously_cced_and_metadata_not_set(self):
"""Ensure that we don't re-cc authors who were cced in the past."""
comment = appengine_test_utils.create_generic_issue_comment()
comment.cc = ['[email protected]']
self.issue._monorail_issue.comments.append(comment)
self.testcase.set_metadata(
'predator_result', {
'result': {
'suspected_cls': [
{
'author': '[email protected]',
'description': 'blah',
'url': 'url'
},
{
'author': '[email protected]',
'description': 'halb',
'url': 'lru'
},
]
}
})
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertNotIn('[email protected]', self.issue.ccs)
self.assertIn('[email protected]', self.issue.ccs)
self.assertIn('Test-Predator-Auto-CC', self.issue.labels)
def test_skipped_if_malformed_cl(self):
"""Ensure that we do nothing if the suspected CL is malformed."""
self.testcase.set_metadata('predator_result',
{'result': {
'suspected_cls': [{
'url': 'url'
},]
}})
cleanup.update_issue_owner_and_ccs_from_predator_results(
self.policy, self.testcase, self.issue)
self.assertEqual(self.issue.assignee, '')
self.assertEqual(self.issue.status, 'Untriaged')
self.assertNotIn('Test-Predator-Auto-Owner', self.issue.labels)
@test_utils.with_cloud_emulators('datastore')
class NotifyIssueIfTestcaseIsInvalidTest(unittest.TestCase):
"""Tests for notify_issue_if_testcase_is_invalid."""
def setUp(self):
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
# Assume a test case associated with an assigned issue.
self.issue.status = 'Assigned'
self.testcase.bug_information = '123456'
self.policy = issue_tracker_policy.get('test-project')
def test_skipped_if_no_issue(self):
"""Ensure that we handle the case where there is no issue."""
self.testcase.bug_information = None
# Simply ensure that we don't throw an exception in this case.
cleanup.notify_issue_if_testcase_is_invalid(self.policy, self.testcase,
None)
def test_skipped_if_closed_issue(self):
"""Ensure that we ignore issues that are already closed."""
self.issue.status = 'Fixed'
self.issue._monorail_issue.open = False
cleanup.notify_issue_if_testcase_is_invalid(self.policy, self.testcase,
self.issue)
self.assertEqual(self.issue._monorail_issue.comment, '')
def test_skipped_if_unmarked_issue(self):
"""Ensure that we ignore issues that have valid fuzzers."""
cleanup.notify_issue_if_testcase_is_invalid(self.policy, self.testcase,
self.issue)
self.assertEqual(self.issue._monorail_issue.comment, '')
def test_notified_if_fuzzer_was_deleted(self):
"""Ensure that we comment on issues that have invalid fuzzers."""
self.testcase.set_metadata('fuzzer_was_deleted', True)
cleanup.notify_issue_if_testcase_is_invalid(self.policy, self.testcase,
self.issue)
self.assertIn('is associated with an obsolete fuzzer',
self.issue._monorail_issue.comment)
self.assertIn(ISSUE_INVALID_FUZZER_LABEL, self.issue.labels)
def test_not_notified_if_fuzzer_was_deleted_and_notified(self):
"""Ensure that we don't comment again on issues that have invalid fuzzers
and we have commented once."""
self.testcase.set_metadata('fuzzer_was_deleted', True)
self.issue._monorail_issue.comments += [
appengine_test_utils.create_generic_issue_comment(
labels=[ISSUE_INVALID_FUZZER_LABEL])
]
cleanup.notify_issue_if_testcase_is_invalid(self.policy, self.testcase,
self.issue)
self.assertNotIn('is associated with an obsolete fuzzer',
self.issue._monorail_issue.comment)
@test_utils.with_cloud_emulators('datastore')
class NotifyUploaderIfTestcaseIsProcessed(unittest.TestCase):
"""Tests for notify_uploader_when_testcase_is_processed."""
def setUp(self):
helpers.patch(self, [
'handlers.cron.cleanup._update_issue_security_severity_and_get_comment',
'libs.issue_management.issue_filer.update_issue_impact_labels',
'libs.mail.send',
])
self.issue = appengine_test_utils.create_generic_issue()
self.testcase = test_utils.create_generic_testcase()
self.testcase_id = self.testcase.key.id()
self.uploader_email = '[email protected]'
self.policy = issue_tracker_policy.get('test-project')
data_types.Config(url='url', reproduction_help_url='repro_help_url').put()
def _get_notification(self):
"""Return notification entity for our testcase."""
return data_types.Notification.query(
data_types.Notification.testcase_id == self.testcase_id).get()
def test_no_upload_metadata(self):
"""Ensure that we don't send notification if there is no upload metadata."""
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.assertEqual(0, self.mock.send.call_count)
self.assertIsNone(self._get_notification())
def test_upload_metadata_with_no_uploader_email(self):
"""Ensure that we don't send notification if there is no uploader email."""
data_types.TestcaseUploadMetadata(
testcase_id=self.testcase_id, uploader_email=None, bundled=False).put()
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.assertEqual(0, self.mock.send.call_count)
self.assertIsNone(self._get_notification())
def test_upload_metadata_with_multiple_testcases(self):
"""Ensure that we don't send notification if this a bundled metadata archive
(with multiple testcases)."""
data_types.TestcaseUploadMetadata(
testcase_id=self.testcase_id,
uploader_email=self.uploader_email,
        bundled=True).put()
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.assertEqual(0, self.mock.send.call_count)
self.assertIsNone(self._get_notification())
def test_critical_tasks_not_completed(self):
"""Ensure that we don't send notification if critical tasks not complete."""
data_types.TestcaseUploadMetadata(
testcase_id=self.testcase_id,
uploader_email=self.uploader_email,
bundled=False).put()
self.testcase.minimized_keys = None
self.testcase.regression = None
self.testcase.put()
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.assertEqual(0, self.mock.send.call_count)
def test_pending_testcase(self):
"""Ensure that notification is not sent with a pending testcase."""
data_types.TestcaseUploadMetadata(
testcase_id=self.testcase_id,
uploader_email=self.uploader_email,
bundled=False).put()
self.testcase.status = 'Pending'
self.testcase.one_time_crasher_flag = False
self.testcase.put()
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.assertEqual(0, self.mock.send.call_count)
def test_notification_sent_with_regular_testcase(self):
"""Ensure that notification is sent with a regular testcase."""
data_types.TestcaseUploadMetadata(
testcase_id=self.testcase_id,
uploader_email=self.uploader_email,
bundled=False).put()
self.testcase.status = 'Processed'
self.testcase.minimized_keys = 'some-key'
self.testcase.regression = '1:2'
self.testcase.is_impact_set_flag = True
self.testcase.put()
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.mock.send.assert_called_once_with(
'[email protected]', 'Your testcase upload 1 analysis is complete.',
'Detailed Report: https://test-clusterfuzz.appspot.com/'
'testcase?key=1<br><br>'
'Fuzzer: fuzzer1<br>'
'Job Type: test_content_shell_drt<br>'
'Crash Type: fake type<br>'
'Crash Address: 0xdeadbeef<br>'
'Crash State:<br>'
' ...see report...<br>'
'Sanitizer: address (ASAN)<br><br>'
'Regressed: https://test-clusterfuzz.appspot.com/revisions?'
'job=test_content_shell_drt&range=1:2<br><br>'
'Reproducer Testcase: '
'https://test-clusterfuzz.appspot.com/download?testcase_id=1<br><br>'
'See repro_help_url for instructions to reproduce this bug locally.'
'<br><br>'
'If you suspect that the result above is incorrect, '
'try re-doing that job on the testcase report page.')
self.assertIsNotNone(self._get_notification())
def test_notification_sent_with_unreproducible_testcase(self):
"""Ensure that notification is sent with an unreproducible testcase."""
data_types.TestcaseUploadMetadata(
testcase_id=self.testcase_id,
uploader_email=self.uploader_email,
bundled=False).put()
self.testcase.status = 'Unreproducible'
self.testcase.one_time_crasher_flag = False
self.testcase.put()
cleanup.notify_uploader_when_testcase_is_processed(
self.policy, self.testcase, self.issue)
self.mock.send.assert_called_once_with(
'[email protected]', 'Your testcase upload 1 analysis is complete.',
'Testcase 1 failed to reproduce the crash. '
'Please inspect the program output at '
'https://test-clusterfuzz.appspot.com/testcase?key=1.<br><br>'
'If you suspect that the result above is incorrect, '
'try re-doing that job on the testcase report page.')
self.assertIsNotNone(self._get_notification())
@test_utils.with_cloud_emulators('datastore')
class CleanupUnusedFuzzTargetsTest(unittest.TestCase):
"""Tests for cleanup_unused_fuzz_targets_and_jobs."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'base.utils.utcnow',
])
self.mock.utcnow.return_value = datetime.datetime(2018, 1, 31)
def test_cleanup_unused_fuzz_targets_and_jobs(self):
"""Test cleaning up fuzz targets."""
# FuzzTarget should be removed. All FuzzTargetJobs are older than the
# threshold.
data_types.FuzzTarget(
engine='libFuzzer', binary='binary1', project='test-project').put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_binary1',
job='job1',
last_run=datetime.datetime(2018, 1, 23)).put()
# FuzzTarget should be removed. No FuzzTargetJobs.
data_types.FuzzTarget(
engine='libFuzzer', binary='binary2', project='test-project').put()
# FuzzTarget should not be removed. Has 1 FuzzTargetJob left after removing
# old ones.
data_types.FuzzTarget(
engine='libFuzzer', binary='binary3', project='test-project').put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_binary3',
job='job1',
last_run=datetime.datetime(2018, 1, 25)).put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_binary3',
job='job2',
last_run=datetime.datetime(2018, 1, 23)).put()
# FuzzTarget should not be removed. All FuzzTargetJob valid.
data_types.FuzzTarget(
engine='libFuzzer', binary='binary4', project='test-project').put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_binary4',
job='job1',
last_run=datetime.datetime(2018, 1, 25)).put()
data_types.FuzzTargetJob(
fuzz_target_name='libFuzzer_binary4',
job='job2',
last_run=datetime.datetime(2018, 1, 25)).put()
cleanup.cleanup_unused_fuzz_targets_and_jobs()
self.assertItemsEqual(
['libFuzzer_binary3', 'libFuzzer_binary4'],
list([t.key.id() for t in data_types.FuzzTarget.query()]))
self.assertItemsEqual([
'libFuzzer_binary3/job1', 'libFuzzer_binary4/job1',
'libFuzzer_binary4/job2'
], list([t.key.id() for t in data_types.FuzzTargetJob.query()]))
@test_utils.with_cloud_emulators('datastore')
class CleanupUnusedHeartbeatsTest(unittest.TestCase):
"""Tests for cleaning up heartbeat entities."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'base.utils.utcnow',
])
self.mock.utcnow.return_value = datetime.datetime(2018, 1, 31)
def test_cleanup(self):
"""Test cleanup_unused_heartbeats."""
data_types.Heartbeat(last_beat_time=datetime.datetime(2018, 1, 14)).put()
data_types.Heartbeat(last_beat_time=datetime.datetime(2018, 1, 15)).put()
data_types.Heartbeat(last_beat_time=datetime.datetime(2018, 1, 16)).put()
data_types.Heartbeat(last_beat_time=datetime.datetime(2018, 1, 17)).put()
data_types.Heartbeat(last_beat_time=datetime.datetime(2018, 2, 1)).put()
cleanup.cleanup_unused_heartbeats()
self.assertItemsEqual([
{
'task_payload': None,
'source_version': None,
'task_end_time': None,
'last_beat_time': datetime.datetime(2018, 1, 16, 0, 0),
'bot_name': None
},
{
'task_payload': None,
'source_version': None,
'task_end_time': None,
'last_beat_time': datetime.datetime(2018, 1, 17, 0, 0),
'bot_name': None
},
{
'task_payload': None,
'source_version': None,
'task_end_time': None,
'last_beat_time': datetime.datetime(2018, 2, 1, 0, 0),
'bot_name': None
},
], [e.to_dict() for e in data_types.Heartbeat.query()])
class UpdateSeverityLabelsTest(unittest.TestCase):
"""Tests for updating severity labels."""
def setUp(self):
self.testcase = data_types.Testcase()
self.issue = appengine_test_utils.create_generic_issue()
self.policy = issue_tracker_policy.get('test-project')
def test_add_missing_severity(self):
"""Test updating missing severity."""
self.testcase.security_severity = data_types.SecuritySeverity.HIGH
result = cleanup._update_issue_security_severity_and_get_comment(
self.policy, self.testcase, self.issue)
self.assertIn('Security_Severity-High', self.issue.labels)
self.assertIn('A recommended severity was added to this bug.', result)
def test_add_same_severity(self):
"""Test correct severity already set."""
self.testcase.security_severity = data_types.SecuritySeverity.HIGH
self.issue.labels.add('Security_severity-High')
result = cleanup._update_issue_security_severity_and_get_comment(
self.policy, self.testcase, self.issue)
self.assertIn('Security_Severity-High', self.issue.labels)
self.assertEqual('', result)
def test_add_different_severity(self):
"""Test incorrect severity set."""
self.testcase.security_severity = data_types.SecuritySeverity.HIGH
self.issue.labels.add('Security_Severity-Medium')
result = cleanup._update_issue_security_severity_and_get_comment(
self.policy, self.testcase, self.issue)
self.assertNotIn('Security_Severity-High', self.issue.labels)
self.assertIn('Security_Severity-Medium', self.issue.labels)
self.assertIn('different from what was assigned', result)
| 42.3359 | 80 | 0.70098 |
d3f1cf12437889a91276a931211d95204f42e5f3 | 1,179 | py | Python | asyncrpc/handler.py | cosminbasca/asyncrpc | 49f2b4b5c686e19e9b314189720ed105fda0bb9f | ["Apache-2.0"] | 1 | 2015-04-14T14:41:08.000Z | 2015-04-14T14:41:08.000Z | asyncrpc/handler.py | cosminbasca/asyncrpc | 49f2b4b5c686e19e9b314189720ed105fda0bb9f | ["Apache-2.0"] | null | null | null | asyncrpc/handler.py | cosminbasca/asyncrpc | 49f2b4b5c686e19e9b314189720ed105fda0bb9f | ["Apache-2.0"] | null | null | null |
#
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
__author__ = 'basca'
class RpcHandler(object):
__metaclass__ = ABCMeta
@abstractmethod
def get_instance(self, *args, **kwargs):
return None
def rpc(self, *args, **kwargs):
instance = self.get_instance(*args, **kwargs)
def rpc_call(name, *args, **kwargs):
func = getattr(instance, name, None)
if not func:
raise NameError('instance does not have method "{0}"'.format(name))
return func(*args, **kwargs)
return rpc_call
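# Minimal usage sketch (illustrative only, not part of the original module).
# CalculatorService is a hypothetical example class; a concrete handler only
# needs to return the target object from get_instance():
#
#   class CalculatorService(object):
#       def add(self, a, b):
#           return a + b
#
#   class CalculatorHandler(RpcHandler):
#       def get_instance(self, *args, **kwargs):
#           return CalculatorService()
#
#   rpc_call = CalculatorHandler().rpc()
#   rpc_call('add', 1, 2)  # returns 3
#   rpc_call('sub', 1, 2)  # raises NameError: instance does not have method "sub"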
| 31.026316 | 83 | 0.68363 |
75ffeb2c4651b4fc26d9f8056eb3435433f744b0 | 68 | py | Python | python-for-data-science/week_1/np.py | assassinen/coursera_mfti_python | eee7b3c55256f391c1be32924fa1ad3364b307f2 | ["Apache-2.0"] | null | null | null | python-for-data-science/week_1/np.py | assassinen/coursera_mfti_python | eee7b3c55256f391c1be32924fa1ad3364b307f2 | ["Apache-2.0"] | null | null | null | python-for-data-science/week_1/np.py | assassinen/coursera_mfti_python | eee7b3c55256f391c1be32924fa1ad3364b307f2 | ["Apache-2.0"] | null | null | null |
import numpy as np
x = np.array([1,2,3,4], dtype=np.int64)
print(x) | 17 | 39 | 0.661765 |
ba362cb76017ad8e993be41ba3cec48b2ec2e664 | 6,896 | py | Python | finetune.py | haroldNLP/Distiller | f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd | ["MIT"] | 2 | 2022-03-21T08:02:02.000Z | 2022-03-21T08:29:07.000Z | finetune.py | haroldNLP/Distiller | f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd | ["MIT"] | null | null | null | finetune.py | haroldNLP/Distiller | f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd | ["MIT"] | null | null | null |
import os
import json
import torch
import numpy as np
import argparse
from Distiller.glue_preprocess import load_and_cache_examples, glue_compute_metrics
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoConfig, AdamW
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
def eval(args, model, tokenizer):
dataset, s_dataset, features, s_features, examples = load_and_cache_examples(args, tokenizer, mode="dev",
return_examples=True)
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
preds = []
label_list = []
model.eval()
for batch in tqdm(eval_dataloader):
# labels = batch['labels']
# batch = tuple(t.to(args.device) for t in batch)
batch = {key: value.to(args.device) for key, value in batch.items()}
with torch.no_grad():
outputs = model(**batch)
# outputs = model(**batch)
predictions = outputs.logits.detach().cpu()
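            # Classification tasks take the argmax over the logits; regression-style
            # tasks (e.g. STS-B) keep the raw score from the first output column.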
if args.task_name not in ["stsb","cloth"]:
predictions = predictions.argmax(dim=-1)
else:
predictions = predictions[:, 0]
label_list.extend(batch['labels'].cpu().tolist())
preds.extend(predictions.tolist())
model.train()
# eval_metric_compute = metric.compute()
eval_metric = glue_compute_metrics(args.task_name, np.array(preds), np.array(label_list))
print(f"Eval result: {eval_metric}")
return eval_metric
def main(args):
best_result = 0.0
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
if not os.path.exists(args.data_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.data_dir)
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
config = AutoConfig.from_pretrained(args.model_path)
config.num_labels = args.num_labels
model = AutoModelForSequenceClassification.from_pretrained(args.model_path, config=config)
tokenizer = AutoTokenizer.from_pretrained(args.model_path,use_fast=False,
config=config)
dataset = load_and_cache_examples(args, tokenizer, "train", False)
train_sampler = RandomSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
train_dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=args.train_batch_size)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
model.to(args.device)
optimizer.zero_grad()
if args.train:
for i in range(args.epoch):
print(f"Epoch {i+1}")
for step, batch in tqdm(enumerate(train_dataloader)):
batch = {key: value.to(args.device) for key, value in batch.items()}
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
eval_result = eval(args, model, tokenizer)
model.train()
if eval_result['acc'] > best_result:
best_result = eval_result['acc']
model_to_save = model.module if hasattr(model,
"module") else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
if tokenizer:
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
output_eval_file = os.path.join(args.output_dir, f"eval_results.txt")
with open(output_eval_file, "a") as writer:
writer.write(f"Output: {json.dumps(eval_result, indent=2)}\n")
with open(os.path.join(args.output_dir, "training_args.json"), 'w') as f:
arg_dict = vars(args)
arg_dict['device'] = str(arg_dict['device'])
json.dump(arg_dict, f)
if args.eval:
eval_result = eval(args, model, tokenizer)
print(eval_result)
# model_to_save = model.module if hasattr(model,
# "module") else model # Take care of distributed/parallel training
# model_to_save.save_pretrained(args.output_dir)
# if tokenizer:
# tokenizer.save_pretrained(args.output_dir)
# torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# with open(os.path.join(args.output_dir, "training_args.json"), 'w') as f:
# arg_dict = vars(args)
# arg_dict['device'] = str(arg_dict['device'])
# json.dump(arg_dict, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", required=True)
parser.add_argument("--output_dir", default="finetuned_kaggle/")
parser.add_argument("--data_dir", required=True)
parser.add_argument("--max_seq_length", default=512, type=int)
parser.add_argument("--epoch", default=5, type=int)
parser.add_argument("--local_rank", default=-1)
parser.add_argument("--task_name",default="kaggle")
parser.add_argument("--overwrite_cache",default=False)
parser.add_argument("--learning_rate",default=5e-5,type=float)
parser.add_argument("--adam_epsilon",default=1e-8)
parser.add_argument("--train_batch_size", default=16, type=int)
parser.add_argument("--eval_batch_size", default=64, type=int)
parser.add_argument("--weight_decay", default=0.1, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--num_labels", default=2, type=int, required=True)
parser.add_argument("--train", action="store_true")
parser.add_argument("--eval", action="store_true")
args = parser.parse_args()
main(args)
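
# Example invocation (illustrative only; the model path, data dir, task name
# and label count below are placeholders, not values shipped with this script):
#
#   python finetune.py --model_path bert-base-uncased --data_dir ./data \
#       --output_dir ./finetuned --task_name sst-2 --num_labels 2 --train --eval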
| 45.668874 | 122 | 0.645447 |
6b1a34de235aa949f9f66888ab1e1085806053f4 | 37,115 | py | Python | psutil/_pswindows.py | dothebart/psutil | dccf8bedf34fced63f1962dd77b58f0da339759c | [
"BSD-3-Clause"
]
| 2 | 2021-09-25T20:26:39.000Z | 2022-02-09T13:22:58.000Z | psutil/_pswindows.py | dothebart/psutil | dccf8bedf34fced63f1962dd77b58f0da339759c | [
"BSD-3-Clause"
]
| null | null | null | psutil/_pswindows.py | dothebart/psutil | dccf8bedf34fced63f1962dd77b58f0da339759c | [
"BSD-3-Clause"
]
| 1 | 2021-03-04T19:46:31.000Z | 2021-03-04T19:46:31.000Z | # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows platform implementation."""
import contextlib
import errno
import functools
import os
import signal
import sys
import time
from collections import namedtuple
from . import _common
from ._common import AccessDenied
from ._common import conn_tmap
from ._common import conn_to_ntuple
from ._common import debug
from ._common import ENCODING
from ._common import ENCODING_ERRS
from ._common import isfile_strict
from ._common import memoize
from ._common import memoize_when_activated
from ._common import NoSuchProcess
from ._common import parse_environ_block
from ._common import TimeoutExpired
from ._common import usage_percent
from ._compat import long
from ._compat import lru_cache
from ._compat import PY3
from ._compat import range
from ._compat import unicode
from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS
from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS
from ._psutil_windows import HIGH_PRIORITY_CLASS
from ._psutil_windows import IDLE_PRIORITY_CLASS
from ._psutil_windows import NORMAL_PRIORITY_CLASS
from ._psutil_windows import REALTIME_PRIORITY_CLASS
try:
from . import _psutil_windows as cext
except ImportError as err:
if str(err).lower().startswith("dll load failed") and \
sys.getwindowsversion()[0] < 6:
# We may get here if:
# 1) we are on an old Windows version
# 2) psutil was installed via pip + wheel
# See: https://github.com/giampaolo/psutil/issues/811
msg = "this Windows version is too old (< Windows Vista); "
msg += "psutil 3.4.2 is the latest version which supports Windows "
msg += "2000, XP and 2003 server"
raise RuntimeError(msg)
else:
raise
if sys.version_info >= (3, 4):
import enum
else:
enum = None
# process priority constants, import from __init__.py:
# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
__extra__all__ = [
"win_service_iter", "win_service_get",
# Process priority
"ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
"HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS", "NORMAL_PRIORITY_CLASS",
"REALTIME_PRIORITY_CLASS",
# IO priority
"IOPRIO_VERYLOW", "IOPRIO_LOW", "IOPRIO_NORMAL", "IOPRIO_HIGH",
# others
"CONN_DELETE_TCB", "AF_LINK",
]
# =====================================================================
# --- globals
# =====================================================================
CONN_DELETE_TCB = "DELETE_TCB"
ERROR_PARTIAL_COPY = 299
PYPY = '__pypy__' in sys.builtin_module_names
if enum is None:
AF_LINK = -1
else:
AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1})
AF_LINK = AddressFamily.AF_LINK
TCP_STATUSES = {
cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
if enum is not None:
class Priority(enum.IntEnum):
ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS
BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS
HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS
IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS
NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS
REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS
globals().update(Priority.__members__)
if enum is None:
IOPRIO_VERYLOW = 0
IOPRIO_LOW = 1
IOPRIO_NORMAL = 2
IOPRIO_HIGH = 3
else:
class IOPriority(enum.IntEnum):
IOPRIO_VERYLOW = 0
IOPRIO_LOW = 1
IOPRIO_NORMAL = 2
IOPRIO_HIGH = 3
globals().update(IOPriority.__members__)
pinfo_map = dict(
num_handles=0,
ctx_switches=1,
user_time=2,
kernel_time=3,
create_time=4,
num_threads=5,
io_rcount=6,
io_wcount=7,
io_rbytes=8,
io_wbytes=9,
io_count_others=10,
io_bytes_others=11,
num_page_faults=12,
peak_wset=13,
wset=14,
peak_paged_pool=15,
paged_pool=16,
peak_non_paged_pool=17,
non_paged_pool=18,
pagefile=19,
peak_pagefile=20,
mem_private=21,
)
# =====================================================================
# --- named tuples
# =====================================================================
# psutil.cpu_times()
scputimes = namedtuple('scputimes',
['user', 'system', 'idle', 'interrupt', 'dpc'])
# psutil.virtual_memory()
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
# psutil.Process.memory_info()
pmem = namedtuple(
'pmem', ['rss', 'vms',
'num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
'pagefile', 'peak_pagefile', 'private'])
# psutil.Process.memory_full_info()
pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', ))
# psutil.Process.memory_maps(grouped=True)
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
# psutil.Process.memory_maps(grouped=False)
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
# psutil.Process.io_counters()
pio = namedtuple('pio', ['read_count', 'write_count',
'read_bytes', 'write_bytes',
'other_count', 'other_bytes'])
# =====================================================================
# --- utils
# =====================================================================
@lru_cache(maxsize=512)
def convert_dos_path(s):
r"""Convert paths using native DOS format like:
"\Device\HarddiskVolume1\Windows\systemew\file.txt"
into:
"C:\Windows\systemew\file.txt"
"""
rawdrive = '\\'.join(s.split('\\')[:3])
driveletter = cext.QueryDosDevice(rawdrive)
remainder = s[len(rawdrive):]
return os.path.join(driveletter, remainder)
def py2_strencode(s):
"""Encode a unicode string to a byte string by using the default fs
encoding + "replace" error handler.
"""
if PY3:
return s
else:
if isinstance(s, str):
return s
else:
return s.encode(ENCODING, ENCODING_ERRS)
@memoize
def getpagesize():
return cext.getpagesize()
# =====================================================================
# --- memory
# =====================================================================
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
#
total = totphys
avail = availphys
free = availphys
used = total - avail
percent = usage_percent((total - avail), total, round_=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = cext.virtual_mem()
total_phys = mem[0]
free_phys = mem[1]
total_system = mem[2]
free_system = mem[3]
    # Despite the name, PageFile refers to total system memory here,
    # thus physical memory values need to be subtracted to get swap values
total = total_system - total_phys
free = min(total, free_system - free_phys)
used = total - free
percent = usage_percent(used, total, round_=1)
return _common.sswap(total, used, free, percent, 0, 0)
# =====================================================================
# --- disk
# =====================================================================
disk_io_counters = cext.disk_io_counters
def disk_usage(path):
"""Return disk usage associated with path."""
if PY3 and isinstance(path, bytes):
# XXX: do we want to use "strict"? Probably yes, in order
# to fail immediately. After all we are accepting input here...
path = path.decode(ENCODING, errors="strict")
total, free = cext.disk_usage(path)
used = total - free
percent = usage_percent(used, total, round_=1)
return _common.sdiskusage(total, used, free, percent)
def disk_partitions(all):
"""Return disk partitions."""
rawlist = cext.disk_partitions(all)
return [_common.sdiskpart(*x) for x in rawlist]
# =====================================================================
# --- CPU
# =====================================================================
def cpu_times():
"""Return system CPU times as a named tuple."""
user, system, idle = cext.cpu_times()
# Internally, GetSystemTimes() is used, and it doesn't return
# interrupt and dpc times. cext.per_cpu_times() does, so we
# rely on it to get those only.
percpu_summed = scputimes(*[sum(n) for n in zip(*cext.per_cpu_times())])
return scputimes(user, system, idle,
percpu_summed.interrupt, percpu_summed.dpc)
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples."""
ret = []
for user, system, idle, interrupt, dpc in cext.per_cpu_times():
item = scputimes(user, system, idle, interrupt, dpc)
ret.append(item)
return ret
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_cores():
"""Return the number of CPU cores in the system."""
return cext.cpu_count_cores()
def cpu_stats():
"""Return CPU statistics."""
ctx_switches, interrupts, dpcs, syscalls = cext.cpu_stats()
soft_interrupts = 0
return _common.scpustats(ctx_switches, interrupts, soft_interrupts,
syscalls)
def cpu_freq():
"""Return CPU frequency.
On Windows per-cpu frequency is not supported.
"""
curr, max_ = cext.cpu_freq()
min_ = 0.0
return [_common.scpufreq(float(curr), min_, float(max_))]
_loadavg_inititialized = False
def getloadavg():
"""Return the number of processes in the system run queue averaged
over the last 1, 5, and 15 minutes respectively as a tuple"""
global _loadavg_inititialized
if not _loadavg_inititialized:
cext.init_loadavg_counter()
_loadavg_inititialized = True
# Drop to 2 decimal points which is what Linux does
raw_loads = cext.getloadavg()
return tuple([round(load, 2) for load in raw_loads])
# =====================================================================
# --- network
# =====================================================================
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
"""
if kind not in conn_tmap:
raise ValueError("invalid %r kind argument; choose between %s"
% (kind, ', '.join([repr(x) for x in conn_tmap])))
families, types = conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = set()
for item in rawlist:
fd, fam, type, laddr, raddr, status, pid = item
nt = conn_to_ntuple(fd, fam, type, laddr, raddr, status, TCP_STATUSES,
pid=pid if _pid == -1 else None)
ret.add(nt)
return list(ret)
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
ret = {}
rawdict = cext.net_if_stats()
for name, items in rawdict.items():
if not PY3:
assert isinstance(name, unicode), type(name)
name = py2_strencode(name)
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = _common.snicstats(isup, duplex, speed, mtu)
return ret
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
ret = cext.net_io_counters()
return dict([(py2_strencode(k), v) for k, v in ret.items()])
def net_if_addrs():
"""Return the addresses associated to each NIC."""
ret = []
for items in cext.net_if_addrs():
items = list(items)
items[0] = py2_strencode(items[0])
ret.append(items)
return ret
# =====================================================================
# --- sensors
# =====================================================================
def sensors_battery():
"""Return battery information."""
# For constants meaning see:
# https://msdn.microsoft.com/en-us/library/windows/desktop/
# aa373232(v=vs.85).aspx
acline_status, flags, percent, secsleft = cext.sensors_battery()
power_plugged = acline_status == 1
no_battery = bool(flags & 128)
charging = bool(flags & 8)
if no_battery:
return None
if power_plugged or charging:
secsleft = _common.POWER_TIME_UNLIMITED
elif secsleft == -1:
secsleft = _common.POWER_TIME_UNKNOWN
return _common.sbattery(percent, secsleft, power_plugged)
# =====================================================================
# --- other system functions
# =====================================================================
_last_btime = 0
def boot_time():
"""The system boot time expressed in seconds since the epoch."""
# This dirty hack is to adjust the precision of the returned
# value which may have a 1 second fluctuation, see:
# https://github.com/giampaolo/psutil/issues/1007
global _last_btime
ret = float(cext.boot_time())
if abs(ret - _last_btime) <= 1:
return _last_btime
else:
_last_btime = ret
return ret
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, hostname, tstamp = item
user = py2_strencode(user)
nt = _common.suser(user, None, hostname, tstamp, None)
retlist.append(nt)
return retlist
# =====================================================================
# --- Windows services
# =====================================================================
def win_service_iter():
"""Yields a list of WindowsService instances."""
for name, display_name in cext.winservice_enumerate():
yield WindowsService(py2_strencode(name), py2_strencode(display_name))
def win_service_get(name):
"""Open a Windows service and return it as a WindowsService instance."""
service = WindowsService(name, None)
service._display_name = service._query_config()['display_name']
return service
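
# Usage sketch (illustrative; 'alg' is just an example service name):
#
#   for service in win_service_iter():
#       info = service.as_dict()
#
#   service = win_service_get('alg')
#   service.status()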
class WindowsService(object):
"""Represents an installed Windows service."""
def __init__(self, name, display_name):
self._name = name
self._display_name = display_name
def __str__(self):
details = "(name=%r, display_name=%r)" % (
self._name, self._display_name)
return "%s%s" % (self.__class__.__name__, details)
def __repr__(self):
return "<%s at %s>" % (self.__str__(), id(self))
def __eq__(self, other):
        # Test for equality with another WindowsService object based
# on name.
if not isinstance(other, WindowsService):
return NotImplemented
return self._name == other._name
def __ne__(self, other):
return not self == other
def _query_config(self):
with self._wrap_exceptions():
display_name, binpath, username, start_type = \
cext.winservice_query_config(self._name)
        # XXX - update self._display_name?
return dict(
display_name=py2_strencode(display_name),
binpath=py2_strencode(binpath),
username=py2_strencode(username),
start_type=py2_strencode(start_type))
def _query_status(self):
with self._wrap_exceptions():
status, pid = cext.winservice_query_status(self._name)
if pid == 0:
pid = None
return dict(status=status, pid=pid)
@contextlib.contextmanager
def _wrap_exceptions(self):
"""Ctx manager which translates bare OSError and WindowsError
exceptions into NoSuchProcess and AccessDenied.
"""
try:
yield
except OSError as err:
if is_permission_err(err):
raise AccessDenied(
pid=None, name=self._name,
msg="service %r is not querable (not enough privileges)" %
self._name)
elif err.winerror in (cext.ERROR_INVALID_NAME,
cext.ERROR_SERVICE_DOES_NOT_EXIST):
raise NoSuchProcess(
pid=None, name=self._name,
msg="service %r does not exist)" % self._name)
else:
raise
# config query
def name(self):
"""The service name. This string is how a service is referenced
and can be passed to win_service_get() to get a new
WindowsService instance.
"""
return self._name
def display_name(self):
"""The service display name. The value is cached when this class
is instantiated.
"""
return self._display_name
def binpath(self):
"""The fully qualified path to the service binary/exe file as
a string, including command line arguments.
"""
return self._query_config()['binpath']
def username(self):
"""The name of the user that owns this service."""
return self._query_config()['username']
def start_type(self):
"""A string which can either be "automatic", "manual" or
"disabled".
"""
return self._query_config()['start_type']
# status query
def pid(self):
"""The process PID, if any, else None. This can be passed
to Process class to control the service's process.
"""
return self._query_status()['pid']
def status(self):
"""Service status as a string."""
return self._query_status()['status']
def description(self):
"""Service long description."""
return py2_strencode(cext.winservice_query_descr(self.name()))
# utils
def as_dict(self):
"""Utility method retrieving all the information above as a
dictionary.
"""
d = self._query_config()
d.update(self._query_status())
d['name'] = self.name()
d['display_name'] = self.display_name()
d['description'] = self.description()
return d
# actions
# XXX: the necessary C bindings for start() and stop() are
# implemented but for now I prefer not to expose them.
# I may change my mind in the future. Reasons:
# - they require Administrator privileges
# - can't implement a timeout for stop() (unless by using a thread,
# which sucks)
# - would require adding ServiceAlreadyStarted and
# ServiceAlreadyStopped exceptions, adding two new APIs.
# - we might also want to have modify(), which would basically mean
# rewriting win32serviceutil.ChangeServiceConfig, which involves a
# lot of stuff (and API constants which would pollute the API), see:
# http://pyxr.sourceforge.net/PyXR/c/python24/lib/site-packages/
# win32/lib/win32serviceutil.py.html#0175
# - psutil is typically about "read only" monitoring stuff;
# win_service_* APIs should only be used to retrieve a service and
# check whether it's running
# def start(self, timeout=None):
# with self._wrap_exceptions():
# cext.winservice_start(self.name())
# if timeout:
# giveup_at = time.time() + timeout
# while True:
# if self.status() == "running":
# return
# else:
# if time.time() > giveup_at:
# raise TimeoutExpired(timeout)
# else:
# time.sleep(.1)
# def stop(self):
# # Note: timeout is not implemented because it's just not
# # possible, see:
# # http://stackoverflow.com/questions/11973228/
# with self._wrap_exceptions():
# return cext.winservice_stop(self.name())
# =====================================================================
# --- processes
# =====================================================================
pids = cext.pids
pid_exists = cext.pid_exists
ppid_map = cext.ppid_map # used internally by Process.children()
def is_permission_err(exc):
"""Return True if this is a permission error."""
assert isinstance(exc, OSError), exc
# On Python 2 OSError doesn't always have 'winerror'. Sometimes
# it does, in which case the original exception was WindowsError
# (which is a subclass of OSError).
return exc.errno in (errno.EPERM, errno.EACCES) or \
getattr(exc, "winerror", -1) in (cext.ERROR_ACCESS_DENIED,
cext.ERROR_PRIVILEGE_NOT_HELD)
def convert_oserror(exc, pid=None, name=None):
"""Convert OSError into NoSuchProcess or AccessDenied."""
assert isinstance(exc, OSError), exc
if is_permission_err(exc):
return AccessDenied(pid=pid, name=name)
if exc.errno == errno.ESRCH:
return NoSuchProcess(pid=pid, name=name)
raise exc
def wrap_exceptions(fun):
"""Decorator which converts OSError into NoSuchProcess or AccessDenied."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
try:
return fun(self, *args, **kwargs)
except OSError as err:
raise convert_oserror(err, pid=self.pid, name=self._name)
return wrapper
def retry_error_partial_copy(fun):
"""Workaround for https://github.com/giampaolo/psutil/issues/875.
See: https://stackoverflow.com/questions/4457745#4457745
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
delay = 0.0001
times = 33
for x in range(times): # retries for roughly 1 second
try:
return fun(self, *args, **kwargs)
except WindowsError as _:
err = _
if err.winerror == ERROR_PARTIAL_COPY:
time.sleep(delay)
delay = min(delay * 2, 0.04)
continue
else:
raise
else:
msg = "%s retried %s times, converted to AccessDenied as it's " \
"still returning %r" % (fun, times, err)
raise AccessDenied(pid=self.pid, name=self._name, msg=msg)
return wrapper
class Process(object):
"""Wrapper class around underlying C implementation."""
__slots__ = ["pid", "_name", "_ppid", "_cache"]
def __init__(self, pid):
self.pid = pid
self._name = None
self._ppid = None
# --- oneshot() stuff
def oneshot_enter(self):
self._proc_info.cache_activate(self)
self.exe.cache_activate(self)
def oneshot_exit(self):
self._proc_info.cache_deactivate(self)
self.exe.cache_deactivate(self)
@memoize_when_activated
def _proc_info(self):
"""Return multiple information about this process as a
raw tuple.
"""
ret = cext.proc_info(self.pid)
assert len(ret) == len(pinfo_map)
return ret
def name(self):
"""Return process name, which on Windows is always the final
part of the executable.
"""
# This is how PIDs 0 and 4 are always represented in taskmgr
# and process-hacker.
if self.pid == 0:
return "System Idle Process"
if self.pid == 4:
return "System"
return os.path.basename(self.exe())
@wrap_exceptions
@memoize_when_activated
def exe(self):
if PYPY:
try:
exe = cext.proc_exe(self.pid)
except WindowsError as err:
# 24 = ERROR_TOO_MANY_OPEN_FILES. Not sure why this happens
# (perhaps PyPy's JIT delaying garbage collection of files?).
if err.errno == 24:
debug("%r forced into AccessDenied" % err)
raise AccessDenied(self.pid, self._name)
raise
else:
exe = cext.proc_exe(self.pid)
if not PY3:
exe = py2_strencode(exe)
if exe.startswith('\\'):
return convert_dos_path(exe)
return exe # May be "Registry", "MemCompression", ...
@wrap_exceptions
@retry_error_partial_copy
def cmdline(self):
if cext.WINVER >= cext.WINDOWS_8_1:
# PEB method detects cmdline changes but requires more
# privileges: https://github.com/giampaolo/psutil/pull/1398
try:
ret = cext.proc_cmdline(self.pid, use_peb=True)
except OSError as err:
if is_permission_err(err):
ret = cext.proc_cmdline(self.pid, use_peb=False)
else:
raise
else:
ret = cext.proc_cmdline(self.pid, use_peb=True)
if PY3:
return ret
else:
return [py2_strencode(s) for s in ret]
@wrap_exceptions
@retry_error_partial_copy
def environ(self):
ustr = cext.proc_environ(self.pid)
if ustr and not PY3:
assert isinstance(ustr, unicode), type(ustr)
return parse_environ_block(py2_strencode(ustr))
def ppid(self):
try:
return ppid_map()[self.pid]
except KeyError:
raise NoSuchProcess(self.pid, self._name)
def _get_raw_meminfo(self):
try:
return cext.proc_memory_info(self.pid)
except OSError as err:
if is_permission_err(err):
# TODO: the C ext can probably be refactored in order
# to get this from cext.proc_info()
info = self._proc_info()
return (
info[pinfo_map['num_page_faults']],
info[pinfo_map['peak_wset']],
info[pinfo_map['wset']],
info[pinfo_map['peak_paged_pool']],
info[pinfo_map['paged_pool']],
info[pinfo_map['peak_non_paged_pool']],
info[pinfo_map['non_paged_pool']],
info[pinfo_map['pagefile']],
info[pinfo_map['peak_pagefile']],
info[pinfo_map['mem_private']],
)
raise
@wrap_exceptions
def memory_info(self):
        # on Windows RSS == WorkingSetSize and VMS == PagefileUsage.
# Underlying C function returns fields of PROCESS_MEMORY_COUNTERS
# struct.
t = self._get_raw_meminfo()
rss = t[2] # wset
vms = t[7] # pagefile
return pmem(*(rss, vms, ) + t)
@wrap_exceptions
def memory_full_info(self):
basic_mem = self.memory_info()
uss = cext.proc_memory_uss(self.pid)
uss *= getpagesize()
return pfullmem(*basic_mem + (uss, ))
def memory_maps(self):
try:
raw = cext.proc_memory_maps(self.pid)
except OSError as err:
# XXX - can't use wrap_exceptions decorator as we're
# returning a generator; probably needs refactoring.
raise convert_oserror(err, self.pid, self._name)
else:
for addr, perm, path, rss in raw:
path = convert_dos_path(path)
if not PY3:
path = py2_strencode(path)
addr = hex(addr)
yield (addr, perm, path, rss)
@wrap_exceptions
def kill(self):
return cext.proc_kill(self.pid)
@wrap_exceptions
def send_signal(self, sig):
if sig == signal.SIGTERM:
cext.proc_kill(self.pid)
# py >= 2.7
elif sig in (getattr(signal, "CTRL_C_EVENT", object()),
getattr(signal, "CTRL_BREAK_EVENT", object())):
os.kill(self.pid, sig)
else:
raise ValueError(
"only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT signals "
"are supported on Windows")
@wrap_exceptions
def wait(self, timeout=None):
if timeout is None:
cext_timeout = cext.INFINITE
else:
# WaitForSingleObject() expects time in milliseconds.
cext_timeout = int(timeout * 1000)
timer = getattr(time, 'monotonic', time.time)
stop_at = timer() + timeout if timeout is not None else None
try:
# Exit code is supposed to come from GetExitCodeProcess().
# May also be None if OpenProcess() failed with
# ERROR_INVALID_PARAMETER, meaning PID is already gone.
exit_code = cext.proc_wait(self.pid, cext_timeout)
except cext.TimeoutExpired:
# WaitForSingleObject() returned WAIT_TIMEOUT. Just raise.
raise TimeoutExpired(timeout, self.pid, self._name)
except cext.TimeoutAbandoned:
# WaitForSingleObject() returned WAIT_ABANDONED, see:
# https://github.com/giampaolo/psutil/issues/1224
# We'll just rely on the internal polling and return None
# when the PID disappears. Subprocess module does the same
# (return None):
# https://github.com/python/cpython/blob/
# be50a7b627d0aa37e08fa8e2d5568891f19903ce/
# Lib/subprocess.py#L1193-L1194
exit_code = None
# At this point WaitForSingleObject() returned WAIT_OBJECT_0,
# meaning the process is gone. Stupidly there are cases where
# its PID may still stick around so we do a further internal
# polling.
delay = 0.0001
while True:
if not pid_exists(self.pid):
return exit_code
if stop_at and timer() >= stop_at:
raise TimeoutExpired(timeout, pid=self.pid, name=self._name)
time.sleep(delay)
delay = min(delay * 2, 0.04) # incremental delay
@wrap_exceptions
def username(self):
if self.pid in (0, 4):
return 'NT AUTHORITY\\SYSTEM'
domain, user = cext.proc_username(self.pid)
return py2_strencode(domain) + '\\' + py2_strencode(user)
@wrap_exceptions
def create_time(self):
# Note: proc_times() not put under oneshot() 'cause create_time()
# is already cached by the main Process class.
try:
user, system, created = cext.proc_times(self.pid)
return created
except OSError as err:
if is_permission_err(err):
return self._proc_info()[pinfo_map['create_time']]
raise
@wrap_exceptions
def num_threads(self):
return self._proc_info()[pinfo_map['num_threads']]
@wrap_exceptions
def threads(self):
rawlist = cext.proc_threads(self.pid)
retlist = []
for thread_id, utime, stime in rawlist:
ntuple = _common.pthread(thread_id, utime, stime)
retlist.append(ntuple)
return retlist
@wrap_exceptions
def cpu_times(self):
try:
user, system, created = cext.proc_times(self.pid)
except OSError as err:
if not is_permission_err(err):
raise
info = self._proc_info()
user = info[pinfo_map['user_time']]
system = info[pinfo_map['kernel_time']]
# Children user/system times are not retrievable (set to 0).
return _common.pcputimes(user, system, 0.0, 0.0)
@wrap_exceptions
def suspend(self):
cext.proc_suspend_or_resume(self.pid, True)
@wrap_exceptions
def resume(self):
cext.proc_suspend_or_resume(self.pid, False)
@wrap_exceptions
@retry_error_partial_copy
def cwd(self):
if self.pid in (0, 4):
raise AccessDenied(self.pid, self._name)
# return a normalized pathname since the native C function appends
# "\\" at the and of the path
path = cext.proc_cwd(self.pid)
return py2_strencode(os.path.normpath(path))
@wrap_exceptions
def open_files(self):
if self.pid in (0, 4):
return []
ret = set()
        # Filenames come in native format like:
        # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
        # Convert the first part into the corresponding drive letter
        # (e.g. "C:\") by using Windows's QueryDosDevice()
raw_file_names = cext.proc_open_files(self.pid)
for _file in raw_file_names:
_file = convert_dos_path(_file)
if isfile_strict(_file):
if not PY3:
_file = py2_strencode(_file)
ntuple = _common.popenfile(_file, -1)
ret.add(ntuple)
return list(ret)
@wrap_exceptions
def connections(self, kind='inet'):
return net_connections(kind, _pid=self.pid)
@wrap_exceptions
def nice_get(self):
value = cext.proc_priority_get(self.pid)
if enum is not None:
value = Priority(value)
return value
@wrap_exceptions
def nice_set(self, value):
return cext.proc_priority_set(self.pid, value)
@wrap_exceptions
def ionice_get(self):
ret = cext.proc_io_priority_get(self.pid)
if enum is not None:
ret = IOPriority(ret)
return ret
@wrap_exceptions
def ionice_set(self, ioclass, value):
if value:
raise TypeError("value argument not accepted on Windows")
if ioclass not in (IOPRIO_VERYLOW, IOPRIO_LOW, IOPRIO_NORMAL,
IOPRIO_HIGH):
raise ValueError("%s is not a valid priority" % ioclass)
cext.proc_io_priority_set(self.pid, ioclass)
@wrap_exceptions
def io_counters(self):
try:
ret = cext.proc_io_counters(self.pid)
except OSError as err:
if not is_permission_err(err):
raise
info = self._proc_info()
ret = (
info[pinfo_map['io_rcount']],
info[pinfo_map['io_wcount']],
info[pinfo_map['io_rbytes']],
info[pinfo_map['io_wbytes']],
info[pinfo_map['io_count_others']],
info[pinfo_map['io_bytes_others']],
)
return pio(*ret)
@wrap_exceptions
def status(self):
suspended = cext.proc_is_suspended(self.pid)
if suspended:
return _common.STATUS_STOPPED
else:
return _common.STATUS_RUNNING
@wrap_exceptions
def cpu_affinity_get(self):
def from_bitmask(x):
return [i for i in range(64) if (1 << i) & x]
bitmask = cext.proc_cpu_affinity_get(self.pid)
return from_bitmask(bitmask)
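    # Affinity is exchanged with Windows as a bitmask: e.g. mask 0b101 (5)
    # corresponds to CPUs [0, 2], and [0, 2] converts back to 5. The
    # from_bitmask() / to_bitmask() helpers above and below do exactly this.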
@wrap_exceptions
def cpu_affinity_set(self, value):
def to_bitmask(ls):
if not ls:
raise ValueError("invalid argument %r" % ls)
out = 0
for b in ls:
out |= 2 ** b
return out
# SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
# is returned for an invalid CPU but this seems not to be true,
        # therefore we check CPU validity beforehand.
allcpus = list(range(len(per_cpu_times())))
for cpu in value:
if cpu not in allcpus:
if not isinstance(cpu, (int, long)):
raise TypeError(
"invalid CPU %r; an integer is required" % cpu)
else:
raise ValueError("invalid CPU %r" % cpu)
bitmask = to_bitmask(value)
cext.proc_cpu_affinity_set(self.pid, bitmask)
@wrap_exceptions
def num_handles(self):
try:
return cext.proc_num_handles(self.pid)
except OSError as err:
if is_permission_err(err):
return self._proc_info()[pinfo_map['num_handles']]
raise
@wrap_exceptions
def num_ctx_switches(self):
ctx_switches = self._proc_info()[pinfo_map['ctx_switches']]
# only voluntary ctx switches are supported
return _common.pctxsw(ctx_switches, 0)
| 33.316876 | 78 | 0.592725 |
c6e474e3a6630ecb66498bcfa91fd498dfc28bb5 | 22,763 | py | Python | sdk/python/pulumi_google_native/bigtableadmin/v2/instance_cluster_backup_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
]
| 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/bigtableadmin/v2/instance_cluster_backup_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
]
| 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/bigtableadmin/v2/instance_cluster_backup_iam_policy.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
]
| 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['InstanceClusterBackupIamPolicyArgs', 'InstanceClusterBackupIamPolicy']
@pulumi.input_type
class InstanceClusterBackupIamPolicyArgs:
def __init__(__self__, *,
backup_id: pulumi.Input[str],
cluster_id: pulumi.Input[str],
instance_id: pulumi.Input[str],
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]] = None,
etag: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
update_mask: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a InstanceClusterBackupIamPolicy resource.
:param pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]] audit_configs: Specifies cloud audit logging configuration for this policy.
:param pulumi.Input[Sequence[pulumi.Input['BindingArgs']]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[str] update_mask: OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
pulumi.set(__self__, "backup_id", backup_id)
pulumi.set(__self__, "cluster_id", cluster_id)
pulumi.set(__self__, "instance_id", instance_id)
if audit_configs is not None:
pulumi.set(__self__, "audit_configs", audit_configs)
if bindings is not None:
pulumi.set(__self__, "bindings", bindings)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if project is not None:
pulumi.set(__self__, "project", project)
if update_mask is not None:
pulumi.set(__self__, "update_mask", update_mask)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="backupId")
def backup_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "backup_id")
@backup_id.setter
def backup_id(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_id", value)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: pulumi.Input[str]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="auditConfigs")
def audit_configs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]]]:
"""
Specifies cloud audit logging configuration for this policy.
"""
return pulumi.get(self, "audit_configs")
@audit_configs.setter
def audit_configs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuditConfigArgs']]]]):
pulumi.set(self, "audit_configs", value)
@property
@pulumi.getter
def bindings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@bindings.setter
def bindings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BindingArgs']]]]):
pulumi.set(self, "bindings", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="updateMask")
def update_mask(self) -> Optional[pulumi.Input[str]]:
"""
OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
"""
return pulumi.get(self, "update_mask")
@update_mask.setter
def update_mask(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_mask", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "version", value)
class InstanceClusterBackupIamPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AuditConfigArgs']]]]] = None,
backup_id: Optional[pulumi.Input[str]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
update_mask: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Sets the access control policy on a Table resource. Replaces any existing policy.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AuditConfigArgs']]]] audit_configs: Specifies cloud audit logging configuration for this policy.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]] bindings: Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
:param pulumi.Input[str] etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
:param pulumi.Input[str] update_mask: OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only the fields in the mask will be modified. If no mask is provided, the following default mask is used: `paths: "bindings, etag"`
:param pulumi.Input[int] version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: InstanceClusterBackupIamPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Sets the access control policy on a Table resource. Replaces any existing policy.
Note - this resource's API doesn't support deletion. When deleted, the resource will persist
on Google Cloud even though it will be deleted from Pulumi state.
:param str resource_name: The name of the resource.
:param InstanceClusterBackupIamPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceClusterBackupIamPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_configs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AuditConfigArgs']]]]] = None,
backup_id: Optional[pulumi.Input[str]] = None,
bindings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BindingArgs']]]]] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
update_mask: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = InstanceClusterBackupIamPolicyArgs.__new__(InstanceClusterBackupIamPolicyArgs)
__props__.__dict__["audit_configs"] = audit_configs
if backup_id is None and not opts.urn:
raise TypeError("Missing required property 'backup_id'")
__props__.__dict__["backup_id"] = backup_id
__props__.__dict__["bindings"] = bindings
if cluster_id is None and not opts.urn:
raise TypeError("Missing required property 'cluster_id'")
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["etag"] = etag
if instance_id is None and not opts.urn:
raise TypeError("Missing required property 'instance_id'")
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["project"] = project
__props__.__dict__["update_mask"] = update_mask
__props__.__dict__["version"] = version
super(InstanceClusterBackupIamPolicy, __self__).__init__(
'google-native:bigtableadmin/v2:InstanceClusterBackupIamPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'InstanceClusterBackupIamPolicy':
"""
Get an existing InstanceClusterBackupIamPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = InstanceClusterBackupIamPolicyArgs.__new__(InstanceClusterBackupIamPolicyArgs)
__props__.__dict__["audit_configs"] = None
__props__.__dict__["bindings"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["version"] = None
return InstanceClusterBackupIamPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="auditConfigs")
def audit_configs(self) -> pulumi.Output[Sequence['outputs.AuditConfigResponse']]:
"""
Specifies cloud audit logging configuration for this policy.
"""
return pulumi.get(self, "audit_configs")
@property
@pulumi.getter
def bindings(self) -> pulumi.Output[Sequence['outputs.BindingResponse']]:
"""
Associates a list of `members`, or principals, with a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one principal. The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of these principals can be Google groups. Each occurrence of a principal counts towards these limits. For example, if the `bindings` grant 50 different roles to `user:[email protected]`, and not to any other principal, then you can add another 1,450 principals to the `bindings` in the `Policy`.
"""
return pulumi.get(self, "bindings")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
`etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def version(self) -> pulumi.Output[int]:
"""
Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
"""
return pulumi.get(self, "version")
| 76.130435 | 1,118 | 0.704257 |
70bf91f6c1a4f4e543e0c338a13fa7d73fa29fe7 | 2,297 | py | Python | pytype/overlay.py | mraarif/pytype | 546e8b8114c9af54a409985a036398c4f6955677 | [
"Apache-2.0"
]
| null | null | null | pytype/overlay.py | mraarif/pytype | 546e8b8114c9af54a409985a036398c4f6955677 | [
"Apache-2.0"
]
| null | null | null | pytype/overlay.py | mraarif/pytype | 546e8b8114c9af54a409985a036398c4f6955677 | [
"Apache-2.0"
]
| null | null | null | """Base class for module overlays."""
from pytype import abstract
from pytype import datatypes
class Overlay(abstract.Module):
"""A layer between pytype and a module's pytd definition.
An overlay pretends to be a module, but provides members that generate extra
typing information that cannot be expressed in a pytd file. For example,
collections.namedtuple is a factory method that generates class definitions
at runtime. An overlay is needed for Pytype to generate these classes.
An Overlay will typically import its underlying module in its __init__, e.g.
by calling vm.loader.import_name(). Due to this, Overlays should only be used
when their underlying module is imported by the Python script being analyzed!
A subclass of Overlay should have an __init__ with the signature:
def __init__(self, vm)
Attributes:
real_module: An abstract.Module wrapping the AST for the underlying module.
"""
def __init__(self, vm, name, member_map, ast):
"""Initialize the overlay.
Args:
vm: Instance of vm.VirtualMachine.
name: A string containing the name of the underlying module.
member_map: Dict of str to abstract.AtomicAbstractValues that provide type
information not available in the underlying module.
      ast: A pytd.TypeDeclUnit containing the AST for the underlying module.
Used to access type information for members of the module that are not
explicitly provided by the overlay.
"""
super().__init__(vm, name, member_map, ast)
self.real_module = vm.convert.constant_to_value(
ast, subst=datatypes.AliasingDict(), node=vm.root_cfg_node)
def _convert_member(self, member):
val = member(self.vm)
val.module = self.name
return val.to_variable(self.vm.root_cfg_node)
def get_module(self, name):
"""Returns the abstract.Module for the given name."""
if name in self._member_map:
return self
else:
return self.real_module
def items(self):
items = super().items()
items += [(name, item) for name, item in self.real_module.items()
if name not in self._member_map]
return items
def build(name, builder):
"""Wrapper to turn (name, vm) -> val method signatures into (vm) -> val."""
return lambda vm: builder(name, vm)
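# Illustrative sketch (not part of pytype): a concrete overlay for a hypothetical
# module "mymod" whose "special" member needs hand-written type information.
# MySpecialValue is an assumed builder with the (name, vm) signature expected above.
#
#   class MyModOverlay(Overlay):
#     def __init__(self, vm):
#       member_map = {"special": build("special", MySpecialValue)}
#       ast = vm.loader.import_name("mymod")
#       super().__init__(vm, "mymod", member_map, ast)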
# ===== gcpdiag/lint/gke/bp_2021_002_groups_enabled.py | taylorjstacey/gcpdiag | Apache-2.0 | 1,374 bytes =====
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Groups for RBAC enabled.
Enable Google Groups for RBAC so cluster administrators do not need to
manage permissions manually for each user on the cluster and so Workspace
administrators can manage user accounts, such as revoking access when
someone leaves your organization.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
clusters = gke.get_clusters(context)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
disabled = []
if not c.has_authenticator_group_enabled():
disabled.append('groups')
if disabled:
report.add_failed(c, ' and '.join(disabled) + ' are disabled')
else:
report.add_ok(c)
# ===== bokeh/charts/utils.py | rothnic/bokeh | BSD-3-Clause | 7,607 bytes =====
""" This is the utils module that collects convenience functions and code that are
useful for the charts ecosystem.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import itertools
from math import cos, sin
from ..browserlib import view
from ..document import Document
from ..embed import file_html
from ..models import GlyphRenderer
from ..models.glyphs import (
Asterisk, Circle, CircleCross, CircleX, Cross, Diamond, DiamondCross,
InvertedTriangle, Square, SquareCross, SquareX, Triangle, X)
from ..resources import INLINE
from ..session import Session
from ..util.notebook import publish_display_data
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# TODO: (bev) this should go in a plotting utils one level up
_default_cycle_palette = [
"#f22c40", "#5ab738", "#407ee7", "#df5320", "#00ad9c", "#c33ff3"
]
def cycle_colors(chunk, palette=_default_cycle_palette):
""" Build a color list just cycling through a given palette.
Args:
        chunk (seq): the chunk of elements to generate the color list for
palette (seq[color]) : a palette of colors to cycle through
Returns:
colors
"""
colors = []
g = itertools.cycle(palette)
for i in range(len(chunk)):
colors.append(next(g))
return colors
# TODO: (bev) this should go in a plotting utils one level up
def make_scatter(source, x, y, markertype, color, line_color=None,
size=10, fill_alpha=0.2, line_alpha=1.0):
"""Create a marker glyph and appends it to the renderers list.
Args:
source (obj): datasource object containing markers references.
x (str or list[float]) : values or field names of line ``x`` coordinates
y (str or list[float]) : values or field names of line ``y`` coordinates
markertype (int or str): Marker type to use (e.g., 2, 'circle', etc.)
color (str): color of the points
size (int) : size of the scatter marker
fill_alpha(float) : alpha value of the fill color
line_alpha(float) : alpha value of the line color
Return:
        scatter: GlyphRenderer instance wrapping the marker glyph
"""
if line_color is None:
line_color = color
_marker_types = OrderedDict(
[
("circle", Circle),
("square", Square),
("triangle", Triangle),
("diamond", Diamond),
("inverted_triangle", InvertedTriangle),
("asterisk", Asterisk),
("cross", Cross),
("x", X),
("circle_cross", CircleCross),
("circle_x", CircleX),
("square_x", SquareX),
("square_cross", SquareCross),
("diamond_cross", DiamondCross),
]
)
g = itertools.cycle(_marker_types.keys())
if isinstance(markertype, int):
for i in range(markertype):
shape = next(g)
else:
shape = markertype
glyph = _marker_types[shape](
x=x, y=y, size=size, fill_color=color, fill_alpha=fill_alpha,
line_color=line_color, line_alpha=line_alpha
)
return GlyphRenderer(data_source=source, glyph=glyph)
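# Illustrative usage sketch (assumes a ColumnDataSource from bokeh.models):
#   source = ColumnDataSource(dict(x=[1, 2, 3], y=[4, 5, 6]))
#   renderer = make_scatter(source, 'x', 'y', 'circle', '#f22c40')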
def chunk(l, n):
"""Yield successive n-sized chunks from l.
Args:
        l (list): the incoming list to be chunked
        n (int): length of the chunks
"""
for i in range(0, len(l), n):
yield l[i:i + n]
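# Example of the chunking above:
#   list(chunk([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]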
def polar_to_cartesian(r, start_angles, end_angles):
"""Translate polar coordinates to cartesian.
Args:
r (float): radial coordinate
start_angles (list(float)): list of start angles
        end_angles (list(float)): list of end angles
Returns:
x, y points
"""
cartesian = lambda r, alpha: (r*cos(alpha), r*sin(alpha))
points = []
for start, end in zip(start_angles, end_angles):
points.append(cartesian(r, (end + start)/2))
return zip(*points)
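# Example of the midpoint conversion above (values rounded):
#   xs, ys = polar_to_cartesian(1.0, [0.0], [3.14159])  # mid angle ~ pi/2, so xs ~ (0.0,), ys ~ (1.0,)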
# TODO: Experimental implementation. This should really be a shared
# pattern between plotting/charts and other bokeh interfaces.
# This will probably be part of the future charts re-design
# to make them inherit from plot (or at least be closer to).
# In this way both charts and plotting could share figure,
# show, save, push methods as well as VBox, etc...
class Figure(object):
def __init__(self, *charts, **kwargs):
self.filename = kwargs.pop('filename', None)
self.server = kwargs.pop('server', None)
self.notebook = kwargs.pop('notebook', None)
self.title = kwargs.pop('title', '')
self.children = kwargs.pop('children', None)
self.charts = charts
self.doc = Document()
self.doc.hold(True)
self._plots = []
if self.server:
self.session = Session()
self.session.use_doc(self.server)
self.session.load_document(self.doc)
if self.children:
from bokeh.models import VBox
self.doc.add(VBox(children=self.children))
self.plot = None
for i, chart in enumerate(self.charts):
chart.doc = self.doc
if self.server:
chart.session = self.session
# Force the chart to create the underlying plot
chart._setup_show()
chart._prepare_show()
chart._show_teardown()
if not self.title:
self.title = chart.chart.title
self._plots += chart.chart._plots
        # reset the plot title with the one set for the Figure
self.doc._current_plot.title = self.title
def show(self):
"""Main show function.
It shows the Figure in file, server and notebook outputs.
"""
show(self, self.title, self.filename, self.server, self.notebook)
def show(obj, title='test', filename=False, server=False, notebook=False, **kws):
""" 'shows' a plot object, by auto-raising the window or tab
displaying the current plot (for file/server output modes) or displaying
it in an output cell (IPython notebook).
Args:
obj (Widget/Plot object, optional): it accepts a plot object and just shows it.
"""
if filename:
if filename is True:
filename = "untitled"
else:
filename = filename
with open(filename, "w") as f:
f.write(file_html(obj.doc, INLINE, title))
print("Wrote %s" % filename)
view(filename)
elif filename is False and server is False and notebook is False:
print("You have to provide a filename (filename='foo.html' or"
" .filename('foo.html')) to save your plot.")
if server:
obj.session.store_document(obj.doc)
link = obj.session.object_link(obj.doc.context)
view(link)
if notebook:
from bokeh.embed import notebook_div
for plot in obj._plots:
publish_display_data({'text/html': notebook_div(plot)})
# ===== google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/errors/types/query_error.py | googleapis/googleapis-gen | Apache-2.0 | 3,226 bytes =====
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.errors',
marshal='google.ads.googleads.v8',
manifest={
'QueryErrorEnum',
},
)
class QueryErrorEnum(proto.Message):
r"""Container for enum describing possible query errors.
"""
class QueryError(proto.Enum):
r"""Enum describing possible query errors."""
UNSPECIFIED = 0
UNKNOWN = 1
QUERY_ERROR = 50
BAD_ENUM_CONSTANT = 18
BAD_ESCAPE_SEQUENCE = 7
BAD_FIELD_NAME = 12
BAD_LIMIT_VALUE = 15
BAD_NUMBER = 5
BAD_OPERATOR = 3
BAD_PARAMETER_NAME = 61
BAD_PARAMETER_VALUE = 62
BAD_RESOURCE_TYPE_IN_FROM_CLAUSE = 45
BAD_SYMBOL = 2
BAD_VALUE = 4
DATE_RANGE_TOO_WIDE = 36
DATE_RANGE_TOO_NARROW = 60
EXPECTED_AND = 30
EXPECTED_BY = 14
EXPECTED_DIMENSION_FIELD_IN_SELECT_CLAUSE = 37
EXPECTED_FILTERS_ON_DATE_RANGE = 55
EXPECTED_FROM = 44
EXPECTED_LIST = 41
EXPECTED_REFERENCED_FIELD_IN_SELECT_CLAUSE = 16
EXPECTED_SELECT = 13
EXPECTED_SINGLE_VALUE = 42
EXPECTED_VALUE_WITH_BETWEEN_OPERATOR = 29
INVALID_DATE_FORMAT = 38
INVALID_STRING_VALUE = 57
INVALID_VALUE_WITH_BETWEEN_OPERATOR = 26
INVALID_VALUE_WITH_DURING_OPERATOR = 22
INVALID_VALUE_WITH_LIKE_OPERATOR = 56
OPERATOR_FIELD_MISMATCH = 35
PROHIBITED_EMPTY_LIST_IN_CONDITION = 28
PROHIBITED_ENUM_CONSTANT = 54
PROHIBITED_FIELD_COMBINATION_IN_SELECT_CLAUSE = 31
PROHIBITED_FIELD_IN_ORDER_BY_CLAUSE = 40
PROHIBITED_FIELD_IN_SELECT_CLAUSE = 23
PROHIBITED_FIELD_IN_WHERE_CLAUSE = 24
PROHIBITED_RESOURCE_TYPE_IN_FROM_CLAUSE = 43
PROHIBITED_RESOURCE_TYPE_IN_SELECT_CLAUSE = 48
PROHIBITED_RESOURCE_TYPE_IN_WHERE_CLAUSE = 58
PROHIBITED_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 49
PROHIBITED_SEGMENT_IN_SELECT_OR_WHERE_CLAUSE = 51
PROHIBITED_SEGMENT_WITH_METRIC_IN_SELECT_OR_WHERE_CLAUSE = 53
LIMIT_VALUE_TOO_LOW = 25
PROHIBITED_NEWLINE_IN_STRING = 8
PROHIBITED_VALUE_COMBINATION_IN_LIST = 10
PROHIBITED_VALUE_COMBINATION_WITH_BETWEEN_OPERATOR = 21
STRING_NOT_TERMINATED = 6
TOO_MANY_SEGMENTS = 34
UNEXPECTED_END_OF_QUERY = 9
UNEXPECTED_FROM_CLAUSE = 47
UNRECOGNIZED_FIELD = 32
UNEXPECTED_INPUT = 11
REQUESTED_METRICS_FOR_MANAGER = 59
FILTER_HAS_TOO_MANY_VALUES = 63
__all__ = tuple(sorted(__protobuf__.manifest))
# ===== yt_dlp/__main__.py | olipfei/yt-dlp | Unlicense | 358 bytes =====
#!/usr/bin/env python3
# Execute with
# $ python -m yt_dlp
import sys
if __package__ is None and not hasattr(sys, 'frozen'):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
import yt_dlp
if __name__ == '__main__':
yt_dlp.main()
# ===== tests/augmentation/test_augmentation_utils.py | techthiyanes/textacy | Apache-2.0 | 1,170 bytes =====
import pytest
from textacy.augmentation import utils
from textacy.types import AugTok
def test_aug_tok():
aug_tok = AugTok(text="text", ws=" ", pos="pos", is_word=True, syns=["doc"])
assert isinstance(aug_tok, tuple)
with pytest.raises(AttributeError):
aug_tok.foo = "bar"
def test_to_aug_toks(doc_en):
aug_toks = utils.to_aug_toks(doc_en)
assert isinstance(aug_toks, list)
assert all(isinstance(aug_tok, AugTok) for aug_tok in aug_toks)
assert len(aug_toks) == len(doc_en)
for obj in ["foo bar bat baz", ["foo", "bar", "bat", "baz"]]:
with pytest.raises(TypeError):
_ = utils.to_aug_toks(obj)
@pytest.mark.skipif(
utils.udhr.index is None,
reason="UDHR dataset must be downloaded before running this test",
)
def test_get_char_weights():
for lang in ("en", "es", "xx"):
char_weights = utils.get_char_weights(lang)
assert isinstance(char_weights, list)
assert all(isinstance(item, tuple) for item in char_weights)
assert all(isinstance(char, str) for char, _ in char_weights)
assert all(isinstance(weight, (int, float)) for _, weight in char_weights)
# ===== gen/sample/background.py | thuanvh/pico | MIT | 1,082 bytes =====
#
#
#
#
import sys
import os
import numpy
import cv2
import struct
#
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('src')
args = parser.parse_args()
#
def write_rid_to_stdout(im):
#
# raw intensity data
#
#
h = im.shape[0]
w = im.shape[1]
#
	hw = struct.pack('ii', h, w)
	# pixel data: h*w unsigned bytes, row-major
	pixels = struct.pack('%dB' % (h*w), *im.reshape(-1))
#
sys.stdout.buffer.write(hw)
sys.stdout.buffer.write(pixels)
#
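# Illustrative companion reader (not part of pico): a RID sample written above is
# two int32 values (h, w) followed by h*w grayscale bytes; the struct.pack('i', 0)
# emitted after each sample below appears to mark "zero annotated objects".
#
#   def read_rid(stream):
#       h, w = struct.unpack('ii', stream.read(8))
#       return numpy.frombuffer(stream.read(h * w), dtype=numpy.uint8).reshape(h, w)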
for dirpath, dirnames, filenames in os.walk(args.src):
for filename in filenames:
if filename.endswith('.jpg') or filename.endswith('.png') or filename.endswith('.JPG') or filename.endswith('.jpeg'):
#
path = dirpath + '/' + filename
#
img = cv2.imread(path)
if img is None:
continue
if len(img.shape)==3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#
if max(img.shape) > 1500:
img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)
#
write_rid_to_stdout(img)
sys.stdout.buffer.write( struct.pack('i', 0) )
#
			# also emit a horizontally mirrored copy of the image
			img = cv2.flip(img, 1)
			write_rid_to_stdout(img)
			sys.stdout.buffer.write( struct.pack('i', 0) )
# ===== sorl/thumbnail/parsers.py | apahomov/sorl-thumbnail | BSD-3-Clause | 2,884 bytes =====
# coding=utf-8
import re
from django.utils import six
from sorl.thumbnail.helpers import ThumbnailError, toint
bgpos_pat = re.compile(r'^(?P<value>\d+)(?P<unit>%|px)$')
geometry_pat = re.compile(r'^(?P<x>\d+)?(?:x(?P<y>\d+))?$')
class ThumbnailParseError(ThumbnailError):
pass
def parse_geometry(geometry, ratio=None):
"""
Parses a geometry string syntax and returns a (width, height) tuple
"""
m = geometry_pat.match(geometry)
def syntax_error():
return ThumbnailParseError('Geometry does not have the correct '
'syntax: %s' % geometry)
if not m:
raise syntax_error()
x = m.group('x')
y = m.group('y')
if x is None and y is None:
raise syntax_error()
if x is not None:
x = int(x)
if y is not None:
y = int(y)
# calculate x or y proportionally if not set but we need the image ratio
# for this
if ratio is not None:
ratio = float(ratio)
if x is None:
x = toint(y * ratio)
elif y is None:
y = toint(x / ratio)
return x, y
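# Examples of the parsing above:
#   parse_geometry('100x200')        == (100, 200)
#   parse_geometry('100', ratio=2.0) == (100, 50)   # height derived from width and ratio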
def parse_crop(crop, xy_image, xy_window):
"""
Returns x, y offsets for cropping. The window area should fit inside
image but it works out anyway
"""
x_alias_percent = {
'left': '0%',
'center': '50%',
'right': '100%',
}
y_alias_percent = {
'top': '0%',
'center': '50%',
'bottom': '100%',
}
xy_crop = crop.split(' ')
if len(xy_crop) == 1:
if crop in x_alias_percent:
x_crop = x_alias_percent[crop]
y_crop = '50%'
elif crop in y_alias_percent:
y_crop = y_alias_percent[crop]
x_crop = '50%'
else:
x_crop, y_crop = crop, crop
elif len(xy_crop) == 2:
x_crop, y_crop = xy_crop
x_crop = x_alias_percent.get(x_crop, x_crop)
y_crop = y_alias_percent.get(y_crop, y_crop)
else:
raise ThumbnailParseError('Unrecognized crop option: %s' % crop)
def get_offset(crop, epsilon):
m = bgpos_pat.match(crop)
if not m:
raise ThumbnailParseError('Unrecognized crop option: %s' % crop)
value = int(m.group('value')) # we only take ints in the regexp
unit = m.group('unit')
if unit == '%':
value = epsilon * value / 100.0
# return ∈ [0, epsilon]
return int(max(0, min(value, epsilon)))
offset_x = get_offset(x_crop, xy_image[0] - xy_window[0])
offset_y = get_offset(y_crop, xy_image[1] - xy_window[1])
return offset_x, offset_y
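# Example of the offset calculation above:
#   parse_crop('center', (800, 600), (100, 100)) == (350, 250)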
def parse_cropbox(cropbox):
"""
Returns x, y, x2, y2 tuple for cropping.
"""
if isinstance(cropbox, six.text_type):
return tuple([int(x.strip()) for x in cropbox.split(',')])
else:
return tuple(cropbox)
# ===== __init__.py | psifertex/bn-goloader | MIT | 367 bytes =====
from binaryninja import PluginCommand
from .gohelpers import rename_functions, rename_newproc_fptrs
PluginCommand.register(
"golang\\auto-rename functions",
"Automatically rename go functions based on symbol table",
rename_functions)
PluginCommand.register("golang\\rename fptrs passed to newproc", "....",
rename_newproc_fptrs)
# ===== helloworld.py | TheKingR09/HelloWorld2018 | MIT, BSD-3-Clause | 106,416 bytes =====
# -*- coding: utf-8 -*-
'''
Free to use, all credits belong to me, Zero Cool.
Do not sell or rent it!
© 2018 Hello World
'''
from important import *
# Setup Argparse
parser = argparse.ArgumentParser(description='Selfbot Hello World')
parser.add_argument('-t', '--token', type=str, metavar='', required=False, help='Token | Example : Exxxx')
parser.add_argument('-e', '--email', type=str, default='', metavar='', required=False, help='Email Address | Example : [email protected]')
parser.add_argument('-p', '--passwd', type=str, default='', metavar='', required=False, help='Password | Example : xxxx')
parser.add_argument('-a', '--apptype', type=str, default='', metavar='', required=False, choices=list(ApplicationType._NAMES_TO_VALUES), help='Application Type | Example : CHROMEOS')
parser.add_argument('-s', '--systemname', type=str, default='', metavar='', required=False, help='System Name | Example : Chrome_OS')
parser.add_argument('-c', '--channelid', type=str, default='', metavar='', required=False, help='Channel ID | Example : 1341209950')
parser.add_argument('-T', '--traceback', type=str2bool, nargs='?', default=False, metavar='', required=False, const=True, choices=[True, False], help='Using Traceback | Use : True/False')
parser.add_argument('-S', '--showqr', type=str2bool, nargs='?', default=False, metavar='', required=False, const=True, choices=[True, False], help='Show QR | Use : True/False')
args = parser.parse_args()
# Login Client
listAppType = ['DESKTOPWIN', 'DESKTOPMAC', 'IOSIPAD', 'CHROMEOS']
try:
print ('##----- LOGIN CLIENT -----##')
line = None
if args.apptype:
tokenPath = Path('authToken.txt')
if tokenPath.exists():
tokenFile = tokenPath.open('r')
else:
tokenFile = tokenPath.open('w+')
savedAuthToken = tokenFile.read().strip()
authToken = savedAuthToken if savedAuthToken and not args.token else args.token
idOrToken = authToken if authToken else args.email
try:
line = LINE(idOrToken, args.passwd, appType=args.apptype, systemName=args.systemname, channelId=args.channelid, showQr=args.showqr)
tokenFile.close()
tokenFile = tokenPath.open('w+')
tokenFile.write(line.authToken)
tokenFile.close()
except TalkException as talk_error:
if args.traceback: traceback.print_tb(talk_error.__traceback__)
sys.exit('++ Error : %s' % talk_error.reason.replace('_', ' '))
except Exception as error:
if args.traceback: traceback.print_tb(error.__traceback__)
sys.exit('++ Error : %s' % str(error))
else:
for appType in listAppType:
tokenPath = Path('authToken.txt')
if tokenPath.exists():
tokenFile = tokenPath.open('r')
else:
tokenFile = tokenPath.open('w+')
savedAuthToken = tokenFile.read().strip()
authToken = savedAuthToken if savedAuthToken and not args.token else args.token
idOrToken = authToken if authToken else args.email
try:
line = LINE(idOrToken, args.passwd, appType=appType, systemName=args.systemname, channelId=args.channelid, showQr=args.showqr)
tokenFile.close()
tokenFile = tokenPath.open('w+')
tokenFile.write(line.authToken)
tokenFile.close()
except TalkException as talk_error:
print ('++ Error : %s' % talk_error.reason.replace('_', ' '))
if args.traceback: traceback.print_tb(talk_error.__traceback__)
if talk_error.code == 1:
continue
sys.exit(1)
except Exception as error:
print ('++ Error : %s' % str(error))
if args.traceback: traceback.print_tb(error.__traceback__)
sys.exit(1)
except Exception as error:
print ('++ Error : %s' % str(error))
if args.traceback: traceback.print_tb(error.__traceback__)
sys.exit(1)
if line:
print ('++ Auth Token : %s' % line.authToken)
print ('++ Timeline Token : %s' % line.tl.channelAccessToken)
print ('##----- LOGIN CLIENT (Success) -----##')
else:
sys.exit('##----- LOGIN CLIENT (Failed) -----##')
myMid = line.profile.mid
programStart = time.time()
oepoll = OEPoll(line)
tmp_text = []
lurking = {}
settings = livejson.File('setting.json', True, False, 4)
bool_dict = {
True: ['Yes', 'Active', 'Success', 'Open', 'On'],
False: ['No', 'Not Active', 'Failed', 'Close', 'Off']
}
# Backup profile
profile = line.getContact(myMid)
settings['myProfile']['displayName'] = profile.displayName
settings['myProfile']['statusMessage'] = profile.statusMessage
settings['myProfile']['pictureStatus'] = profile.pictureStatus
coverId = line.profileDetail['result']['objectId']
settings['myProfile']['coverId'] = coverId
def restartProgram():
print ('##----- PROGRAM RESTARTED -----##')
python = sys.executable
os.execl(python, python, *sys.argv)
def logError(error, write=True):
errid = str(random.randint(100, 999))
filee = open('tmp/errors/%s.txt'%errid, 'w') if write else None
if args.traceback: traceback.print_tb(error.__traceback__)
if write:
traceback.print_tb(error.__traceback__, file=filee)
filee.close()
with open('errorLog.txt', 'a') as e:
e.write('\n%s : %s'%(errid, str(error)))
print ('++ Error : {error}'.format(error=error))
def command(text):
pesan = text.lower()
if settings['setKey']['status']:
if pesan.startswith(settings['setKey']['key']):
cmd = pesan.replace(settings['setKey']['key'],'')
else:
cmd = 'Undefined command'
else:
cmd = text.lower()
return cmd
def genImageB64(path):
with open(path, 'rb') as img_file:
encode_str = img_file.read()
b64img = base64.b64encode(encode_str)
return b64img.decode('utf-8')
def genUrlB64(url):
return base64.b64encode(url.encode('utf-8')).decode('utf-8')
def removeCmd(text, key=''):
if key == '':
setKey = '' if not settings['setKey']['status'] else settings['setKey']['key']
else:
setKey = key
text_ = text[len(setKey):]
sep = text_.split(' ')
return text_[len(sep[0] + ' '):]
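# Example (illustrative, assuming the set key is '!'):
#   removeCmd('!profile steal mid @user', '!') == 'steal mid @user'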
def multiCommand(cmd, list_cmd=[]):
if True in [cmd.startswith(c) for c in list_cmd]:
return True
else:
return False
def replaceAll(text, dic):
try:
rep_this = dic.items()
except:
rep_this = dic.iteritems()
for i, j in rep_this:
text = text.replace(i, j)
return text
def help():
key = '' if not settings['setKey']['status'] else settings['setKey']['key']
with open('help.txt', 'r') as f:
text = f.read()
helpMsg = text.format(key=key.title())
return helpMsg
def parsingRes(res):
result = ''
textt = res.split('\n')
for text in textt:
if True not in [text.startswith(s) for s in ['╭', '├', '│', '╰']]:
result += '\n│ ' + text
else:
if text == textt[0]:
result += text
else:
result += '\n' + text
return result
def mentionMembers(to, mids=[]):
if myMid in mids: mids.remove(myMid)
parsed_len = len(mids)//20+1
result = '╭───「 Mention Members 」\n'
mention = '@zeroxyuuki\n'
no = 0
for point in range(parsed_len):
mentionees = []
for mid in mids[point*20:(point+1)*20]:
no += 1
result += '│ %i. %s' % (no, mention)
slen = len(result) - 12
elen = len(result) + 3
mentionees.append({'S': str(slen), 'E': str(elen - 4), 'M': mid})
if mid == mids[-1]:
result += '╰───「 Hello World 」\n'
if result:
if result.endswith('\n'): result = result[:-1]
line.sendMessage(to, result, {'MENTION': json.dumps({'MENTIONEES': mentionees})}, 0)
result = ''
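# Illustrative usage of the batching above (assumes linepy's getGroup/members):
#   mentionMembers(to, [m.mid for m in line.getGroup(to).members])
# sends the member list in chunks of 20 mentions per message.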
def cloneProfile(mid):
contact = line.getContact(mid)
profile = line.getProfile()
profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
line.updateProfile(profile)
if contact.pictureStatus:
pict = line.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus)
line.updateProfilePicture(pict)
coverId = line.getProfileDetail(mid)['result']['objectId']
line.updateProfileCoverById(coverId)
def backupProfile():
profile = line.getContact(myMid)
settings['myProfile']['displayName'] = profile.displayName
settings['myProfile']['pictureStatus'] = profile.pictureStatus
settings['myProfile']['statusMessage'] = profile.statusMessage
coverId = line.getProfileDetail()['result']['objectId']
settings['myProfile']['coverId'] = str(coverId)
def restoreProfile():
profile = line.getProfile()
profile.displayName = settings['myProfile']['displayName']
profile.statusMessage = settings['myProfile']['statusMessage']
line.updateProfile(profile)
if settings['myProfile']['pictureStatus']:
pict = line.downloadFileURL('http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'])
line.updateProfilePicture(pict)
coverId = settings['myProfile']['coverId']
line.updateProfileCoverById(coverId)
def executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey):
if cmd == 'logoutbot':
		line.sendMessage(to, 'Bot will be logged out')
sys.exit('##----- PROGRAM STOPPED -----##')
elif cmd == 'logoutdevicee':
line.logout()
sys.exit('##----- CLIENT LOGOUT -----##')
elif cmd == 'restart':
		line.sendMessage(to, 'Bot will restart')
restartProgram()
elif cmd == 'help':
line.sendReplyMessage(msg_id, to, help())
elif cmd == 'speed':
start = time.time()
line.sendMessage(to, 'Checking speed')
elapse = time.time() - start
line.sendMessage(to, 'Speed sending message took %s seconds' % str(elapse))
elif cmd == 'me':
line.sendContact(to, myMid)
elif cmd == 'runtime':
runtime = time.time() - programStart
		line.sendMessage(to, 'Bot already running for ' + format_timespan(runtime))
elif cmd == 'author':
line.sendMessage(to, 'Author is linepy')
elif cmd == 'about':
res = '╭───「 About 」'
res += '\n├ Type : Selfbot Hello World'
res += '\n├ Version : 3.0.8'
res += '\n├ Library : linepy (Python)'
res += '\n├ Creator : Zero Cool'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
elif cmd == 'status':
res = '╭───「 Status 」'
res += '\n├ Auto Add : ' + bool_dict[settings['autoAdd']['status']][1]
res += '\n├ Auto Join : ' + bool_dict[settings['autoJoin']['status']][1]
res += '\n├ Auto Respond : ' + bool_dict[settings['autoRespond']['status']][1]
res += '\n├ Auto Respond Mention : ' + bool_dict[settings['autoRespondMention']['status']][1]
res += '\n├ Auto Read : ' + bool_dict[settings['autoRead']][1]
res += '\n├ Setting Key : ' + bool_dict[settings['setKey']['status']][1]
res += '\n├ Mimic : ' + bool_dict[settings['mimic']['status']][1]
res += '\n├ Greetings Join : ' + bool_dict[settings['greet']['join']['status']][1]
res += '\n├ Greetings Leave : ' + bool_dict[settings['greet']['leave']['status']][1]
res += '\n├ Check Contact : ' + bool_dict[settings['checkContact']][1]
res += '\n├ Check Post : ' + bool_dict[settings['checkPost']][1]
res += '\n├ Check Sticker : ' + bool_dict[settings['checkSticker']][1]
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
elif cmd == 'abort':
aborted = False
if to in settings['changeGroupPicture']:
settings['changeGroupPicture'].remove(to)
line.sendMessage(to, 'Change group picture aborted')
aborted = True
if settings['changePictureProfile']:
settings['changePictureProfile'] = False
line.sendMessage(to, 'Change picture profile aborted')
aborted = True
if settings['changeCoverProfile']:
settings['changeCoverProfile'] = False
line.sendMessage(to, 'Change cover profile aborted')
aborted = True
if not aborted:
line.sendMessage(to, 'Failed abort, nothing to abort')
elif cmd.startswith('error'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Error 」'
res += '\n├ Usage : '
res += '\n│ • {key}Error'
res += '\n│ • {key}Error Logs'
res += '\n│ • {key}Error Reset'
res += '\n│ • {key}Error Detail <errid>'
res += '\n╰───「 Hello World 」'
if cmd == 'error':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cond[0].lower() == 'logs':
try:
filee = open('errorLog.txt', 'r')
except FileNotFoundError:
return line.sendMessage(to, 'Failed display error logs, error logs file not found')
errors = [err.strip() for err in filee.readlines()]
filee.close()
if not errors: return line.sendMessage(to, 'Failed display error logs, empty error logs')
res = '╭───「 Error Logs 」'
res += '\n├ List :'
parsed_len = len(errors)//200+1
no = 0
for point in range(parsed_len):
for error in errors[point*200:(point+1)*200]:
if not error: continue
no += 1
res += '\n│ %i. %s' % (no, error)
if error == errors[-1]:
res += '\n╰───「 Hello World 」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cond[0].lower() == 'reset':
filee = open('errorLog.txt', 'w')
filee.write('')
filee.close()
shutil.rmtree('tmp/errors/', ignore_errors=True)
os.system('mkdir tmp/errors')
line.sendMessage(to, 'Success reset error logs')
elif cond[0].lower() == 'detail':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
errid = cond[1]
if os.path.exists('tmp/errors/%s.txt' % errid):
with open('tmp/errors/%s.txt' % errid, 'r') as f:
line.sendMessage(to, f.read())
else:
return line.sendMessage(to, 'Failed display details error, errorid not valid')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif txt.startswith('setkey'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Setting Key 」'
res += '\n├ Status : ' + bool_dict[settings['setKey']['status']][1]
res += '\n├ Key : ' + settings['setKey']['key'].title()
res += '\n├ Usage : '
res += '\n│ • Setkey'
res += '\n│ • Setkey <on/off>'
res += '\n│ • Setkey <key>'
res += '\n╰───「 Hello World 」'
if txt == 'setkey':
line.sendMessage(to, parsingRes(res))
elif texttl == 'on':
if settings['setKey']['status']:
line.sendMessage(to, 'Setkey already active')
else:
settings['setKey']['status'] = True
line.sendMessage(to, 'Success activated setkey')
elif texttl == 'off':
if not settings['setKey']['status']:
line.sendMessage(to, 'Setkey already deactive')
else:
settings['setKey']['status'] = False
line.sendMessage(to, 'Success deactivated setkey')
else:
settings['setKey']['key'] = texttl
line.sendMessage(to, 'Success change set key to (%s)' % textt)
elif cmd.startswith('autoadd'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Auto Add 」'
res += '\n├ Status : ' + bool_dict[settings['autoAdd']['status']][1]
res += '\n├ Reply : ' + bool_dict[settings['autoAdd']['reply']][0]
res += '\n├ Reply Message : ' + settings['autoAdd']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoAdd'
res += '\n│ • {key}AutoAdd <on/off>'
res += '\n│ • {key}AutoAdd Reply <on/off>'
res += '\n│ • {key}AutoAdd <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'autoadd':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoAdd']['status']:
line.sendMessage(to, 'Autoadd already active')
else:
settings['autoAdd']['status'] = True
line.sendMessage(to, 'Success activated autoadd')
elif texttl == 'off':
if not settings['autoAdd']['status']:
line.sendMessage(to, 'Autoadd already deactive')
else:
settings['autoAdd']['status'] = False
line.sendMessage(to, 'Success deactivated autoadd')
elif cond[0].lower() == 'reply':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoAdd']['reply']:
line.sendMessage(to, 'Reply message autoadd already active')
else:
settings['autoAdd']['reply'] = True
line.sendMessage(to, 'Success activate reply message autoadd')
elif cond[1].lower() == 'off':
if not settings['autoAdd']['reply']:
line.sendMessage(to, 'Reply message autoadd already deactive')
else:
settings['autoAdd']['reply'] = False
line.sendMessage(to, 'Success deactivate reply message autoadd')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
settings['autoAdd']['message'] = textt
line.sendMessage(to, 'Success change autoadd message to `%s`' % textt)
elif cmd.startswith('autojoin'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Auto Join 」'
res += '\n├ Status : ' + bool_dict[settings['autoJoin']['status']][1]
res += '\n├ Reply : ' + bool_dict[settings['autoJoin']['reply']][0]
res += '\n├ Reply Message : ' + settings['autoJoin']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoJoin'
res += '\n│ • {key}AutoJoin <on/off>'
res += '\n│ • {key}AutoJoin Ticket <on/off>'
res += '\n│ • {key}AutoJoin Reply <on/off>'
res += '\n│ • {key}AutoJoin <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'autojoin':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoJoin']['status']:
line.sendMessage(to, 'Autojoin already active')
else:
settings['autoJoin']['status'] = True
line.sendMessage(to, 'Success activated autojoin')
elif texttl == 'off':
if not settings['autoJoin']['status']:
line.sendMessage(to, 'Autojoin already deactive')
else:
settings['autoJoin']['status'] = False
line.sendMessage(to, 'Success deactivated autojoin')
elif cond[0].lower() == 'reply':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoJoin']['reply']:
line.sendMessage(to, 'Reply message autojoin already active')
else:
settings['autoJoin']['reply'] = True
line.sendMessage(to, 'Success activate reply message autojoin')
elif cond[1].lower() == 'off':
if not settings['autoJoin']['reply']:
line.sendMessage(to, 'Reply message autojoin already deactive')
else:
settings['autoJoin']['reply'] = False
line.sendMessage(to, 'Success deactivate reply message autojoin')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cond[0].lower() == 'ticket':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoJoin']['ticket']:
line.sendMessage(to, 'Autojoin ticket already active')
else:
settings['autoJoin']['ticket'] = True
line.sendMessage(to, 'Success activate autojoin ticket')
elif cond[1].lower() == 'off':
if not settings['autoJoin']['ticket']:
line.sendMessage(to, 'Autojoin ticket already deactive')
else:
settings['autoJoin']['ticket'] = False
line.sendMessage(to, 'Success deactivate autojoin ticket')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
settings['autoJoin']['message'] = textt
line.sendMessage(to, 'Success change autojoin message to `%s`' % textt)
elif cmd.startswith('autorespondmention'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Auto Respond 」'
res += '\n├ Status : ' + bool_dict[settings['autoRespondMention']['status']][1]
res += '\n├ Reply Message : ' + settings['autoRespondMention']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoRespondMention'
res += '\n│ • {key}AutoRespondMention <on/off>'
res += '\n│ • {key}AutoRespondMention <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'autorespondmention':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoRespondMention']['status']:
line.sendMessage(to, 'Autorespondmention already active')
else:
settings['autoRespondMention']['status'] = True
line.sendMessage(to, 'Success activated autorespondmention')
elif texttl == 'off':
if not settings['autoRespondMention']['status']:
line.sendMessage(to, 'Autorespondmention already deactive')
else:
settings['autoRespondMention']['status'] = False
line.sendMessage(to, 'Success deactivated autorespondmention')
else:
settings['autoRespondMention']['message'] = textt
line.sendMessage(to, 'Success change autorespondmention message to `%s`' % textt)
elif cmd.startswith('autorespond'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Auto Respond 」'
res += '\n├ Status : ' + bool_dict[settings['autoRespond']['status']][1]
res += '\n├ Reply Message : ' + settings['autoRespond']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoRespond'
res += '\n│ • {key}AutoRespond <on/off>'
res += '\n│ • {key}AutoRespond <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'autorespond':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoRespond']['status']:
line.sendMessage(to, 'Autorespond already active')
else:
settings['autoRespond']['status'] = True
line.sendMessage(to, 'Success activated autorespond')
elif texttl == 'off':
if not settings['autoRespond']['status']:
line.sendMessage(to, 'Autorespond already deactive')
else:
settings['autoRespond']['status'] = False
line.sendMessage(to, 'Success deactivated autorespond')
else:
settings['autoRespond']['message'] = textt
line.sendMessage(to, 'Success change autorespond message to `%s`' % textt)
elif cmd.startswith('autoread '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['autoRead']:
line.sendMessage(to, 'Autoread already active')
else:
settings['autoRead'] = True
line.sendMessage(to, 'Success activated autoread')
elif texttl == 'off':
if not settings['autoRead']:
line.sendMessage(to, 'Autoread already deactive')
else:
settings['autoRead'] = False
line.sendMessage(to, 'Success deactivated autoread')
elif cmd.startswith('checkcontact '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkContact']:
line.sendMessage(to, 'Checkcontact already active')
else:
settings['checkContact'] = True
line.sendMessage(to, 'Success activated checkcontact')
elif texttl == 'off':
if not settings['checkContact']:
line.sendMessage(to, 'Checkcontact already deactive')
else:
settings['checkContact'] = False
line.sendMessage(to, 'Success deactivated checkcontact')
elif cmd.startswith('checkpost '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkPost']:
line.sendMessage(to, 'Checkpost already active')
else:
settings['checkPost'] = True
line.sendMessage(to, 'Success activated checkpost')
elif texttl == 'off':
if not settings['checkPost']:
line.sendMessage(to, 'Checkpost already deactive')
else:
settings['checkPost'] = False
line.sendMessage(to, 'Success deactivated checkpost')
elif cmd.startswith('checksticker '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkSticker']:
line.sendMessage(to, 'Checksticker already active')
else:
settings['checkSticker'] = True
line.sendMessage(to, 'Success activated checksticker')
elif texttl == 'off':
if not settings['checkSticker']:
line.sendMessage(to, 'Checksticker already deactive')
else:
settings['checkSticker'] = False
line.sendMessage(to, 'Success deactivated checksticker')
elif cmd.startswith('myprofile'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
profile = line.getProfile()
res = '╭───「 My Profile 」'
res += '\n├ MID : ' + profile.mid
res += '\n├ Display Name : ' + str(profile.displayName)
res += '\n├ Status Message : ' + str(profile.statusMessage)
res += '\n├ Usage : '
res += '\n│ • {key}MyProfile'
res += '\n│ • {key}MyProfile MID'
res += '\n│ • {key}MyProfile Name'
res += '\n│ • {key}MyProfile Bio'
res += '\n│ • {key}MyProfile Pict'
res += '\n│ • {key}MyProfile Cover'
res += '\n│ • {key}MyProfile Change Name <name>'
res += '\n│ • {key}MyProfile Change Bio <bio>'
res += '\n│ • {key}MyProfile Change Pict'
res += '\n│ • {key}MyProfile Change Cover'
res += '\n╰───「 Hello World 」'
if cmd == 'myprofile':
if profile.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + profile.pictureStatus)
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'mid':
line.sendMessage(to, '「 MID 」\n' + str(profile.mid))
elif texttl == 'name':
line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
elif texttl == 'bio':
line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
elif texttl == 'pict':
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
line.sendMessage(to, 'Failed display picture status, user doesn\'t have a picture status')
elif texttl == 'cover':
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))
elif texttl.startswith('change '):
texts = textt[7:]
textsl = texts.lower()
if textsl.startswith('name '):
name = texts[5:]
if len(name) <= 20:
profile.displayName = name
line.updateProfile(profile)
line.sendMessage(to, 'Success change display name, changed to `%s`' % name)
else:
line.sendMessage(to, 'Failed change display name, the length of the name cannot be more than 20')
elif textsl.startswith('bio '):
bio = texts[4:]
if len(bio) <= 500:
profile.statusMessage = bio
line.updateProfile(profile)
line.sendMessage(to, 'Success change status message, changed to `%s`' % bio)
else:
line.sendMessage(to, 'Failed change status message, the length of the bio cannot be more than 500')
elif textsl == 'pict':
settings['changePictureProfile'] = True
line.sendMessage(to, 'Please send the image to set in picture profile, type `{key}Abort` if want cancel it.\nFYI: Downloading images will fail if too long upload the image'.format(key=setKey.title()))
elif textsl == 'cover':
settings['changeCoverProfile'] = True
line.sendMessage(to, 'Please send the image to set in cover profile, type `{key}Abort` if want cancel it.\nFYI: Downloading images will fail if too long upload the image'.format(key=setKey.title()))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('profile'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
profile = line.getContact(to) if msg.toType == 0 else None
res = '╭───「 My Profile 」'
if profile:
res += '\n├ MID : ' + profile.mid
res += '\n├ Display Name : ' + str(profile.displayName)
if profile.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(profile.displayNameOverridden)
res += '\n├ Status Message : ' + str(profile.statusMessage)
res += '\n├ Usage : '
res += '\n│ • {key}Profile'
res += '\n│ • {key}Profile Mid'
res += '\n│ • {key}Profile Name'
res += '\n│ • {key}Profile Bio'
res += '\n│ • {key}Profile Pict'
res += '\n│ • {key}Profile Cover'
res += '\n│ • {key}Profile Steal Profile <mention>'
res += '\n│ • {key}Profile Steal Mid <mention>'
res += '\n│ • {key}Profile Steal Name <mention>'
res += '\n│ • {key}Profile Steal Bio <mention>'
res += '\n│ • {key}Profile Steal Pict <mention>'
res += '\n│ • {key}Profile Steal Cover <mention>'
res += '\n╰───「 Hello World 」'
if cmd == 'profile':
if profile:
if profile.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + profile.pictureStatus)
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'mid':
if msg.toType != 0: return line.sendMessage(to, 'Failed display mid user, use this command only in personal chat')
line.sendMessage(to, '「 MID 」\n' + str(profile.mid))
elif texttl == 'name':
			if msg.toType != 0: return line.sendMessage(to, 'Failed display name, use this command only in personal chat')
line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
elif texttl == 'bio':
			if msg.toType != 0: return line.sendMessage(to, 'Failed display status message, use this command only in personal chat')
line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
elif texttl == 'pict':
			if msg.toType != 0: return line.sendMessage(to, 'Failed display picture status, use this command only in personal chat')
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
line.sendMessage(to, 'Failed display picture status, user doesn\'t have a picture status')
elif texttl == 'cover':
			if msg.toType != 0: return line.sendMessage(to, 'Failed display cover picture, use this command only in personal chat')
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))
elif texttl.startswith('steal '):
texts = textt[6:]
textsl = texts.lower()
if textsl.startswith('profile '):
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
profile = line.getContact(mention['M'])
if profile.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + profile.pictureStatus)
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Profile 」'
res += '\n├ MID : ' + profile.mid
res += '\n├ Display Name : ' + str(profile.displayName)
if profile.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(profile.displayNameOverridden)
res += '\n├ Status Message : ' + str(profile.statusMessage)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'Failed steal profile, no one user mentioned')
elif textsl.startswith('mid '):
res = '╭───「 MID 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
mid = mentions['MENTIONEES'][0]['M']
return line.sendMessage(to, '「 MID 」\n' + mid)
for mention in mentions['MENTIONEES']:
mid = mention['M']
no += 1
res += '\n│ %i. %s' % (no, mid)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'Failed steal mid, no one user mentioned')
elif textsl.startswith('name '):
res = '╭───「 Display Name 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
return line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
res += '\n│ %i. %s' % (no, profile.displayName)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'Failed steal display name, no one user mentioned')
elif textsl.startswith('bio '):
res = '╭───「 Status Message 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
return line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
res += '\n│ %i. %s' % (no, profile.statusMessage)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'Failed steal status message, no one user mentioned')
elif textsl.startswith('pict '):
res = '╭───「 Picture Status 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
return line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
return line.sendMessage(to, 'Failed steal picture status, user `%s` doesn\'t have a picture status' % profile.displayName)
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
res += '\n│ %i. %s' % (no, path)
else:
res += '\n│ %i. Not Found' % no
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'Failed steal picture status, no one user mentioned')
elif textsl.startswith('cover '):
res = '╭───「 Cover Picture 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
mid = mentions['MENTIONEES'][0]['M']
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
						return line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))
for mention in mentions['MENTIONEES']:
mid = mention['M']
no += 1
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
res += '\n│ %i. %s' % (no, cover)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'Failed steal cover picture, no one user mentioned')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('mimic'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
targets = ''
if settings['mimic']['target']:
no = 0
for target, status in settings['mimic']['target'].items():
no += 1
try:
name = line.getContact(target).displayName
except TalkException:
name = 'Unknown'
targets += '\n│ %i. %s//%s' % (no, name, bool_dict[status][1])
else:
targets += '\n│ Nothing'
res = '╭───「 Mimic 」'
res += '\n├ Status : ' + bool_dict[settings['mimic']['status']][1]
res += '\n├ List :'
res += targets
res += '\n├ Usage : '
res += '\n│ • {key}Mimic'
res += '\n│ • {key}Mimic <on/off>'
res += '\n│ • {key}Mimic Reset'
res += '\n│ • {key}Mimic Add <mention>'
res += '\n│ • {key}Mimic Del <mention>'
res += '\n╰───「 Hello World 」'
if cmd == 'mimic':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['mimic']['status']:
line.sendMessage(to, 'Mimic already active')
else:
settings['mimic']['status'] = True
line.sendMessage(to, 'Success activated mimic')
elif texttl == 'off':
if not settings['mimic']['status']:
line.sendMessage(to, 'Mimic already deactive')
else:
settings['mimic']['status'] = False
line.sendMessage(to, 'Success deactivated mimic')
elif texttl == 'reset':
settings['mimic']['target'] = {}
line.sendMessage(to, 'Success reset mimic list')
elif texttl.startswith('add '):
res = '╭───「 Mimic 」'
res += '\n├ Status : Add Target'
res += '\n├ Added :'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
settings['mimic']['target'][mid] = True
no += 1
try:
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'Failed add mimic target, no one user mentioned')
elif texttl.startswith('del '):
res = '╭───「 Mimic 」'
res += '\n├ Status : Del Target'
res += '\n├ Deleted :'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in settings['mimic']['target']:
settings['mimic']['target'][mid] = False
no += 1
try:
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'Failed del mimic target, no one user mentioned')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
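# --- Broadcast: send a message to all friends (type 1), all groups (type 2), or both (type 0) ---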
elif cmd.startswith('broadcast'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Broadcast 」'
res += '\n├ Broadcast Type : '
res += '\n│ 1 : Friends'
res += '\n│ 2 : Groups'
res += '\n│ 0 : All'
res += '\n├ Usage : '
res += '\n│ • {key}Broadcast'
res += '\n│ • {key}Broadcast <type> <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'broadcast':
line.sendMessage(to, parsingRes(res).format(key=setKey.title()))
elif cond[0] == '1':
if len(cond) < 2:
return line.sendMessage(to, 'Failed broadcast, no message detected')
res = '「 Broadcast 」\n'
res += textt[2:]
res += '\n\n「 Hello World 」'
targets = line.getAllContactIds()
for target in list(targets):  # iterate over a copy so failed targets can be removed safely
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'Success broadcast to all friends, sent to %i friends' % len(targets))
elif cond[0] == '2':
if len(cond) < 2:
return line.sendMessage(to, 'Failed broadcast, no message detected')
res = '「 Broadcast 」\n'
res += textt[2:]
res += '\n\n「 Hello World 」'
targets = line.getGroupIdsJoined()
for target in list(targets):  # iterate over a copy so failed targets can be removed safely
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'Success broadcast to all groups, sent to %i groups' % len(targets))
elif cond[0] == '0':
if len(cond) < 2:
return line.sendMessage(to, 'Failed broadcast, no message detected')
res = '「 Broadcast 」\n'
res += textt[2:]
res += '\n\n「 Hello World 」'
targets = line.getGroupIdsJoined() + line.getAllContactIds()
for target in list(targets):  # iterate over a copy so failed targets can be removed safely
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'Success broadcast to all groups and friends, sent to %i groups and friends' % len(targets))
else:
line.sendMessage(to, parsingRes(res).format(key=setKey.title()))
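# --- FriendList: list friends in chunks of 200, show contact info, add by mention, delete by mention/number/name/all ---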
elif cmd.startswith('friendlist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cids = line.getAllContactIds()
cids.sort()
cnames = []
ress = []
res = '╭───「 Friend List 」'
res += '\n├ List:'
if cids:
contacts = []
no = 0
if len(cids) > 200:
parsed_len = len(cids)//200+1
for point in range(parsed_len):
for cid in cids[point*200:(point+1)*200]:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for cid in list(cids):  # iterate over a copy so invalid ids can be removed safely
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}FriendList'
res += '\n│ • {key}FriendList Info <num/name>'
res += '\n│ • {key}FriendList Add <mention>'
res += '\n│ • {key}FriendList Del <mention/num/name/all>'
res += '\n╰───「 Hello World 」'
ress.append(res)
if cmd == 'friendlist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('info '):
texts = textt[5:].split(', ')
if not cids:
return line.sendMessage(to, 'Failed display info friend, nothing friend in list')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
elif texttl.startswith('add '):
res = '╭───「 Friend List 」'
res += '\n├ Status : Add Friend'
res += '\n├ Added :'
no = 0
added = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in cids or mid in added:
continue
no += 1
try:
line.findAndAddContactsByMid(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
added.append(mid)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'Failed add contact to friend list, no one user mentioned')
elif texttl.startswith('del '):
texts = textt[4:].split(', ')
if not cids:
return line.sendMessage(to, 'Failed del contact from friend list, nothing friend in list')
res = '╭───「 Friend List 」'
res += '\n├ Status : Del Friend'
res += '\n├ Deleted :'
no = 0
deleted = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid not in cids or mid in deleted:
continue
no += 1
try:
line.deleteContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(mid)
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif texxt.lower() == 'all':
for contact in contacts:
if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
time.sleep(0.8)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
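# --- BlockList: same flow as FriendList, but for blocked contacts (block/unblock) ---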
elif cmd.startswith('blocklist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cids = line.getBlockedContactIds()
cids.sort()
cnames = []
ress = []
res = '╭───「 Block List 」'
res += '\n├ List:'
if cids:
contacts = []
no = 0
if len(cids) > 200:
parsed_len = len(cids)//200+1
for point in range(parsed_len):
for cid in cids[point*200:(point+1)*200]:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for cid in list(cids):  # iterate over a copy so invalid ids can be removed safely
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}BlockList'
res += '\n│ • {key}BlockList Info <num/name>'
res += '\n│ • {key}BlockList Add <mention>'
res += '\n│ • {key}BlockList Del <mention/num/name/all>'
res += '\n╰───「 Hello World 」'
ress.append(res)
if cmd == 'blocklist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('info '):
texts = textt[5:].split(', ')
if not cids:
return line.sendMessage(to, 'Failed display info blocked user, nothing user in list')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
elif texttl.startswith('add '):
res = '╭───「 Block List 」'
res += '\n├ Status : Add Block'
res += '\n├ Added :'
no = 0
added = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in cids or mid in added:
continue
no += 1
try:
line.blockContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
added.append(mid)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'Failed block contact, no one user mentioned')
elif texttl.startswith('del '):
texts = textt[4:].split(', ')
if not cids:
return line.sendMessage(to, 'Failed unblock contact, nothing user in list')
res = '╭───「 Block List 」'
res += '\n├ Status : Del Block'
res += '\n├ Deleted :'
no = 0
deleted = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid not in cids or mid in deleted:
continue
no += 1
try:
line.unblockContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(mid)
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.unblockContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.unblockContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif texxt.lower() == 'all':
for contact in contacts:
if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.unblockContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
time.sleep(0.8)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「 Hello World 」'
line.sendMessage(to, res)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
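# --- MentionAll: mention every member of the current room or group ---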
elif cmd == 'mentionall':
members = []
if msg.toType == 1:
room = line.getCompactRoom(to)
members = [mem.mid for mem in room.contacts]
elif msg.toType == 2:
group = line.getCompactGroup(to)
members = [mem.mid for mem in group.members]
else:
return line.sendMessage(to, 'Failed mentionall members, use this command only on room or group chat')
if members:
mentionMembers(to, members)
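# --- GroupInfo: show group metadata (creator, member/pending counts, QR status, invite ticket) ---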
elif cmd == 'groupinfo':
if msg.toType != 2: return line.sendMessage(to, 'Failed display group info, use this command only on group chat')
group = line.getCompactGroup(to)
try:
ccreator = group.creator.mid
gcreator = group.creator.displayName
except:
ccreator = None
gcreator = 'Not found'
if not group.invitee:
pendings = 0
else:
pendings = len(group.invitee)
qr = 'Close' if group.preventedJoinByTicket else 'Open'
if group.preventedJoinByTicket:
ticket = 'Not found'
else:
ticket = 'https://line.me/R/ti/g/' + str(line.reissueGroupTicket(group.id))
created = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(int(group.createdTime) / 1000))
path = 'http://dl.profile.line-cdn.net/' + group.pictureStatus
res = '╭───「 Group Info 」'
res += '\n├ ID : ' + group.id
res += '\n├ Name : ' + group.name
res += '\n├ Creator : ' + gcreator
res += '\n├ Created Time : ' + created
res += '\n├ Member Count : ' + str(len(group.members))
res += '\n├ Pending Count : ' + str(pendings)
res += '\n├ QR Status : ' + qr
res += '\n├ Ticket : ' + ticket
res += '\n╰───「 Hello World 」'
line.sendImageWithURL(to, path)
if ccreator:
line.sendContact(to, ccreator)
line.sendMessage(to, res)
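# --- GroupList: list joined groups in chunks of 200 and leave by number/name/all ---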
elif cmd.startswith('grouplist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
gids = line.getGroupIdsJoined()
gnames = []
ress = []
res = '╭───「 Group List 」'
res += '\n├ List:'
if gids:
groups = line.getGroups(gids)
no = 0
if len(groups) > 200:
parsed_len = len(groups)//200+1
for point in range(parsed_len):
for group in groups[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for group in groups:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}GroupList'
res += '\n│ • {key}GroupList Leave <num/name/all>'
res += '\n╰───「 Hello World 」'
ress.append(res)
if cmd == 'grouplist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('leave '):
texts = textt[6:].split(', ')
leaved = []
if not gids:
return line.sendMessage(to, 'Failed leave group, nothing group in list')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in leaved:
line.sendMessage(to, 'Already leave group %s' % group.name)
continue
line.leaveGroup(group.id)
leaved.append(group.id)
if to not in leaved:
line.sendMessage(to, 'Success leave group %s' % group.name)
else:
line.sendMessage(to, 'Failed leave group number %i, number out of range' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in leaved:
line.sendMessage(to, 'Already leave group %s' % group.name)
continue
line.leaveGroup(group.id)
leaved.append(group.id)
if to not in leaved:
line.sendMessage(to, 'Success leave group %s' % group.name)
else:
line.sendMessage(to, 'Failed leave group with name `%s`, name not in list' % name)
elif texxt.lower() == 'all':
for gid in gids:
if gid in leaved:
continue
line.leaveGroup(gid)
leaved.append(gid)
time.sleep(0.8)
if to not in leaved:
line.sendMessage(to, 'Success leave all group')
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
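# --- InvitationList: list pending group invitations and accept/reject by number/name/all ---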
elif cmd.startswith('invitationlist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
gids = line.getGroupIdsInvited()
gnames = []
ress = []
res = '╭───「 Invitation List 」'
res += '\n├ List:'
if gids:
groups = line.getGroups(gids)
no = 0
if len(groups) > 200:
parsed_len = len(groups)//200+1
for point in range(parsed_len):
for group in groups[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for group in groups:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}InvitationList'
res += '\n│ • {key}InvitationList Accept <num/name/all>'
res += '\n│ • {key}InvitationList Reject <num/name/all>'
res += '\n╰───「 Hello World 」'
ress.append(res)
if cmd == 'invitationlist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('accept '):
texts = textt[7:].split(', ')
accepted = []
if not gids:
return line.sendMessage(to, 'Failed accept group, nothing invitation group in list')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in accepted:
line.sendMessage(to, 'Already accept group %s' % group.name)
continue
line.acceptGroupInvitation(group.id)
accepted.append(group.id)
line.sendMessage(to, 'Success accept group %s' % group.name)
else:
line.sendMessage(to, 'Failed accept group number %i, number out of range' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in accepted:
line.sendMessage(to, 'Already accept group %s' % group.name)
continue
line.acceptGroupInvitation(group.id)
accepted.append(group.id)
line.sendMessage(to, 'Success accept group %s' % group.name)
else:
line.sendMessage(to, 'Failed accept group with name `%s`, name not in list' % name)
elif texxt.lower() == 'all':
for gid in gids:
if gid in accepted:
continue
line.acceptGroupInvitation(gid)
accepted.append(gid)
time.sleep(0.8)
line.sendMessage(to, 'Success accept all invitation group')
elif texttl.startswith('reject '):
texts = textt[7:].split(', ')
rejected = []
if not gids:
return line.sendMessage(to, 'Failed reject group, nothing invitation group in list')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in rejected:
line.sendMessage(to, 'Already reject group %s' % group.name)
continue
line.rejectGroupInvitation(group.id)
rejected.append(group.id)
line.sendMessage(to, 'Success reject group %s' % group.name)
else:
line.sendMessage(to, 'Failed reject group number %i, number out of range' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in rejected:
line.sendMessage(to, 'Already reject group %s' % group.name)
continue
line.rejectGroupInvitation(group.id)
rejected.append(group.id)
line.sendMessage(to, 'Success reject group %s' % group.name)
else:
line.sendMessage(to, 'Failed reject group with name `%s`, name not in list' % name)
elif texxt.lower() == 'all':
for gid in gids:
if gid in rejected:
continue
line.rejectGroupInvitation(gid)
rejected.append(gid)
time.sleep(0.8)
line.sendMessage(to, 'Success reject all invitation group')
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
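# --- MemberList / PendingList: dump member or invitee names, 200 per message ---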
elif cmd == 'memberlist':
if msg.toType == 1:
room = line.getRoom(to)
members = room.contacts
elif msg.toType == 2:
group = line.getGroup(to)
members = group.members
else:
return line.sendMessage(to, 'Failed display member list, use this command only on room or group chat')
if not members:
return line.sendMessage(to, 'Failed display member list, no one contact')
res = '╭───「 Member List 」'
parsed_len = len(members)//200+1
no = 0
for point in range(parsed_len):
for member in members[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s' % (no, member.displayName)
if member == members[-1]:
res += '\n╰───「 Hello World 」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cmd == 'pendinglist':
if msg.toType != 2: return line.sendMessage(to, 'Failed display pending list, use this command only on group chat')
group = line.getGroup(to)
members = group.invitee
if not members:
return line.sendMessage(to, 'Failed display pending list, no one contact')
res = '╭───「 Pending List 」'
parsed_len = len(members)//200+1
no = 0
for point in range(parsed_len):
for member in members[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s' % (no, member.displayName)
if member == members[-1]:
res += '\n╰───「 Hello World 」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cmd == 'openqr':
if msg.toType != 2: return line.sendMessage(to, 'Failed open qr, use this command only on group chat')
group = line.getCompactGroup(to)
group.preventedJoinByTicket = False
line.updateGroup(group)
line.sendMessage(to, 'Success open group qr, you must be careful')
elif cmd == 'closeqr':
if msg.toType != 2: return line.sendMessage(to, 'Failed close qr, use this command only on group chat')
group = line.getCompactGroup(to)
group.preventedJoinByTicket = True
line.updateGroup(group)
line.sendMessage(to, 'Success close group qr')
elif cmd.startswith('changegroupname '):
if msg.toType != 2: return line.sendMessage(to, 'Failed change group name, use this command only on group chat')
group = line.getCompactGroup(to)
gname = removeCmd(text, setKey)
if len(gname) > 50:
return line.sendMessage(to, 'Failed change group name, the group name cannot exceed 50 characters')
group.name = gname
line.updateGroup(group)
line.sendMessage(to, 'Success change group name to `%s`' % gname)
elif cmd == 'changegrouppict':
if msg.toType != 2: return line.sendMessage(to, 'Failed change group picture, use this command only on group chat')
if to not in settings['changeGroupPicture']:
settings['changeGroupPicture'].append(to)
line.sendMessage(to, 'Please send the image, type `{key}Abort` if want cancel it.\nFYI: Downloading images will fail if too long upload the image'.format(key=setKey.title()))
else:
line.sendMessage(to, 'Command already active, please send the image or type `{key}Abort` if want cancel it.\nFYI: Downloading images will fail if too long upload the image'.format(key=setKey.title()))
elif cmd == 'kickall':
if msg.toType != 2: return line.sendMessage(to, 'Failed kick all members, use this command only on group chat')
group = line.getCompactGroup(to)
if not group.members:
return line.sendMessage(to, 'Failed kick all members, no member in list')
for member in group.members:
if member.mid == myMid:
continue
try:
line.kickoutFromGroup(to, [member.mid])
except TalkException as talk_error:
return line.sendMessage(to, 'Failed kick all members, the reason is `%s`' % talk_error.reason)
time.sleep(0.8)
line.sendMessage(to, 'Success kick all members, totals %i members' % len(group.members))
elif cmd == 'cancelall':
if msg.toType != 2: return line.sendMessage(to, 'Failed cancel all pending members, use this command only on group chat')
group = line.getCompactGroup(to)
if not group.invitee:
return line.sendMessage(to, 'Failed cancel all pending members, no pending member in list')
for member in group.invitee:
if member.mid == myMid:
continue
try:
line.cancelGroupInvitation(to, [member.mid])
except TalkException as talk_error:
return line.sendMessage(to, 'Failed cancel all pending members, the reason is `%s`' % talk_error.reason)
time.sleep(0.8)
line.sendMessage(to, 'Success cancel all pending members, totals %i pending members' % len(group.invitee))
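# --- Lurk: track read receipts per chat (handled via op type 55 below) and optionally auto-reply to readers ---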
elif cmd.startswith('lurk'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if msg.toType in [1, 2] and to not in lurking:
lurking[to] = {
'status': False,
'time': None,
'members': [],
'reply': {
'status': False,
'message': settings['defaultReplyReader']
}
}
res = '╭───「 Lurking 」'
if msg.toType in [1, 2]: res += '\n├ Status : ' + bool_dict[lurking[to]['status']][1]
if msg.toType in [1, 2]: res += '\n├ Reply Reader : ' + bool_dict[lurking[to]['reply']['status']][1]
if msg.toType in [1, 2]: res += '\n├ Reply Reader Message : ' + lurking[to]['reply']['message']
res += '\n├ Usage : '
res += '\n│ • {key}Lurk'
res += '\n│ • {key}Lurk <on/off>'
res += '\n│ • {key}Lurk Result'
res += '\n│ • {key}Lurk Reset'
res += '\n│ • {key}Lurk ReplyReader <on/off>'
res += '\n│ • {key}Lurk ReplyReader <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'lurk':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif msg.toType not in [1, 2]:
return line.sendMessage(to, 'Failed execute command lurking, use this command only on room or group chat')
elif texttl == 'on':
if lurking[to]['status']:
line.sendMessage(to, 'Lurking already active')
else:
lurking[to].update({
'status': True,
'time': datetime.now(tz=pytz.timezone('Asia/Jakarta')).strftime('%Y-%m-%d %H:%M:%S'),
'members': []
})
line.sendMessage(to, 'Success activated lurking')
elif texttl == 'off':
if not lurking[to]['status']:
line.sendMessage(to, 'Lurking already deactive')
else:
lurking[to].update({
'status': False,
'time': None,
'members': []
})
line.sendMessage(to, 'Success deactivated lurking')
elif texttl == 'result':
if not lurking[to]['status']:
line.sendMessage(to, 'Failed display lurking result, lurking has not been activated')
else:
if not lurking[to]['members']:
line.sendMessage(to, 'Failed display lurking result, no one members reading')
else:
members = lurking[to]['members']
res = '╭───「 Lurking 」'
if msg.toType == 2: res += '\n├ Group Name : ' + line.getGroup(to).name
parsed_len = len(members)//200+1
no = 0
for point in range(parsed_len):
for member in members[point*200:(point+1)*200]:
no += 1
try:
name = line.getContact(member).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if member == members[-1]:
res += '\n│'
res += '\n├ Time Set : ' + lurking[to]['time']
res += '\n╰───「 Hello World 」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif texttl == 'reset':
if not lurking[to]['status']:
line.sendMessage(to, 'Failed reset lurking, lurking has not been activated')
else:
lurking[to].update({
'status': True,
'time': datetime.now(tz=pytz.timezone('Asia/Jakarta')).strftime('%Y-%m-%d %H:%M:%S'),
'members': []
})
line.sendMessage(to, 'Success resetted lurking')
elif texttl.startswith('replyreader '):
texts = textt[12:]
if texts == 'on':
if lurking[to]['reply']['status']:
line.sendMessage(to, 'Reply reader already active')
else:
lurking[to]['reply']['status'] = True
line.sendMessage(to, 'Success activated reply reader')
elif texts == 'off':
if not lurking[to]['reply']['status']:
line.sendMessage(to, 'Reply reader already deactive')
else:
lurking[to]['reply']['status'] = False
line.sendMessage(to, 'Success deactivated reply reader')
else:
lurking[to]['reply']['message'] = texts
line.sendMessage(to, 'Success set reply reader message to `%s`' % texts)
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
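# --- Greet: configurable join/leave greeting messages for groups ---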
elif cmd.startswith('greet'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Greet Message 」'
res += '\n├ Greetings Join Status : ' + bool_dict[settings['greet']['join']['status']][1]
res += '\n├ Greetings Join Message : ' + settings['greet']['join']['message']
res += '\n├ Greetings Leave Status : ' + bool_dict[settings['greet']['leave']['status']][1]
res += '\n├ Greetings Leave Message : ' + settings['greet']['leave']['message']
res += '\n├ Usage : '
res += '\n│ • {key}Greet'
res += '\n│ • {key}Greet Join <on/off>'
res += '\n│ • {key}Greet Join <message>'
res += '\n│ • {key}Greet Leave <on/off>'
res += '\n│ • {key}Greet Leave <message>'
res += '\n╰───「 Hello World 」'
if cmd == 'greet':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('join '):
texts = textt[5:]
textsl = texts.lower()
if textsl == 'on':
if settings['greet']['join']['status']:
line.sendMessage(to, 'Greetings join already active')
else:
settings['greet']['join']['status'] = True
line.sendMessage(to, 'Success activated greetings join')
elif textsl == 'off':
if not settings['greet']['join']['status']:
line.sendMessage(to, 'Greetings join already deactive')
else:
settings['greet']['join']['status'] = False
line.sendMessage(to, 'Success deactivated greetings join')
else:
settings['greet']['join']['message'] = texts
line.sendMessage(to, 'Success change greetings join message to `%s`' % texts)
elif texttl.startswith('leave '):
texts = textt[6:]
textsl = texts.lower()
if textsl == 'on':
if settings['greet']['leave']['status']:
line.sendMessage(to, 'Greetings leave already active')
else:
settings['greet']['leave']['status'] = True
line.sendMessage(to, 'Success activated greetings leave')
elif textsl == 'off':
if not settings['greet']['leave']['status']:
line.sendMessage(to, 'Greetings leave already deactive')
else:
settings['greet']['leave']['status'] = False
line.sendMessage(to, 'Success deactivated greetings leave')
else:
settings['greet']['leave']['message'] = texts
line.sendMessage(to, 'Success change greetings leave message to `%s`' % texts)
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('kick '):
if msg.toType != 2: return line.sendMessage(to, 'Failed kick member, use this command only on group chat')
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid == myMid:
continue
try:
line.kickoutFromGroup(to, [mid])
except TalkException as talk_error:
return line.sendMessage(to, 'Failed kick members, the reason is `%s`' % talk_error.reason)
time.sleep(0.8)
line.sendMessage(to, 'Success kick members, totals %i members' % len(mentions['MENTIONEES']))
else:
line.sendMessage(to, 'Failed kick member, please mention user you want to kick')
elif cmd.startswith('vkick '):
if msg.toType != 2: return line.sendMessage(to, 'Failed vultra kick member, use this command only on group chat')
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid == myMid:
continue
try:
line.kickoutFromGroup(to, [mid])
line.findAndAddContactsByMid(mid)
line.inviteIntoGroup(to, [mid])
line.cancelGroupInvitation(to, [mid])
except TalkException as talk_error:
return line.sendMessage(to, 'Failed vultra kick members, the reason is `%s`' % talk_error.reason)
time.sleep(0.8)
line.sendMessage(to, 'Success vultra kick members, totals %i members' % len(mentions['MENTIONEES']))
else:
line.sendMessage(to, 'Failed vultra kick member, please mention user you want to kick')
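# executeOp dispatches LINE operations polled from the server. Operation types,
# as handled below: 5 = a contact added me (auto add-back), 13 = invited into a
# group (auto join), 15 = member left a group, 17 = member joined a group,
# 25 = message sent from this account, 26 = message received, 55 = read receipt.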
def executeOp(op):
try:
print ('++ Operation : ( %i ) %s' % (op.type, OpType._VALUES_TO_NAMES[op.type].replace('_', ' ')))
if op.type == 5:
if settings['autoAdd']['status']:
line.findAndAddContactsByMid(op.param1)
if settings['autoAdd']['reply']:
if '@!' not in settings['autoAdd']['message']:
line.sendMessage(op.param1, settings['autoAdd']['message'])
else:
line.sendMentionV2(op.param1, settings['autoAdd']['message'], [op.param1])
if op.type == 13:
if settings['autoJoin']['status'] and myMid in op.param3:
line.acceptGroupInvitation(op.param1)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(op.param1, settings['autoJoin']['message'])
else:
line.sendMentionV2(op.param1, settings['autoJoin']['message'], [op.param2])
if op.type == 15:
if settings['greet']['leave']['status']:
if '@!' not in settings['greet']['leave']['message']:
line.sendMessage(op.param1, settings['greet']['leave']['message'].format(name=line.getCompactGroup(op.param1).name))
else:
line.sendMentionV2(op.param1, settings['greet']['leave']['message'].format(name=line.getCompactGroup(op.param1).name), [op.param2])
if op.type == 17:
if settings['greet']['join']['status']:
if '@!' not in settings['greet']['join']['message']:
line.sendMessage(op.param1, settings['greet']['join']['message'].format(name=line.getCompactGroup(op.param1).name))
else:
line.sendMentionV2(op.param1, settings['greet']['join']['message'].format(name=line.getCompactGroup(op.param1).name), [op.param2])
if op.type == 25:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
to = sender if not msg.toType and sender != myMid else receiver
txt = text.lower()
cmd = command(text)
setKey = settings['setKey']['key'] if settings['setKey']['status'] else ''
if text in tmp_text:
return tmp_text.remove(text)
if msg.contentType == 0: # Content type is text
if '/ti/g/' in text and settings['autoJoin']['ticket']:
regex = re.compile(r'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = regex.findall(text)
tickets = []
gids = line.getGroupIdsJoined()
for link in links:
if link not in tickets:
tickets.append(link)
for ticket in tickets:
try:
group = line.findGroupByTicket(ticket)
except:
continue
if group.id in gids:
line.sendMessage(to, 'I\'m already in group ' + group.name)
continue
line.acceptGroupInvitationByTicket(group.id, ticket)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(to, settings['autoJoin']['message'])
else:
line.sendMentionV2(to, settings['autoJoin']['message'], [sender])
line.sendMessage(to, 'Success join to group ' + group.name)
try:
executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey)
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
line.sendMessage(to, 'Execute command error, ' + str(talk_error))
time.sleep(3)
except Exception as error:
logError(error)
line.sendMessage(to, 'Execute command error, ' + str(error))
time.sleep(3)
elif msg.contentType == 1: # Content type is image
if settings['changePictureProfile']:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/picture.jpg')
line.updateProfilePicture(path)
line.sendMessage(to, 'Success change picture profile')
settings['changePictureProfile'] = False
elif settings['changeCoverProfile']:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/cover.jpg')
line.updateProfileCover(path)
line.sendMessage(to, 'Success change cover profile')
settings['changeCoverProfile'] = False
elif to in settings['changeGroupPicture'] and msg.toType == 2:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/grouppicture.jpg')
line.updateGroupPicture(to, path)
line.sendMessage(to, 'Success change group picture')
settings['changeGroupPicture'].remove(to)
elif msg.contentType == 7: # Content type is sticker
if settings['checkSticker']:
res = '╭───「 Sticker Info 」'
res += '\n├ Sticker ID : ' + msg.contentMetadata['STKID']
res += '\n├ Sticker Packages ID : ' + msg.contentMetadata['STKPKGID']
res += '\n├ Sticker Version : ' + msg.contentMetadata['STKVER']
res += '\n├ Sticker Link : line://shop/detail/' + msg.contentMetadata['STKPKGID']
res += '\n╰───「 Hello World 」'
line.sendMessage(to, parsingRes(res))
elif msg.contentType == 13: # Content type is contact
if settings['checkContact']:
mid = msg.contentMetadata['mid']
try:
contact = line.getContact(mid)
except:
return line.sendMessage(to, 'Failed get details contact with mid ' + mid)
res = '╭───「 Details Contact 」'
res += '\n├ MID : ' + mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「 Hello World 」'
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, parsingRes(res))
elif msg.contentType == 16: # Content type is album/note
if settings['checkPost']:
if msg.contentMetadata['serviceType'] in ['GB', 'NT', 'MH']:
if msg.contentMetadata['serviceType'] in ['GB', 'NT']:
contact = line.getContact(sender)
author = contact.displayName
else:
author = msg.contentMetadata['serviceName']
posturl = msg.contentMetadata['postEndUrl']
res = '╭───「 Details Post 」'
res += '\n├ Creator : ' + author
res += '\n├ Post Link : ' + posturl
res += '\n╰───「 Hello World 」'
elif op.type == 26:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
to = sender if not msg.toType and sender != myMid else receiver
txt = text.lower()
if settings['autoRead']:
line.sendChatChecked(to, msg_id)
if msg.contentType == 0: # Content type is text
if '/ti/g/' in text and settings['autoJoin']['ticket']:
regex = re.compile(r'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = regex.findall(text)
tickets = []
gids = line.getGroupIdsJoined()
for link in links:
if link not in tickets:
tickets.append(link)
for ticket in tickets:
try:
group = line.findGroupByTicket(ticket)
except:
continue
if group.id in gids:
line.sendMessage(to, 'I\'m already in group ' + group.name)
continue
line.acceptGroupInvitationByTicket(group.id, ticket)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(to, settings['autoJoin']['message'])
else:
line.sendMentionV2(to, settings['autoJoin']['message'], [sender])
line.sendMessage(to, 'Success join to group ' + group.name)
if settings['mimic']['status']:
if sender in settings['mimic']['target'] and settings['mimic']['target'][sender]:
try:
line.sendMessage(to, text, msg.contentMetadata)
tmp_text.append(text)
except:
pass
if settings['autoRespondMention']['status']:
if msg.toType in [1, 2] and 'MENTION' in msg.contentMetadata.keys() and sender != myMid and msg.contentType not in [6, 7, 9]:
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = [mention['M'] for mention in mentions['MENTIONEES']]
if myMid in mentionees:
if line.getProfile().displayName in text:
if '@!' not in settings['autoRespondMention']['message']:
line.sendMessage(to, settings['autoRespondMention']['message'])
else:
line.sendMentionV2(to, settings['autoRespondMention']['message'], [sender])
if settings['autoRespond']['status']:
if msg.toType == 0:
contact = line.getContact(sender)
if contact.attributes != 32 and 'MENTION' not in msg.contentMetadata.keys():
if '@!' not in settings['autoRespond']['message']:
line.sendMessage(to, settings['autoRespond']['message'])
else:
line.sendMentionV2(to, settings['autoRespond']['message'], [sender])
if op.type == 55:
if op.param1 in lurking:
if lurking[op.param1]['status'] and op.param2 not in lurking[op.param1]['members']:
lurking[op.param1]['members'].append(op.param2)
if lurking[op.param1]['reply']['status']:
if '@!' not in lurking[op.param1]['reply']['message']:
line.sendMessage(op.param1, lurking[op.param1]['reply']['message'])
else:
line.sendMentionV2(op.param1, lurking[op.param1]['reply']['message'], [op.param2])
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
except KeyboardInterrupt:
sys.exit('##---- KEYBOARD INTERRUPT -----##')
except Exception as error:
logError(error)
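# Long-polling loop: fetch up to 50 operations per trace, dispatch each one, then advance the revision cursor.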
def runningProgram():
while True:
try:
ops = oepoll.singleTrace(count=50)
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
continue
except KeyboardInterrupt:
sys.exit('##---- KEYBOARD INTERRUPT -----##')
except Exception as error:
logError(error)
continue
if ops:
for op in ops:
executeOp(op)
oepoll.setRevision(op.revision)
if __name__ == '__main__':
print ('##---- RUNNING PROGRAM -----##')
runningProgram()
| 48.747595 | 216 | 0.495499 |
175c439f1c9f06c783d5ee7ecca69fd522bebe45 | 1,800 | py | Python | 借来的plugins/OPQ-SetuBot/plugins/bot_eventMonitor.py | njjjay/IOTQQPlugins_selfuse | 23bda39647c14256e6366bf49d72bb71ba68cbd7 | ["MIT"] | 19 | 2020-06-16T03:36:24.000Z | 2022-01-30T07:46:29.000Z | 借来的plugins/OPQ-SetuBot/plugins/bot_eventMonitor.py | njjjay/IOTQQPlugins_selfuse | 23bda39647c14256e6366bf49d72bb71ba68cbd7 | ["MIT"] | 1 | 2020-08-01T18:20:10.000Z | 2020-08-03T10:42:04.000Z | 借来的plugins/OPQ-SetuBot/plugins/bot_eventMonitor.py | njjjay/IOTQQPlugins_selfuse | 23bda39647c14256e6366bf49d72bb71ba68cbd7 | ["MIT"] | 5 | 2020-08-12T02:02:20.000Z | 2021-06-09T08:38:33.000Z |
from botoy import EventMsg
from botoy.refine import refine_group_admin_event_msg, refine_group_join_event_msg
# from botoy import decorators as deco
from module import database, config
from loguru import logger
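# Handles group admin-change and member-join events, keeping the cached group data in sync.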
def receive_events(ctx: EventMsg):
if admin_info := refine_group_admin_event_msg(ctx):
if data_raw := database.BasicOperation.getGroupConf(admin_info.GroupID):
if admin_info.Flag == 1:  # became an administrator
logger.info('群:{} QQ:{}成为管理员'.format(admin_info.GroupID, admin_info.UserID))
if admin_info.UserID in data_raw['managers']:  # avoid a duplicate entry
data_raw['managers'].remove(admin_info.UserID)
data_raw['admins'].append(admin_info.UserID)
else:
logger.info('群:{} QQ:{}被取消管理员'.format(admin_info.GroupID, admin_info.UserID))
try:
data_raw['admins'].remove(admin_info.UserID)
except:  # an error means the cached group info is stale, refresh it
logger.warning('从数据库删除管理员出错,尝试重新刷新群数据')
database.Getdata.updateGroupData(admin_info.GroupID)
return
database.BasicOperation.updateGroupData(admin_info.GroupID, data_raw)
else:  # no cached data, fetch it fresh
database.Getdata.updateGroupData(admin_info.GroupID)
elif join_info := refine_group_join_event_msg(ctx):
if join_info.UserID == config.botqq:
logger.info('bot加入群{}'.format(join_info.FromUin))
database.Getdata.updateGroupData(join_info.FromUin)
else:
logger.info('{}:{}加入群{}'.format(join_info.UserName, join_info.UserID, join_info.FromUin))
elif ctx.MsgType == 'ON_EVENT_GROUP_JOIN_SUCC':
logger.info('bot加入群{}'.format(ctx.FromUin))
database.Getdata.updateGroupData(ctx.FromUin)
| 50 | 101 | 0.652222 |
4e2a34ea074064288976537a4e74b039521a36e9 | 4,857 | py | Python | src/sentry/api/endpoints/team_details.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | ["BSD-3-Clause"] | 2 | 2019-03-04T12:45:54.000Z | 2019-03-04T12:45:55.000Z | src/sentry/api/endpoints/team_details.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | ["BSD-3-Clause"] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | src/sentry/api/endpoints/team_details.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | ["BSD-3-Clause"] | 1 | 2020-07-03T00:52:19.000Z | 2020-07-03T00:52:19.000Z |
from __future__ import absolute_import
import logging
from uuid import uuid4
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.team import TeamEndpoint
from sentry.api.decorators import sudo_required
from sentry.api.serializers import serialize
from sentry.models import AuditLogEntryEvent, Team, TeamStatus
from sentry.tasks.deletion import delete_team
from sentry.utils.apidocs import scenario, attach_scenarios
delete_logger = logging.getLogger('sentry.deletions.api')
@scenario('GetTeam')
def get_team_scenario(runner):
runner.request(method='GET', path='/teams/%s/%s/' %
(runner.org.slug, runner.default_team.slug))
@scenario('UpdateTeam')
def update_team_scenario(runner):
team = runner.utils.create_team('The Obese Philosophers', runner.org)
runner.request(
method='PUT',
path='/teams/%s/%s/' % (runner.org.slug, team.slug),
data={'name': 'The Inflated Philosophers'}
)
class TeamSerializer(serializers.ModelSerializer):
slug = serializers.RegexField(r'^[a-z0-9_\-]+$', max_length=50)
class Meta:
model = Team
fields = ('name', 'slug')
def validate_slug(self, attrs, source):
value = attrs[source]
qs = Team.objects.filter(
slug=value,
organization=self.object.organization,
).exclude(id=self.object.id)
if qs.exists():
raise serializers.ValidationError('The slug "%s" is already in use.' % (value, ))
return attrs
class TeamDetailsEndpoint(TeamEndpoint):
doc_section = DocSection.TEAMS
@attach_scenarios([get_team_scenario])
def get(self, request, team):
"""
Retrieve a Team
```````````````
Return details on an individual team.
:pparam string organization_slug: the slug of the organization the
team belongs to.
:pparam string team_slug: the slug of the team to get.
:auth: required
"""
context = serialize(team, request.user)
context['organization'] = serialize(team.organization, request.user)
return Response(context)
@attach_scenarios([update_team_scenario])
def put(self, request, team):
"""
Update a Team
`````````````
Update various attributes and configurable settings for the given
team.
:pparam string organization_slug: the slug of the organization the
team belongs to.
:pparam string team_slug: the slug of the team to get.
:param string name: the new name for the team.
:param string slug: a new slug for the team. It has to be unique
and available.
:auth: required
"""
serializer = TeamSerializer(team, data=request.DATA, partial=True)
if serializer.is_valid():
team = serializer.save()
self.create_audit_entry(
request=request,
organization=team.organization,
target_object=team.id,
event=AuditLogEntryEvent.TEAM_EDIT,
data=team.get_audit_log_data(),
)
return Response(serialize(team, request.user))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@sudo_required
def delete(self, request, team):
"""
Delete a Team
`````````````
Schedules a team for deletion.
**Note:** Deletion happens asynchronously and therefor is not
immediate. However once deletion has begun the state of a project
changes and will be hidden from most public views.
"""
updated = Team.objects.filter(
id=team.id,
status=TeamStatus.VISIBLE,
).update(status=TeamStatus.PENDING_DELETION)
if updated:
transaction_id = uuid4().hex
self.create_audit_entry(
request=request,
organization=team.organization,
target_object=team.id,
event=AuditLogEntryEvent.TEAM_REMOVE,
data=team.get_audit_log_data(),
transaction_id=transaction_id,
)
delete_team.apply_async(
kwargs={
'object_id': team.id,
'transaction_id': transaction_id,
},
)
delete_logger.info(
'object.delete.queued',
extra={
'object_id': team.id,
'transaction_id': transaction_id,
'model': type(team).__name__,
}
)
return Response(status=204)
| 31.745098 | 93 | 0.597076 |
62ab5bc50ab12a960f299994eca779c93b81342c | 346 | py | Python | apps/order/migrations/0003_alter_order_table.py | xxcfun/mes_api | b694533f1bb7b7d79a01e949730a4fba7cece528 | ["Apache-2.0"] | null | null | null | apps/order/migrations/0003_alter_order_table.py | xxcfun/mes_api | b694533f1bb7b7d79a01e949730a4fba7cece528 | ["Apache-2.0"] | null | null | null | apps/order/migrations/0003_alter_order_table.py | xxcfun/mes_api | b694533f1bb7b7d79a01e949730a4fba7cece528 | ["Apache-2.0"] | null | null | null |
# Generated by Django 3.2 on 2022-01-10 09:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('order', '0002_alter_order_produce'),
]
operations = [
migrations.AlterModelTable(
name='order',
table='master_order',
),
]
| 19.222222 | 47 | 0.566474 |